cache-rtsp.py
import multiprocessing
import os
import threading
import time

import av
import cv2
import numpy as np
from fractions import Fraction  # Fraction(numerator, denominator)


class Cache_frame(object):
    """Cache the latest frame of a video stream in a separate process.

    Set the stream URL in self.address.
    Choose "pyav" or "opencv" as the decoder via self.encoding_tool.
    """

    def __init__(self):
        self.address = "http://ivi.bupt.edu.cn/hls/cctv1hd.m3u8"
        self.encoding_tool = "pyav"
        self.last_frame = None
        self.top = 1
        # A queue of size 1 so only the most recent frame is kept.
        self.q = multiprocessing.Queue(1)

    def write(self) -> None:
        # p = psutil.Process()
        # p.cpu_affinity([4])
        last = np.zeros(0, dtype=np.uint8)
        print('Process to write: %s' % os.getpid())
        event = threading.Event()

        def running_av():
            nonlocal last, event
            container = av.open(self.address, mode='r', options={'rtsp_transport': 'tcp'})
            # container.streams.framerate = Fraction(25)
            container.streams.video[0].thread_type = 'AUTO'  # multithreaded decoding
            for frame in container.decode(video=0):
                new_frame = frame.to_ndarray(format='bgr24')
                time.sleep(0.035)  # TODO: derive this delay from the stream frame rate
                last = new_frame
                event.set()

        def running_cv():
            nonlocal last, event
            cap = cv2.VideoCapture(self.address)
            while True:
                success, new_frame = cap.read()
                if success:
                    last = new_frame
                    event.set()

        if self.encoding_tool == "pyav":
            t1 = threading.Thread(target=running_av)
        elif self.encoding_tool == "opencv":
            t1 = threading.Thread(target=running_cv)
        else:
            print("no such encoding tool")
            return
        t1.start()
        while True:
            event.wait()
            self.q.put(last)
            event.clear()

    def read(self):
        # print("reading one frame")
        self.last_frame = self.q.get()
        # img = transform.resize(img, (270, 480))
        # img = cv2.resize(img, (0, 0), fx=0.3, fy=0.3)
        return self.last_frame

    def run(self):
        pw = multiprocessing.Process(target=self.write)
        pw.start()


if __name__ == '__main__':
    ca = Cache_frame()
    # ca.encoding_tool = "opencv"
    # ca.address = "rtsp://admin:admin@192.168.1.110:554/h264/ch40/main/av_stream"  # Hikvision camera address format
    t2 = threading.Thread(target=ca.run)
    t2.start()
    while True:
        img = ca.read()
        img = cv2.resize(img, (0, 0), fx=0.3, fy=0.3)
        key = cv2.waitKey(1) & 0xFF
        cv2.imshow("video", img)
        if key == ord('q'):
            break
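The PyAV branch above paces decoding with a fixed 0.035 s sleep (the TODO in running_av). A minimal sketch of deriving that delay from the stream's reported average frame rate instead; it assumes PyAV exposes average_rate on the video stream, which may be None for some streams:

# Hedged sketch, not part of the original file: compute a per-frame delay
# from the stream's reported rate instead of the hard-coded time.sleep(0.035).
import av


def frame_interval(container, default=0.04):
    """Seconds per decoded frame, falling back to `default` when unknown."""
    rate = container.streams.video[0].average_rate  # a Fraction, or None
    return float(1 / rate) if rate else default


if __name__ == '__main__':
    c = av.open("http://ivi.bupt.edu.cn/hls/cctv1hd.m3u8")
    print(frame_interval(c))  # e.g. 0.04 for a 25 fps stream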
test_mongo_core.py
"""Testing the MongoDB core of cachier.""" from __future__ import print_function import sys import datetime from datetime import timedelta from random import random from time import sleep import threading try: import queue except ImportError: # python 2 import Queue as queue import pytest import pymongo import hashlib import pandas as pd from pymongo.mongo_client import MongoClient from pymongo.errors import OperationFailure from cachier import cachier from cachier.mongo_core import _MongoCore, RecalculationNeeded _TEST_HOST = 'ds119508.mlab.com' _TEST_PORT = 19508 _TEST_USERNAME = 'cachier_test' _TEST_PWD = 'ZGhjO5CQESYJ69U4z65G79YG' def _get_cachier_db_mongo_client(): client = MongoClient(host=_TEST_HOST, port=_TEST_PORT, retryWrites=False) client.cachier_test.authenticate( name=_TEST_USERNAME, password=_TEST_PWD, mechanism='SCRAM-SHA-1' ) return client _COLLECTION_NAME = 'cachier_test{}.{}.{}'.format( sys.version_info[0], sys.version_info[1], sys.version_info[2]) def _test_mongetter(): if not hasattr(_test_mongetter, 'client'): _test_mongetter.client = _get_cachier_db_mongo_client() db_obj = _test_mongetter.client['cachier_test'] if _COLLECTION_NAME not in db_obj.list_collection_names(): db_obj.create_collection(_COLLECTION_NAME) return db_obj[_COLLECTION_NAME] # === Mongo core tests === def test_information(): print("\npymongo version: ", end="") print(pymongo.__version__) @cachier(mongetter=_test_mongetter) def _test_mongo_caching(arg_1, arg_2): """Some function.""" return random() + arg_1 + arg_2 def test_mongo_index_creation(): """Basic Mongo core functionality.""" collection = _test_mongetter() _test_mongo_caching.clear_cache() val1 = _test_mongo_caching(1, 2) val2 = _test_mongo_caching(1, 2) assert val1 == val2 assert _MongoCore._INDEX_NAME in collection.index_information() def test_mongo_core(): """Basic Mongo core functionality.""" _test_mongo_caching.clear_cache() val1 = _test_mongo_caching(1, 2) val2 = _test_mongo_caching(1, 2) assert val1 == val2 val3 = _test_mongo_caching(1, 2, ignore_cache=True) assert val3 != val1 val4 = _test_mongo_caching(1, 2) assert val4 == val1 val5 = _test_mongo_caching(1, 2, overwrite_cache=True) assert val5 != val1 val6 = _test_mongo_caching(1, 2) assert val6 == val5 MONGO_DELTA = timedelta(seconds=3) MONGO_DELTA_LONG = timedelta(seconds=10) @cachier(mongetter=_test_mongetter, stale_after=MONGO_DELTA, next_time=False) def _stale_after_mongo(arg_1, arg_2): """Some function.""" return random() + arg_1 + arg_2 def test_mongo_stale_after(): """Testing MongoDB core stale_after functionality.""" _stale_after_mongo.clear_cache() val1 = _stale_after_mongo(1, 2) val2 = _stale_after_mongo(1, 2) assert val1 == val2 sleep(3) val3 = _stale_after_mongo(1, 2) assert val3 != val1 @cachier(mongetter=_test_mongetter) def _takes_time(arg_1, arg_2): """Some function.""" sleep(3) return random() + arg_1 + arg_2 def _calls_takes_time(res_queue): res = _takes_time(34, 82.3) res_queue.put(res) def test_mongo_being_calculated(): """Testing MongoDB core handling of being calculated scenarios.""" _takes_time.clear_cache() res_queue = queue.Queue() thread1 = threading.Thread( target=_calls_takes_time, kwargs={'res_queue': res_queue}) thread2 = threading.Thread( target=_calls_takes_time, kwargs={'res_queue': res_queue}) thread1.start() sleep(1) thread2.start() thread1.join() thread2.join() assert res_queue.qsize() == 2 res1 = res_queue.get() res2 = res_queue.get() assert res1 == res2 @cachier(mongetter=_test_mongetter, stale_after=MONGO_DELTA, next_time=False, 
wait_for_calc_timeout=2) def _wait_for_calc_timeout_mongo_fast(arg_1, arg_2): """Some function.""" sleep(1) return random() + arg_1 + arg_2 def _calls_wait_for_calc_timeout_mongo_fast(res_queue): res = _wait_for_calc_timeout_mongo_fast(1, 2) res_queue.put(res) def test_mongo_wait_for_calc_timeout_ok(): """ Testing calls that avoid timeouts store the values in cache. """ _wait_for_calc_timeout_mongo_fast.clear_cache() val1 = _wait_for_calc_timeout_mongo_fast(1, 2) val2 = _wait_for_calc_timeout_mongo_fast(1, 2) assert val1 == val2 res_queue = queue.Queue() thread1 = threading.Thread( target=_calls_wait_for_calc_timeout_mongo_fast, kwargs={'res_queue': res_queue}) thread2 = threading.Thread( target=_calls_wait_for_calc_timeout_mongo_fast, kwargs={'res_queue': res_queue}) thread1.start() thread2.start() sleep(2) thread1.join() thread2.join() assert res_queue.qsize() == 2 res1 = res_queue.get() res2 = res_queue.get() assert res1 == res2 # Timeout did not kick in, a single call was done @cachier(mongetter=_test_mongetter, stale_after=MONGO_DELTA_LONG, next_time=False, wait_for_calc_timeout=2) def _wait_for_calc_timeout_mongo_slow(arg_1, arg_2): sleep(3) return random() + arg_1 + arg_2 def _calls_wait_for_calc_timeout_mongo_slow(res_queue): res = _wait_for_calc_timeout_mongo_slow(1, 2) res_queue.put(res) def test_mongo_wait_for_calc_timeout_slow(): """Testing for calls timing out to be performed twice when needed.""" _wait_for_calc_timeout_mongo_slow.clear_cache() res_queue = queue.Queue() thread1 = threading.Thread( target=_calls_wait_for_calc_timeout_mongo_slow, kwargs={'res_queue': res_queue}) thread2 = threading.Thread( target=_calls_wait_for_calc_timeout_mongo_slow, kwargs={'res_queue': res_queue}) thread1.start() thread2.start() sleep(1) res3 = _wait_for_calc_timeout_mongo_slow(1, 2) sleep(4) thread1.join() thread2.join() assert res_queue.qsize() == 2 res1 = res_queue.get() res2 = res_queue.get() assert res1 != res2 # Timeout kicked in. 
Two calls were done res4 = _wait_for_calc_timeout_mongo_slow(1, 2) assert res1 == res4 or res2 == res4 or res3 == res4 # One of the cached values is returned class _BadMongoCollection: def __init__(self, mongetter): self.collection = mongetter() self.index_information = self.collection.index_information self.create_indexes = self.collection.create_indexes self.find_one = self.collection.find_one def delete_many(self, *args, **kwargs): # skipcq: PYL-R0201, PYL-W0613 pass def update_many(self, *args, **kwargs): # skipcq: PYL-R0201, PYL-W0613 pass def update_one(self, *args, **kwargs): # skipcq: PYL-R0201, PYL-W0613 raise OperationFailure(Exception()) def _bad_mongetter(): return _BadMongoCollection(_test_mongetter) @cachier(mongetter=_bad_mongetter) def _func_w_bad_mongo(arg_1, arg_2): """Some function.""" return random() + arg_1 + arg_2 def test_mongo_write_failure(): """Testing MongoDB core handling of writing failure scenarios.""" with pytest.raises(OperationFailure): val1 = _func_w_bad_mongo(1, 2) val2 = _func_w_bad_mongo(1, 2) assert val1 == val2 def test_mongo_clear_being_calculated(): """Testing MongoDB core clear_being_calculated.""" _func_w_bad_mongo.clear_being_calculated() def test_stalled_mongo_db_cache(): @cachier(mongetter=_test_mongetter) def _stalled_func(): return 1 core = _MongoCore(_test_mongetter, None, False, 0) core.set_func(_stalled_func) core.clear_cache() with pytest.raises(RecalculationNeeded): core.wait_on_entry_calc(key=None) def test_stalled_mong_db_core(monkeypatch): def mock_get_entry(self, args, kwargs, hash_params): # skipcq: PYL-R0201, PYL-W0613 return "key", {'being_calculated': True} def mock_get_entry_by_key(self, key): # skipcq: PYL-R0201, PYL-W0613 return "key", None monkeypatch.setattr( "cachier.mongo_core._MongoCore.get_entry", mock_get_entry) monkeypatch.setattr( "cachier.mongo_core._MongoCore.get_entry_by_key", mock_get_entry_by_key ) @cachier(mongetter=_test_mongetter) def _stalled_func(): return 1 res = _stalled_func() assert res == 1 def mock_get_entry_2(self, args, kwargs, hash_params): # skipcq: PYL-W0613 entry = { 'being_calculated': True, "value": 1, "time": datetime.datetime.now() - datetime.timedelta(seconds=10) } return "key", entry monkeypatch.setattr( "cachier.mongo_core._MongoCore.get_entry", mock_get_entry_2) stale_after = datetime.timedelta(seconds=1) @cachier(mongetter=_test_mongetter, stale_after=stale_after) def _stalled_func_2(): """Testing stalled function""" return 2 res = _stalled_func_2() assert res == 2 def test_callable_hash_param(): def _hash_params(args, kwargs): def _hash(obj): if isinstance(obj, pd.core.frame.DataFrame): return hashlib.sha256(pd.util.hash_pandas_object(obj).values.tobytes()).hexdigest() return obj k_args = tuple(map(_hash, args)) k_kwargs = tuple(sorted({k: _hash(v) for k, v in kwargs.items()}.items())) return k_args + k_kwargs @cachier(mongetter=_test_mongetter, hash_params=_hash_params) def _params_with_dataframe(*args, **kwargs): """Some function.""" return random() _params_with_dataframe.clear_cache() df_a = pd.DataFrame.from_dict(dict(a=[0], b=[2], c=[3])) df_b = pd.DataFrame.from_dict(dict(a=[0], b=[2], c=[3])) value_a = _params_with_dataframe(df_a, 1) value_b = _params_with_dataframe(df_b, 1) assert value_a == value_b # same content --> same key value_a = _params_with_dataframe(1, df=df_a) value_b = _params_with_dataframe(1, df=df_b) assert value_a == value_b # same content --> same key
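The suite above authenticates against a hard-coded mLab test cluster. For local runs, a drop-in mongetter pointed at a local MongoDB works the same way; a minimal sketch mirroring _test_mongetter, assuming a server on localhost:27017 (host, port, and collection name are illustrative, not part of the original tests):

# Hypothetical local alternative to _test_mongetter; assumes a MongoDB
# server is reachable on localhost:27017.
from pymongo.mongo_client import MongoClient

_LOCAL_COLLECTION = 'cachier_test_local'


def _local_mongetter():
    if not hasattr(_local_mongetter, 'client'):
        _local_mongetter.client = MongoClient('mongodb://localhost:27017/')
    db_obj = _local_mongetter.client['cachier_test']
    if _LOCAL_COLLECTION not in db_obj.list_collection_names():
        db_obj.create_collection(_LOCAL_COLLECTION)
    return db_obj[_LOCAL_COLLECTION]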
driver_util.py
"""Scripts for drivers of Galaxy functional tests.""" import fcntl import logging import os import random import shutil import signal import socket import string import struct import subprocess import sys import tempfile import threading import time import nose.config import nose.core import nose.loader import nose.plugins.manager import yaml from paste import httpserver from six.moves import ( http_client, shlex_quote ) from six.moves.urllib.parse import urlparse from sqlalchemy_utils import ( create_database, database_exists, ) from galaxy.app import UniverseApplication as GalaxyUniverseApplication from galaxy.config import LOGGING_CONFIG_DEFAULT from galaxy.model import mapping from galaxy.model.tool_shed_install import mapping as toolshed_mapping from galaxy.tool_util.verify.interactor import GalaxyInteractorApi, verify_tool from galaxy.util import asbool, download_to_file from galaxy.util.properties import load_app_properties from galaxy.web import buildapp from galaxy.webapps.tool_shed.app import UniverseApplication as ToolshedUniverseApplication from .api_util import get_master_api_key, get_user_api_key from .instrument import StructuredTestDataPlugin from .nose_util import run from .test_logging import logging_config_file galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)) DEFAULT_WEB_HOST = socket.gethostbyname('localhost') DEFAULT_CONFIG_PREFIX = "GALAXY" GALAXY_TEST_DIRECTORY = os.path.join(galaxy_root, "test") GALAXY_TEST_FILE_DIR = "test-data,https://github.com/galaxyproject/galaxy-test-data.git" TOOL_SHED_TEST_DATA = os.path.join(GALAXY_TEST_DIRECTORY, "shed_functional", "test_data") TEST_WEBHOOKS_DIR = os.path.join(galaxy_root, "test", "functional", "webhooks") FRAMEWORK_TOOLS_DIR = os.path.join(GALAXY_TEST_DIRECTORY, "functional", "tools") FRAMEWORK_UPLOAD_TOOL_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "upload_tool_conf.xml") FRAMEWORK_SAMPLE_TOOLS_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "samples_tool_conf.xml") FRAMEWORK_DATATYPES_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "sample_datatypes_conf.xml") MIGRATED_TOOL_PANEL_CONFIG = 'config/migrated_tools_conf.xml' INSTALLED_TOOL_PANEL_CONFIGS = [ os.environ.get('GALAXY_TEST_SHED_TOOL_CONF', 'config/shed_tool_conf.xml') ] REALTIME_PROXY_TEMPLATE = string.Template(r""" uwsgi: realtime_map: $tempdir/realtime_map.sqlite python-raw: scripts/realtime/key_type_token_mapping.py route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(realtime\.$test_host:$test_port)$ goto:realtime route-run: goto:endendend route-label: realtime route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(realtime\.$test_host:$test_port)$ rpcvar:TARGET_HOST rtt_key_type_token_mapper_cached $2 $1 $3 $4 $0 5 route-if-not: empty:${TARGET_HOST} httpdumb:${TARGET_HOST} route-label: endendend """) DEFAULT_LOCALES = "en" log = logging.getLogger("test_driver") # Global variables to pass database contexts around - only needed for older # Tool Shed twill tests that didn't utilize the API for such interactions. galaxy_context = None tool_shed_context = None install_context = None def setup_tool_shed_tmp_dir(): tool_shed_test_tmp_dir = os.environ.get('TOOL_SHED_TEST_TMP_DIR', None) if tool_shed_test_tmp_dir is None: tool_shed_test_tmp_dir = tempfile.mkdtemp() # Here's the directory where everything happens. 
Temporary directories are created within this directory to contain # the hgweb.config file, the database, new repositories, etc. Since the tool shed browses repository contents via HTTP, # the full path to the temporary directroy wher eht repositories are located cannot contain invalid url characters. os.environ['TOOL_SHED_TEST_TMP_DIR'] = tool_shed_test_tmp_dir return tool_shed_test_tmp_dir def get_galaxy_test_tmp_dir(): """Create test directory for use by Galaxy server being setup for testing.""" galaxy_test_tmp_dir = os.environ.get('GALAXY_TEST_TMP_DIR', None) if galaxy_test_tmp_dir is None: galaxy_test_tmp_dir = tempfile.mkdtemp() return galaxy_test_tmp_dir def configure_environment(): """Hack up environment for test cases.""" # no op remove if unused if 'HTTP_ACCEPT_LANGUAGE' not in os.environ: os.environ['HTTP_ACCEPT_LANGUAGE'] = DEFAULT_LOCALES # Used by get_filename in tool shed's twilltestcase. if "TOOL_SHED_TEST_FILE_DIR" not in os.environ: os.environ["TOOL_SHED_TEST_FILE_DIR"] = TOOL_SHED_TEST_DATA os.environ["GALAXY_TEST_ENVIRONMENT_CONFIGURED"] = "1" def build_logger(): """Build a logger for test driver script.""" return log def ensure_test_file_dir_set(): """Ensure GALAXY_TEST_FILE_DIR setup in environment for test data resolver. Return first directory for backward compat. """ galaxy_test_file_dir = os.environ.get('GALAXY_TEST_FILE_DIR', GALAXY_TEST_FILE_DIR) os.environ['GALAXY_TEST_FILE_DIR'] = galaxy_test_file_dir first_test_file_dir = galaxy_test_file_dir.split(",")[0] return first_test_file_dir def setup_galaxy_config( tmpdir, use_test_file_dir=False, default_install_db_merged=True, default_tool_data_table_config_path=None, default_shed_tool_data_table_config=None, default_job_config_file=None, enable_tool_shed_check=False, default_tool_conf=None, shed_tool_conf=None, datatypes_conf=None, update_integrated_tool_panel=False, prefer_template_database=False, log_format=None, conda_auto_init=False, conda_auto_install=False, use_shared_connection_for_amqp=False, ): """Setup environment and build config for test Galaxy instance.""" # For certain docker operations this needs to be evaluated out - e.g. for cwltool. 
tmpdir = os.path.realpath(tmpdir) if not os.path.exists(tmpdir): os.makedirs(tmpdir) template_cache_path = tempfile.mkdtemp(prefix='compiled_templates_', dir=tmpdir) new_file_path = tempfile.mkdtemp(prefix='new_files_path_', dir=tmpdir) job_working_directory = tempfile.mkdtemp(prefix='job_working_directory_', dir=tmpdir) if use_test_file_dir: first_test_file_dir = ensure_test_file_dir_set() if not os.path.isabs(first_test_file_dir): first_test_file_dir = os.path.join(galaxy_root, first_test_file_dir) library_import_dir = first_test_file_dir import_dir = os.path.join(first_test_file_dir, 'users') if os.path.exists(import_dir): user_library_import_dir = import_dir else: user_library_import_dir = None else: user_library_import_dir = None library_import_dir = None job_config_file = os.environ.get('GALAXY_TEST_JOB_CONFIG_FILE', default_job_config_file) tool_path = os.environ.get('GALAXY_TEST_TOOL_PATH', 'tools') tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path) default_data_manager_config = None for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml']: if os.path.exists(data_manager_config): default_data_manager_config = data_manager_config data_manager_config_file = "test/functional/tools/sample_data_manager_conf.xml" if default_data_manager_config is not None: data_manager_config_file = "%s,%s" % (default_data_manager_config, data_manager_config_file) master_api_key = get_master_api_key() cleanup_job = 'never' if ("GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ) else 'onsuccess' # Data Manager testing temp path # For storing Data Manager outputs and .loc files so that real ones don't get clobbered galaxy_data_manager_data_path = tempfile.mkdtemp(prefix='data_manager_tool-data', dir=tmpdir) tool_conf = os.environ.get('GALAXY_TEST_TOOL_CONF', default_tool_conf) conda_auto_install = os.environ.get('GALAXY_TEST_CONDA_AUTO_INSTALL', conda_auto_install) conda_auto_init = os.environ.get('GALAXY_TEST_CONDA_AUTO_INIT', conda_auto_init) conda_prefix = os.environ.get('GALAXY_TEST_CONDA_PREFIX') if tool_conf is None: # As a fallback always at least allow upload. 
tool_conf = FRAMEWORK_UPLOAD_TOOL_CONF if shed_tool_conf is not None: tool_conf = "%s,%s" % (tool_conf, shed_tool_conf) shed_tool_data_table_config = default_shed_tool_data_table_config config = dict( admin_users='test@bx.psu.edu', allow_library_path_paste=True, allow_user_creation=True, allow_user_deletion=True, api_allow_run_as='test@bx.psu.edu', auto_configure_logging=logging_config_file is None, check_migrate_tools=False, chunk_upload_size=100, conda_prefix=conda_prefix, conda_auto_init=conda_auto_init, conda_auto_install=conda_auto_install, cleanup_job=cleanup_job, retry_metadata_internally=False, data_dir=tmpdir, data_manager_config_file=data_manager_config_file, enable_beta_tool_formats=True, expose_dataset_path=True, ftp_upload_purge=False, galaxy_data_manager_data_path=galaxy_data_manager_data_path, id_secret='changethisinproductiontoo', job_config_file=job_config_file, job_working_directory=job_working_directory, library_import_dir=library_import_dir, log_destination="stdout", new_file_path=new_file_path, override_tempdir=False, master_api_key=master_api_key, running_functional_tests=True, shed_tool_data_table_config=shed_tool_data_table_config, template_cache_path=template_cache_path, template_path='templates', tool_config_file=tool_conf, tool_data_table_config_path=tool_data_table_config_path, tool_parse_help=False, tool_path=tool_path, update_integrated_tool_panel=update_integrated_tool_panel, use_tasked_jobs=True, use_heartbeat=False, user_library_import_dir=user_library_import_dir, webhooks_dir=TEST_WEBHOOKS_DIR, logging=LOGGING_CONFIG_DEFAULT, monitor_thread_join_timeout=5, object_store_store_by="uuid", ) if not use_shared_connection_for_amqp: config["amqp_internal_connection"] = "sqlalchemy+sqlite:///%s?isolation_level=IMMEDIATE" % os.path.join(tmpdir, "control.sqlite") config.update(database_conf(tmpdir, prefer_template_database=prefer_template_database)) config.update(install_database_conf(tmpdir, default_merged=default_install_db_merged)) if asbool(os.environ.get("GALAXY_TEST_USE_HIERARCHICAL_OBJECT_STORE")): object_store_config = os.path.join(tmpdir, "object_store_conf.yml") with open(object_store_config, "w") as f: contents = """ type: hierarchical backends: - id: files1 type: disk weight: 1 files_dir: "${temp_directory}/files1" extra_dirs: - type: temp path: "${temp_directory}/tmp1" - type: job_work path: "${temp_directory}/job_working_directory1" - id: files2 type: disk weight: 1 files_dir: "${temp_directory}/files2" extra_dirs: - type: temp path: "${temp_directory}/tmp2" - type: job_work path: "${temp_directory}/job_working_directory2" """ contents_template = string.Template(contents) expanded_contents = contents_template.safe_substitute(temp_directory=tmpdir) f.write(expanded_contents) config["object_store_config_file"] = object_store_config if datatypes_conf is not None: config['datatypes_config_file'] = datatypes_conf if enable_tool_shed_check: config["enable_tool_shed_check"] = enable_tool_shed_check config["hours_between_check"] = 0.001 tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR') if tool_dependency_dir: config["tool_dependency_dir"] = tool_dependency_dir # Used by shed's twill dependency stuff # TODO: read from Galaxy's config API. 
os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir or os.path.join(tmpdir, 'dependencies') return config def _tool_data_table_config_path(default_tool_data_table_config_path=None): tool_data_table_config_path = os.environ.get('GALAXY_TEST_TOOL_DATA_TABLE_CONF', default_tool_data_table_config_path) if tool_data_table_config_path is None: # ... otherise find whatever Galaxy would use as the default and # the sample data for fucntional tests to that. default_tool_data_config = 'lib/galaxy/config/sample/tool_data_table_conf.xml.sample' for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml']: if os.path.exists(tool_data_config): default_tool_data_config = tool_data_config tool_data_table_config_path = '%s,test/functional/tool-data/sample_tool_data_tables.xml' % default_tool_data_config return tool_data_table_config_path def nose_config_and_run(argv=None, env=None, ignore_files=[], plugins=None): """Setup a nose context and run tests. Tests are specified by argv (defaulting to sys.argv). """ if env is None: env = os.environ if plugins is None: plugins = nose.plugins.manager.DefaultPluginManager() if argv is None: argv = sys.argv test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=plugins, ) # Add custom plugin to produce JSON data used by planemo. test_config.plugins.addPlugin(StructuredTestDataPlugin()) test_config.configure(argv) result = run(test_config) success = result.wasSuccessful() return success def copy_database_template(source, db_path): """Copy a 'clean' sqlite template database. From file or URL to specified path for sqlite database. """ db_path_dir = os.path.dirname(db_path) if not os.path.exists(db_path_dir): os.makedirs(db_path_dir) if os.path.exists(source): shutil.copy(source, db_path) assert os.path.exists(db_path) elif source.lower().startswith(("http://", "https://", "ftp://")): try: download_to_file(source, db_path) except Exception as e: # We log the exception but don't fail startup, since we can # do all migration steps instead of downloading a template. 
log.exception(e) else: raise Exception("Failed to copy database template from source %s" % source) def database_conf(db_path, prefix="GALAXY", prefer_template_database=False): """Find (and populate if needed) Galaxy database connection.""" database_auto_migrate = False check_migrate_databases = True dburi_var = "%s_TEST_DBURI" % prefix template_name = None if dburi_var in os.environ: database_connection = os.environ[dburi_var] # only template if postgres - not mysql or sqlite do_template = prefer_template_database and database_connection.startswith("p") if do_template: database_template_parsed = urlparse(database_connection) template_name = database_template_parsed.path[1:] # drop / from /galaxy actual_db = "gxtest" + ''.join(random.choice(string.ascii_uppercase) for _ in range(10)) actual_database_parsed = database_template_parsed._replace(path="/%s" % actual_db) database_connection = actual_database_parsed.geturl() if not database_exists(database_connection): # We pass by migrations and instantiate the current table create_database(database_connection) mapping.init('/tmp', database_connection, create_tables=True, map_install_models=True) toolshed_mapping.init(database_connection, create_tables=True) check_migrate_databases = False else: default_db_filename = "%s.sqlite" % prefix.lower() template_var = "%s_TEST_DB_TEMPLATE" % prefix db_path = os.path.join(db_path, default_db_filename) if template_var in os.environ: # Middle ground between recreating a completely new # database and pointing at existing database with # GALAXY_TEST_DBURI. The former requires a lot of setup # time, the latter results in test failures in certain # cases (namely tool shed tests expecting clean database). copy_database_template(os.environ[template_var], db_path) database_auto_migrate = True database_connection = 'sqlite:///%s' % db_path config = { "check_migrate_databases": check_migrate_databases, "database_connection": database_connection, "database_auto_migrate": database_auto_migrate } if not database_connection.startswith("sqlite://"): config["database_engine_option_max_overflow"] = "20" config["database_engine_option_pool_size"] = "10" if template_name: config["database_template"] = template_name return config def install_database_conf(db_path, default_merged=False): if 'GALAXY_TEST_INSTALL_DBURI' in os.environ: install_galaxy_database_connection = os.environ['GALAXY_TEST_INSTALL_DBURI'] elif asbool(os.environ.get('GALAXY_TEST_INSTALL_DB_MERGED', default_merged)): install_galaxy_database_connection = None else: install_galaxy_db_path = os.path.join(db_path, 'install.sqlite') install_galaxy_database_connection = 'sqlite:///%s' % install_galaxy_db_path conf = {} if install_galaxy_database_connection is not None: conf["install_database_connection"] = install_galaxy_database_connection return conf def database_files_path(test_tmpdir, prefix="GALAXY"): """Create a mock database/ directory like in GALAXY_ROOT. Use prefix to default this if TOOL_SHED_TEST_DBPATH or GALAXY_TEST_DBPATH is set in the environment. """ environ_var = "%s_TEST_DBPATH" % prefix if environ_var in os.environ: db_path = os.environ[environ_var] else: tempdir = tempfile.mkdtemp(dir=test_tmpdir) db_path = os.path.join(tempdir, 'database') return db_path def _get_static_settings(): """Configuration required for Galaxy static middleware. Returns dictionary of the settings necessary for a galaxy App to be wrapped in the static middleware. This mainly consists of the filesystem locations of url-mapped static resources. 
""" static_dir = os.path.join(galaxy_root, "static") # TODO: these should be copied from config/galaxy.ini return dict( static_enabled=True, static_cache_time=360, static_dir=static_dir, static_images_dir=os.path.join(static_dir, 'images', ''), static_favicon_dir=os.path.join(static_dir, 'favicon.ico'), static_scripts_dir=os.path.join(static_dir, 'scripts', ''), static_style_dir=os.path.join(static_dir, 'style', 'blue'), static_robots_txt=os.path.join(static_dir, 'robots.txt'), ) def get_webapp_global_conf(): """Get the global_conf dictionary sent to ``app_factory``.""" # (was originally sent 'dict()') - nothing here for now except static settings global_conf = dict() global_conf.update(_get_static_settings()) return global_conf def wait_for_http_server(host, port, sleep_amount=0.1, sleep_tries=150): """Wait for an HTTP server to boot up.""" # Test if the server is up for i in range(sleep_tries): # directly test the app, not the proxy conn = http_client.HTTPConnection(host, port) try: conn.request("GET", "/") response = conn.getresponse() if response.status == 200: break except socket.error as e: if e.errno not in [61, 111]: raise time.sleep(sleep_amount) else: template = "Test HTTP server on host %s and port %s did not return '200 OK' after 10 tries" message = template % (host, port) raise Exception(message) def attempt_ports(port): if port is not None: yield port raise Exception("An existing process seems bound to specified test server port [%s]" % port) else: random.seed() for i in range(0, 9): port = str(random.randint(8000, 10000)) yield port raise Exception("Unable to open a port between %s and %s to start Galaxy server" % (8000, 10000)) def serve_webapp(webapp, port=None, host=None): """Serve the webapp on a recommend port or a free one. Return the port the webapp is running on. """ server = None for port in attempt_ports(port): try: server = httpserver.serve(webapp, host=host, port=port, start_loop=False) break except socket.error as e: if e[0] == 98: continue raise t = threading.Thread(target=server.serve_forever) t.start() return server, port def cleanup_directory(tempdir): """Clean up temporary files used by test unless GALAXY_TEST_NO_CLEANUP is set. Also respect TOOL_SHED_TEST_NO_CLEANUP for legacy reasons. """ skip_cleanup = "GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ if skip_cleanup: log.info("GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir) return try: if os.path.exists(tempdir) and not skip_cleanup: shutil.rmtree(tempdir) except Exception: pass def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools): """Modify Galaxy app's toolbox for migrated or installed tool tests.""" if testing_installed_tools: # TODO: Do this without modifying app - that is a pretty violation # of Galaxy's abstraction - we shouldn't require app at all let alone # be modifying it. tool_configs = app.config.tool_configs # Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs, # and reload the app's toolbox. 
relative_migrated_tool_panel_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG) if relative_migrated_tool_panel_config in tool_configs: tool_configs.remove(relative_migrated_tool_panel_config) for installed_tool_panel_config in INSTALLED_TOOL_PANEL_CONFIGS: tool_configs.append(installed_tool_panel_config) from galaxy import tools # delay import because this brings in so many modules for small tests # noqa: E402 app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app) def build_galaxy_app(simple_kwargs): """Build a Galaxy app object from a simple keyword arguments. Construct paste style complex dictionary and use load_app_properties so Galaxy override variables are respected. Also setup "global" references to sqlalchemy database context for Galaxy and install databases. """ log.info("Galaxy database connection: %s", simple_kwargs["database_connection"]) simple_kwargs['global_conf'] = get_webapp_global_conf() simple_kwargs['global_conf']['__file__'] = "lib/galaxy/config/sample/galaxy.yml.sample" simple_kwargs = load_app_properties( kwds=simple_kwargs ) # Build the Universe Application app = GalaxyUniverseApplication(**simple_kwargs) log.info("Embedded Galaxy application started") global galaxy_context global install_context galaxy_context = app.model.context install_context = app.install_model.context return app def build_shed_app(simple_kwargs): """Build a Galaxy app object from a simple keyword arguments. Construct paste style complex dictionary. Also setup "global" reference to sqlalchemy database context for tool shed database. """ log.info("Tool shed database connection: %s", simple_kwargs["database_connection"]) # TODO: Simplify global_conf to match Galaxy above... simple_kwargs['__file__'] = 'tool_shed_wsgi.yml.sample' simple_kwargs['global_conf'] = get_webapp_global_conf() app = ToolshedUniverseApplication(**simple_kwargs) log.info("Embedded Toolshed application started") global tool_shed_context tool_shed_context = app.model.context return app class classproperty(object): def __init__(self, f): self.f = f def __get__(self, obj, owner): return self.f(owner) def get_ip_address(ifname): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return socket.inet_ntoa(fcntl.ioctl( s.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', ifname[:15].encode('utf-8')) )[20:24]) def explicitly_configured_host_and_port(prefix, config_object): host_env_key = "%s_TEST_HOST" % prefix port_env_key = "%s_TEST_PORT" % prefix port_random_env_key = "%s_TEST_PORT_RANDOM" % prefix default_web_host = getattr(config_object, "default_web_host", DEFAULT_WEB_HOST) host = os.environ.get(host_env_key, default_web_host) if os.environ.get(port_random_env_key, None) is not None: # Ignore the port environment variable, it wasn't explictly configured. port = None else: port = os.environ.get(port_env_key, None) # If an explicit port wasn't assigned for this test or test case, set this # environment variable so we know it is random. We can then randomly re-assign # for new tests. 
if port is None: os.environ["GALAXY_TEST_PORT_RANDOM"] = "1" return host, port def set_and_wait_for_http_target(prefix, host, port, sleep_amount=0.1, sleep_tries=150): host_env_key = "%s_TEST_HOST" % prefix port_env_key = "%s_TEST_PORT" % prefix os.environ[host_env_key] = host os.environ[port_env_key] = port wait_for_http_server(host, port, sleep_amount=sleep_amount, sleep_tries=sleep_tries) class ServerWrapper(object): def __init__(self, name, host, port): self.name = name self.host = host self.port = port @property def app(self): raise NotImplementedError("Test can be run against target - requires a Galaxy app object.") def stop(self): raise NotImplementedError() class PasteServerWrapper(ServerWrapper): def __init__(self, app, server, name, host, port): super(PasteServerWrapper, self).__init__(name, host, port) self._app = app self._server = server @property def app(self): return self._app def stop(self): if self._server is not None: log.info("Shutting down embedded %s web server" % self.name) self._server.server_close() log.info("Embedded web server %s stopped" % self.name) if self._app is not None: log.info("Stopping application %s" % self.name) self._app.shutdown() log.info("Application %s stopped." % self.name) class UwsgiServerWrapper(ServerWrapper): def __init__(self, p, name, host, port): super(UwsgiServerWrapper, self).__init__(name, host, port) self._p = p self._r = None self._t = threading.Thread(target=self.wait) self._t.start() def __del__(self): self._t.join() def wait(self): self._r = self._p.wait() def stop(self): try: os.killpg(os.getpgid(self._p.pid), signal.SIGTERM) except Exception: pass time.sleep(.1) try: os.killpg(os.getpgid(self._p.pid), signal.SIGKILL) except Exception: pass self._t.join() def launch_uwsgi(kwargs, tempdir, prefix=DEFAULT_CONFIG_PREFIX, config_object=None): name = prefix.lower() host, port = explicitly_configured_host_and_port(prefix, config_object) config = {} config["galaxy"] = kwargs.copy() enable_realtime_mapping = getattr(config_object, "enable_realtime_mapping", False) if enable_realtime_mapping: config["galaxy"]["realtime_prefix"] = "realtime" config["galaxy"]["realtime_map"] = os.path.join(tempdir, "realtime_map.sqlite") yaml_config_path = os.path.join(tempdir, "galaxy.yml") with open(yaml_config_path, "w") as f: yaml.dump(config, f) if enable_realtime_mapping: # Avoid YAML.dump configuration since uwsgi doesn't like real YAML :( - # though maybe it would work? 
with open(yaml_config_path, "r") as f: old_contents = f.read() with open(yaml_config_path, "w") as f: test_port = str(port) if port else r"[0-9]+" test_host = host or "localhost" uwsgi_section = REALTIME_PROXY_TEMPLATE.safe_substitute(test_host=test_host, test_port=test_port, tempdir=tempdir) f.write(uwsgi_section) f.write(old_contents) def attempt_port_bind(port): uwsgi_command = [ "uwsgi", "--http", "%s:%s" % (host, port), "--yaml", yaml_config_path, "--module", "galaxy.webapps.galaxy.buildapp:uwsgi_app_factory()", "--enable-threads", "--die-on-term", ] for p in sys.path: uwsgi_command.append('--pythonpath') uwsgi_command.append(p) handle_uwsgi_cli_command = getattr( config_object, "handle_uwsgi_cli_command", None ) if handle_uwsgi_cli_command is not None: handle_uwsgi_cli_command(uwsgi_command) # we don't want to quote every argument but we don't want to print unquoted ones either, so do this log.info("Starting uwsgi with command line: %s", ' '.join([shlex_quote(x) for x in uwsgi_command])) p = subprocess.Popen( uwsgi_command, cwd=galaxy_root, preexec_fn=os.setsid, ) return UwsgiServerWrapper( p, name, host, port ) for port in attempt_ports(port): server_wrapper = attempt_port_bind(port) try: set_and_wait_for_http_target(prefix, host, port, sleep_tries=50) log.info("Test-managed uwsgi web server for %s started at %s:%s" % (name, host, port)) return server_wrapper except Exception: server_wrapper.stop() def launch_server(app, webapp_factory, kwargs, prefix=DEFAULT_CONFIG_PREFIX, config_object=None): """Launch a web server for a given app using supplied factory. Consistently read either GALAXY_TEST_HOST and GALAXY_TEST_PORT or TOOL_SHED_TEST_HOST and TOOL_SHED_TEST_PORT and ensure these are all set after this method has been called. """ name = prefix.lower() host, port = explicitly_configured_host_and_port(prefix, config_object) webapp = webapp_factory( kwargs['global_conf'], app=app, use_translogger=False, static_enabled=True, register_shutdown_at_exit=False ) server, port = serve_webapp( webapp, host=host, port=port ) set_and_wait_for_http_target(prefix, host, port) log.info("Embedded paste web server for %s started at %s:%s" % (name, host, port)) return PasteServerWrapper( app, server, name, host, port ) class TestDriver(object): """Responsible for the life-cycle of a Galaxy-style functional test. Sets up servers, configures tests, runs nose, and tears things down. This is somewhat like a Python TestCase - but different because it is meant to provide a main() endpoint. """ def __init__(self): """Setup tracked resources.""" self.server_wrappers = [] self.temp_directories = [] def setup(self): """Called before tests are built.""" def build_tests(self): """After environment is setup, setup nose tests.""" def tear_down(self): """Cleanup resources tracked by this object.""" self.stop_servers() for temp_directory in self.temp_directories: cleanup_directory(temp_directory) def stop_servers(self): for server_wrapper in self.server_wrappers: server_wrapper.stop() self.server_wrappers = [] def mkdtemp(self): """Return a temp directory that is properly cleaned up or not based on the config.""" temp_directory = tempfile.mkdtemp() self.temp_directories.append(temp_directory) return temp_directory def run(self): """Driver whole test. Setup environment, build tests (if needed), run test, and finally cleanup resources. 
""" configure_environment() self.setup() self.build_tests() try: success = nose_config_and_run() return 0 if success else 1 except Exception as e: log.info("Failure running tests") raise e finally: log.info("Shutting down") self.tear_down() class GalaxyTestDriver(TestDriver): """Instantial a Galaxy-style nose TestDriver for testing Galaxy.""" testing_shed_tools = False def _configure(self, config_object=None): """Setup various variables used to launch a Galaxy server.""" config_object = self._ensure_config_object(config_object) self.external_galaxy = os.environ.get('GALAXY_TEST_EXTERNAL', None) # Allow a particular test to force uwsgi or any test to use uwsgi with # the GALAXY_TEST_UWSGI environment variable. use_uwsgi = os.environ.get('GALAXY_TEST_UWSGI', None) if not use_uwsgi: if getattr(config_object, "require_uwsgi", None): use_uwsgi = True self.use_uwsgi = use_uwsgi # Allow controlling the log format log_format = os.environ.get('GALAXY_TEST_LOG_FORMAT', None) if not log_format and use_uwsgi: log_format = "%(name)s %(levelname)-5.5s %(asctime)s " \ "[p:%(process)s,w:%(worker_id)s,m:%(mule_id)s] " \ "[%(threadName)s] %(message)s" self.log_format = log_format self.galaxy_test_tmp_dir = get_galaxy_test_tmp_dir() self.temp_directories.append(self.galaxy_test_tmp_dir) self.testing_shed_tools = getattr(config_object, "testing_shed_tools", False) if getattr(config_object, "framework_tool_and_types", False): default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF datatypes_conf_override = FRAMEWORK_DATATYPES_CONF else: default_tool_conf = getattr(config_object, "default_tool_conf", None) datatypes_conf_override = getattr(config_object, "datatypes_conf_override", None) self.default_tool_conf = default_tool_conf self.datatypes_conf_override = datatypes_conf_override def setup(self, config_object=None): """Setup a Galaxy server for functional test (if needed). Configuration options can be specified as attributes on the supplied ```config_object``` (defaults to self). """ self._saved_galaxy_config = None self._configure(config_object) self._register_and_run_servers(config_object) def restart(self, config_object=None, handle_config=None): self.stop_servers() self._register_and_run_servers(config_object, handle_config=handle_config) def _register_and_run_servers(self, config_object=None, handle_config=None): config_object = self._ensure_config_object(config_object) self.app = None if self.external_galaxy is None: if self._saved_galaxy_config is not None: galaxy_config = self._saved_galaxy_config else: tempdir = tempfile.mkdtemp(dir=self.galaxy_test_tmp_dir) # Configure the database path. galaxy_db_path = database_files_path(tempdir) # Allow config object to specify a config dict or a method to produce # one - other just read the properties above and use the default # implementation from this file. 
galaxy_config = getattr(config_object, "galaxy_config", None) if hasattr(galaxy_config, '__call__'): galaxy_config = galaxy_config() if galaxy_config is None: setup_galaxy_config_kwds = dict( use_test_file_dir=not self.testing_shed_tools, default_install_db_merged=True, default_tool_conf=self.default_tool_conf, datatypes_conf=self.datatypes_conf_override, prefer_template_database=getattr(config_object, "prefer_template_database", False), log_format=self.log_format, conda_auto_init=getattr(config_object, "conda_auto_init", False), conda_auto_install=getattr(config_object, "conda_auto_install", False), use_shared_connection_for_amqp=getattr(config_object, "use_shared_connection_for_amqp", False) ) galaxy_config = setup_galaxy_config( galaxy_db_path, **setup_galaxy_config_kwds ) isolate_galaxy_config = getattr(config_object, "isolate_galaxy_config", False) if isolate_galaxy_config: galaxy_config["config_dir"] = tempdir self._saved_galaxy_config = galaxy_config if galaxy_config is not None: handle_galaxy_config_kwds = handle_config or getattr( config_object, "handle_galaxy_config_kwds", None ) if handle_galaxy_config_kwds is not None: handle_galaxy_config_kwds(galaxy_config) if self.use_uwsgi: server_wrapper = launch_uwsgi( galaxy_config, tempdir=tempdir, config_object=config_object, ) else: # ---- Build Application -------------------------------------------------- self.app = build_galaxy_app(galaxy_config) server_wrapper = launch_server( self.app, buildapp.app_factory, galaxy_config, config_object=config_object, ) log.info("Functional tests will be run against external Galaxy server %s:%s" % (server_wrapper.host, server_wrapper.port)) self.server_wrappers.append(server_wrapper) else: log.info("Functional tests will be run against test managed Galaxy server %s" % self.external_galaxy) # Ensure test file directory setup even though galaxy config isn't built. ensure_test_file_dir_set() def _ensure_config_object(self, config_object): if config_object is None: config_object = self return config_object def setup_shed_tools(self, testing_migrated_tools=False, testing_installed_tools=True): setup_shed_tools_for_test( self.app, self.galaxy_test_tmp_dir, testing_migrated_tools, testing_installed_tools ) def build_tool_tests(self, testing_shed_tools=None, return_test_classes=False): if self.app is None: return if testing_shed_tools is None: testing_shed_tools = getattr(self, "testing_shed_tools", False) # We must make sure that functional.test_toolbox is always imported after # database_contexts.galaxy_content is set (which occurs in this method above). # If functional.test_toolbox is imported before database_contexts.galaxy_content # is set, sa_session will be None in all methods that use it. import functional.test_toolbox functional.test_toolbox.toolbox = self.app.toolbox # When testing data managers, do not test toolbox. 
test_classes = functional.test_toolbox.build_tests( app=self.app, testing_shed_tools=testing_shed_tools, master_api_key=get_master_api_key(), user_api_key=get_user_api_key(), ) if return_test_classes: return test_classes return functional.test_toolbox def run_tool_test(self, tool_id, index=0, resource_parameters={}): host, port, url = target_url_parts() galaxy_interactor_kwds = { "galaxy_url": url, "master_api_key": get_master_api_key(), "api_key": get_user_api_key(), "keep_outputs_dir": None, } galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds) verify_tool( tool_id=tool_id, test_index=index, galaxy_interactor=galaxy_interactor, resource_parameters=resource_parameters ) def drive_test(test_driver_class): """Instantiate driver class, run, and exit appropriately.""" test_driver = test_driver_class() sys.exit(test_driver.run()) def setup_keep_outdir(): keep_outdir = os.environ.get('GALAXY_TEST_SAVE', '') if keep_outdir > '': try: os.makedirs(keep_outdir) except Exception: pass return keep_outdir def target_url_parts(): host = socket.gethostbyname(os.environ.get('GALAXY_TEST_HOST', DEFAULT_WEB_HOST)) port = os.environ.get('GALAXY_TEST_PORT') default_url = "http://%s:%s" % (host, port) url = os.environ.get('GALAXY_TEST_EXTERNAL', default_url) return host, port, url __all__ = ( "copy_database_template", "build_logger", "drive_test", "FRAMEWORK_UPLOAD_TOOL_CONF", "FRAMEWORK_SAMPLE_TOOLS_CONF", "FRAMEWORK_DATATYPES_CONF", "database_conf", "get_webapp_global_conf", "nose_config_and_run", "setup_keep_outdir", "setup_galaxy_config", "target_url_parts", "TestDriver", "wait_for_http_server", )
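driver_util's public surface is mostly consumed through drive_test with a GalaxyTestDriver subclass. A minimal sketch of such a driver script under stated assumptions: the module import path (base.driver_util) and the nose test module name (functional.my_suite) are both assumptions, not fixed by this file.

# Hypothetical driver script built on this module.
import sys

from base.driver_util import GalaxyTestDriver, drive_test


class MySuiteTestDriver(GalaxyTestDriver):
    """Launch a test-managed Galaxy and run one nose test module against it."""

    framework_tool_and_types = True  # use the framework sample tools/datatypes

    def build_tests(self):
        # nose_config_and_run() reads sys.argv, so restricting argv here
        # selects which test module the embedded server is exercised with.
        sys.argv = [sys.argv[0], "functional.my_suite"]


if __name__ == "__main__":
    drive_test(MySuiteTestDriver)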
Audio.py
import sys
import threading
import traceback

import numpy
import pyaudio

import Globals


class Audio:
    def __init__(self, device_index=-1, samplerate=44100, gain=1, channels=1, samples=2**10):
        self.pya = pyaudio.PyAudio()
        self.device_index = device_index
        self.samplerate = samplerate
        self.channels = channels
        self.samples = samples
        self.range = (-2**15, 2**15)
        self.eventstop = threading.Event()
        self.audiowait = threading.Event()
        self.chunk = b''
        self.frame = [-1]
        self.gain = gain
        self.eventstop.clear()
        self.audiowait.set()

    def start(self):
        if self.device_index != -1:
            self.stream = self.pya.open(format=pyaudio.paInt16, rate=self.samplerate,
                                        channels=self.channels,
                                        input_device_index=self.device_index, input=True)
            self.device = self.pya.get_device_info_by_index(self.device_index)
            if Globals.INFO['Audio']:
                sys.stdout.write("AUDIO_INFO: Opened {0}\n".format(self.device['name']))
            self.eventstop.clear()
            self.audiowait.set()
            self.audio_thread = threading.Thread(target=self.run)
            self.audio_thread.daemon = True
            self.audio_thread.name = "Audio device {0}".format(self.device['name'])
            self.audio_thread.start()

    def run(self):
        while True:
            self.audiowait.set()
            try:
                self.chunk = self.stream.read(self.samples)
                # frombuffer replaces the deprecated numpy.fromstring for binary data
                unorm = numpy.frombuffer(self.chunk, dtype=numpy.int16)
                self.frame = []
                for i in unorm:
                    self.frame += [i / 32768.0]
            except Exception:
                e = sys.exc_info()
                sys.stderr.write('AUDIO_ERROR: {0}: {1}\n'.format(e[0], e[1]))
                traceback.print_tb(e[2])
            if self.eventstop.is_set():
                self.eventstop.clear()
                break
            self.audiowait.clear()
        self.eventstop.clear()

    def getLastChunk(self):
        if self.audiowait.wait(3):
            return self.chunk
        else:
            return b''

    def getLastFrame(self):
        if self.audiowait.wait(3):
            # return self.frame
            r = []
            for i in self.frame:
                i = i * self.gain
                if i > 1:
                    i = 1
                elif i < -1:
                    i = -1
                r += [i]
            return r
        else:
            return []

    def stop(self):
        if self.device_index != -1:
            if Globals.INFO['Audio']:
                sys.stdout.write("AUDIO_INFO: Closing {0}...\n".format(self.device['name']))
            self.eventstop.set()
            # join() waits for the capture thread to notice the stop flag and exit
            self.audio_thread.join()
            self.stream.stop_stream()
            self.stream.close()
            self.pya.terminate()
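A minimal usage sketch for the class above; the device index and the presence of a project Globals module with INFO['Audio'] defined are assumptions:

# Hypothetical usage of Audio; device_index=3 is illustrative, and
# Globals.INFO['Audio'] is assumed to be defined by the project.
import time

from Audio import Audio

audio = Audio(device_index=3, samplerate=44100, channels=1)
audio.start()
time.sleep(1.0)                  # let the capture thread fill a few chunks
samples = audio.getLastFrame()   # gain-scaled floats clipped to [-1, 1]
print(len(samples), samples[:5])
audio.stop()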
train.py
#!/usr/bin/env python """ Main training workflow """ from __future__ import division import argparse import glob import os import random import signal import time import torch from pytorch_pretrained_bert import BertConfig import distributed from models import data_loader, model_builder from models.data_loader import load_dataset from models.model_builder import Summarizer from models.trainer import build_trainer from others.logging import logger, init_logger model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size'] def str2bool(v): if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') def multi_main(args): """ Spawns 1 process per GPU """ init_logger() nb_gpu = args.world_size mp = torch.multiprocessing.get_context('spawn') # Create a thread to listen for errors in the child processes. error_queue = mp.SimpleQueue() error_handler = ErrorHandler(error_queue) # Train with multiprocessing. procs = [] for i in range(nb_gpu): device_id = i procs.append(mp.Process(target=run, args=(args, device_id, error_queue,), daemon=True)) procs[i].start() logger.info(" Starting process pid: %d " % procs[i].pid) error_handler.add_child(procs[i].pid) for p in procs: p.join() def run(args, device_id, error_queue): """ run process """ setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks]) try: gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks) print('gpu_rank %d' %gpu_rank) if gpu_rank != args.gpu_ranks[device_id]: raise AssertionError("An error occurred in \ Distributed initialization") train(args,device_id) except KeyboardInterrupt: pass # killed by parent, do nothing except Exception: # propagate exception to parent process, keeping original traceback import traceback error_queue.put((args.gpu_ranks[device_id], traceback.format_exc())) class ErrorHandler(object): """A class that listens for exceptions in children processes and propagates the tracebacks to the parent process.""" def __init__(self, error_queue): """ init error handler """ import signal import threading self.error_queue = error_queue self.children_pids = [] self.error_thread = threading.Thread( target=self.error_listener, daemon=True) self.error_thread.start() signal.signal(signal.SIGUSR1, self.signal_handler) def add_child(self, pid): """ error handler """ self.children_pids.append(pid) def error_listener(self): """ error listener """ (rank, original_trace) = self.error_queue.get() self.error_queue.put((rank, original_trace)) os.kill(os.getpid(), signal.SIGUSR1) def signal_handler(self, signalnum, stackframe): """ signal handler """ for pid in self.children_pids: os.kill(pid, signal.SIGINT) # kill children processes (rank, original_trace) = self.error_queue.get() msg = """\n\n-- Tracebacks above this line can probably be ignored --\n\n""" msg += original_trace raise Exception(msg) def wait_and_validate(args, device_id): timestep = 0 if (args.test_all): cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt'))) cp_files.sort(key=os.path.getmtime) xent_lst = [] for i, cp in enumerate(cp_files): step = int(cp.split('.')[-2].split('_')[-1]) xent = validate(args, device_id, cp, step) xent_lst.append((xent, cp)) max_step = xent_lst.index(min(xent_lst)) if (i - max_step > 10): break xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3] logger.info('PPL %s' % str(xent_lst)) for xent, cp in xent_lst: step = 
int(cp.split('.')[-2].split('_')[-1]) test(args, device_id, cp, step) else: while (True): cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt'))) cp_files.sort(key=os.path.getmtime) if (cp_files): cp = cp_files[-1] time_of_cp = os.path.getmtime(cp) if (not os.path.getsize(cp) > 0): time.sleep(60) continue if (time_of_cp > timestep): timestep = time_of_cp step = int(cp.split('.')[-2].split('_')[-1]) validate(args, device_id, cp, step) test(args, device_id, cp, step) cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt'))) cp_files.sort(key=os.path.getmtime) if (cp_files): cp = cp_files[-1] time_of_cp = os.path.getmtime(cp) if (time_of_cp > timestep): continue else: time.sleep(300) def validate(args, device_id, pt, step): device = "cpu" if args.visible_gpus == '-1' else "cuda" if (pt != ''): test_from = pt else: test_from = args.test_from logger.info('Loading checkpoint from %s' % test_from) checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage) opt = vars(checkpoint['opt']) for k in opt.keys(): if (k in model_flags): setattr(args, k, opt[k]) print(args) config = BertConfig.from_json_file(args.bert_config_path) model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config) model.load_cp(checkpoint) model.eval() valid_iter =data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False), args.batch_size, device, shuffle=False, is_test=False) trainer = build_trainer(args, device_id, model, None) stats = trainer.validate(valid_iter, step) return stats.xent() def test(args, device_id, pt, step): device = "cpu" if args.visible_gpus == '-1' else "cuda" if (pt != ''): test_from = pt else: test_from = args.test_from logger.info('Loading checkpoint from %s' % test_from) checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage) opt = vars(checkpoint['opt']) for k in opt.keys(): if (k in model_flags): setattr(args, k, opt[k]) print(args) config = BertConfig.from_json_file(args.bert_config_path) model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config) model.load_cp(checkpoint) model.eval() test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False), args.batch_size, device, shuffle=False, is_test=True) trainer = build_trainer(args, device_id, model, None) trainer.test(test_iter,step) def baseline(args, cal_lead=False, cal_oracle=False): test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False), args.batch_size, device, shuffle=False, is_test=True) trainer = build_trainer(args, device_id, None, None) # if (cal_lead): trainer.test(test_iter, 0, cal_lead=True) elif (cal_oracle): trainer.test(test_iter, 0, cal_oracle=True) def train(args, device_id): init_logger(args.log_file) device = "cpu" if args.visible_gpus == '-1' else "cuda" logger.info('Device ID %d' % device_id) logger.info('Device %s' % device) torch.manual_seed(args.seed) random.seed(args.seed) torch.backends.cudnn.deterministic = True if device_id >= 0: torch.cuda.set_device(device_id) torch.cuda.manual_seed(args.seed) torch.manual_seed(args.seed) random.seed(args.seed) torch.backends.cudnn.deterministic = True def train_iter_fct(): return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device, shuffle=True, is_test=False) model = Summarizer(args, device, load_pretrained_bert=True) if args.train_from != '': logger.info('Loading checkpoint from %s' % args.train_from) checkpoint = torch.load(args.train_from, 
map_location=lambda storage, loc: storage) opt = vars(checkpoint['opt']) for k in opt.keys(): if (k in model_flags): setattr(args, k, opt[k]) model.load_cp(checkpoint) optim = model_builder.build_optim(args, model, checkpoint) else: optim = model_builder.build_optim(args, model, None) logger.info(model) trainer = build_trainer(args, device_id, model, optim) trainer.train(train_iter_fct, args.train_steps) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-encoder", default='classifier', type=str, choices=['classifier','transformer','rnn','baseline']) parser.add_argument("-mode", default='train', type=str, choices=['train','validate','test']) parser.add_argument("-bert_data_path", default='../data/bert_data/cnndm') parser.add_argument("-model_path", default='../models/') parser.add_argument("-result_path", default='../results/cnndm') parser.add_argument("-temp_dir", default='../temp') parser.add_argument("-bert_config_path", default='../bert_config_uncased_base.json') parser.add_argument("-batch_size", default=1000, type=int) parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True) parser.add_argument("-hidden_size", default=128, type=int) parser.add_argument("-ff_size", default=512, type=int) parser.add_argument("-heads", default=4, type=int) parser.add_argument("-inter_layers", default=2, type=int) parser.add_argument("-rnn_size", default=512, type=int) parser.add_argument("-param_init", default=0, type=float) parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True) parser.add_argument("-dropout", default=0.1, type=float) parser.add_argument("-optim", default='adam', type=str) parser.add_argument("-lr", default=1, type=float) parser.add_argument("-beta1", default= 0.9, type=float) parser.add_argument("-beta2", default=0.999, type=float) parser.add_argument("-decay_method", default='', type=str) parser.add_argument("-warmup_steps", default=8000, type=int) parser.add_argument("-max_grad_norm", default=0, type=float) parser.add_argument("-save_checkpoint_steps", default=5, type=int) parser.add_argument("-accum_count", default=1, type=int) parser.add_argument("-world_size", default=1, type=int) parser.add_argument("-report_every", default=1, type=int) parser.add_argument("-train_steps", default=1000, type=int) parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False) parser.add_argument('-visible_gpus', default='-1', type=str) parser.add_argument('-gpu_ranks', default='0', type=str) parser.add_argument('-log_file', default='../logs/cnndm.log') parser.add_argument('-dataset', default='') parser.add_argument('-seed', default=666, type=int) parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False) parser.add_argument("-test_from", default='') parser.add_argument("-train_from", default='') parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True) parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True) args = parser.parse_args() args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')] os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus init_logger(args.log_file) device = "cpu" if args.visible_gpus == '-1' else "cuda" device_id = 0 if device == "cuda" else -1 if(args.world_size>1): multi_main(args) elif (args.mode == 'train'): train(args, device_id) elif (args.mode == 'validate'): wait_and_validate(args, device_id) elif (args.mode == 'lead'): baseline(args, cal_lead=True) elif 
(args.mode == 'oracle'): baseline(args, cal_oracle=True) elif (args.mode == 'test'): cp = args.test_from try: step = int(cp.split('.')[-2].split('_')[-1]) except: step = 0 test(args, device_id, cp, step)
4_content_attributes2.py
import nltk nltk.download('stopwords') from nltk.corpus import stopwords from multiprocessing import Process from empath import Empath import pandas as pd import numpy as np import textblob import spacy import time import csv import re stopWords = set(stopwords.words('english')) nlp = spacy.load('en_core_web_lg') prog = re.compile("(@[A-Za-z0-9]+)|([^0-9A-Za-z' \t])|(\w+:\/\/\S+)") prog2 = re.compile(" +") lexicon = Empath() empath_cols = ["{0}_empath".format(v) for v in lexicon.cats.keys()] glove_cols = ["{0}_glove".format(v) for v in range(300)] def lemmatization(x, nlp): tweets = " ".join(list(x.values)) letters_only = prog.sub(" ", tweets) lemmatized = [] for token1 in nlp(letters_only): if token1.lemma_ != "-PRON-" and token1 not in stopWords: lemmatized.append(token1.lemma_) else: lemmatized.append(token1.text) final = prog2.sub(" ", " ".join(lemmatized)) return final def empath_analysis(x): val = lexicon.analyze(x, normalize=True) if val is None: return lexicon.analyze(x) else: return val def processing(vals, columns, iterv): users = pd.DataFrame(vals) users = users[columns] print("{0}-------------".format(iterv)) # PRE-PROCESSING users["any_text"] = users["tweet_text"] + users["rt_text"] + users["qt_text"] users_text = users.groupby(["user_id"])["any_text"].apply(lambda x: lemmatization(x, nlp)).reset_index() print("{0}-------------PRE-PROCESSING".format(iterv)) # GLOVE ANALYSIS glove_arr = np.array(list(users_text["any_text"].apply(lambda x: list(nlp(x).vector)).values)) df_glove = pd.DataFrame(glove_arr, columns=glove_cols, index=users_text.user_id.values) print("{0}-------------GLOVE".format(iterv)) # SENTIMENT ANALYSIS sentiment_arr = np.array(list(users_text["any_text"].apply(lambda x: textblob.TextBlob(str(x)).sentiment).values)) sentiment_cols = ["sentiment", "subjectivity"] df_sentiment = pd.DataFrame(sentiment_arr, columns=sentiment_cols, index=users_text.user_id.values) print("{0}-------------SENTIMENT".format(iterv)) # EMPATH ANALYSIS lexicon_arr = np.array(list(users_text["any_text"].apply(lambda x: empath_analysis(x)).values)) df_empath = pd.DataFrame.from_records(index=users_text.user_id.values, data=lexicon_arr) df_empath.columns = empath_cols print("{0}-------------EMPATH".format(iterv)) # MERGE TO SINGLE df = pd.DataFrame(pd.concat([df_empath, df_sentiment, df_glove], axis=1)) df.set_index("user_id", inplace=True) df.to_csv("../data/features/tmp/users_content_{0}.csv".format(iterv)) print("-------------{0}".format(iterv)) f = open("../data/preprocessing/tweets.csv", "r") cols = ["user_id", "screen_name", "tweet_id", "tweet_text", "tweet_creation", "tweet_fav", "tweet_rt", "rp_flag", "rp_status", "rp_user", "qt_flag", "qt_user_id", "qt_status_id", "qt_text", "qt_creation", "qt_fav", "qt_rt", "rt_flag", "rt_user_id", "rt_status_id", "rt_text", "rt_creation", "rt_fav", "rt_rt"] csv_dict_reader = csv.DictReader(f) acc_vals = [] iter_vals, count, count_max, last_u, v = 1, 0, 50000, None, [] for line in csv_dict_reader: if last_u is not None and last_u != line["user_id"]: # s = time.time() # processing(v, cols, iter_vals) # print(time.time() - s) acc_vals.append((v, cols, iter_vals)) count, last_u, v = 0, None, [] iter_vals += 1 if len(acc_vals) == 2: s = time.time() processes = [] for i in acc_vals: p = Process(target=processing, args=(i[0], i[1], i[2])) processes.append(p) for p in processes: p.start() for p in processes: p.join() print(time.time() - s) acc_vals = [] v.append(line) count += 1 if count >= count_max: last_u = line["user_id"] # s = time.time() # 
processing(v, cols, iter_vals) # print(time.time() - s) s = time.time() processes = [] for i in acc_vals: p = Process(target=processing, args=(i[0], i[1], i[2])) processes.append(p) for p in processes: p.start() for p in processes: p.join() print(time.time() - s) acc_vals = []
test_functional.py
import errno import logging import multiprocessing import os import signal import socket import string import subprocess import sys import time import unittest from waitress import server from waitress.compat import httplib, tobytes from waitress.utilities import cleanup_unix_socket dn = os.path.dirname here = dn(__file__) class NullHandler(logging.Handler): # pragma: no cover """A logging handler that swallows all emitted messages. """ def emit(self, record): pass def start_server(app, svr, queue, **kwargs): # pragma: no cover """Run a fixture application. """ logging.getLogger("waitress").addHandler(NullHandler()) try_register_coverage() svr(app, queue, **kwargs).run() def try_register_coverage(): # pragma: no cover # Hack around multiprocessing exiting early and not triggering coverage's # atexit handler by always registering a signal handler if "COVERAGE_PROCESS_START" in os.environ: def sigterm(*args): sys.exit(0) signal.signal(signal.SIGTERM, sigterm) class FixtureTcpWSGIServer(server.TcpWSGIServer): """A version of TcpWSGIServer that relays back what it's bound to. """ family = socket.AF_INET # Testing def __init__(self, application, queue, **kw): # pragma: no cover # Coverage doesn't see this as it's ran in a separate process. kw["port"] = 0 # Bind to any available port. super(FixtureTcpWSGIServer, self).__init__(application, **kw) host, port = self.socket.getsockname() if os.name == "nt": host = "127.0.0.1" queue.put((host, port)) class SubprocessTests(object): # For nose: all tests may be ran in separate processes. _multiprocess_can_split_ = True exe = sys.executable server = None def start_subprocess(self, target, **kw): # Spawn a server process. self.queue = multiprocessing.Queue() if "COVERAGE_RCFILE" in os.environ: os.environ["COVERAGE_PROCESS_START"] = os.environ["COVERAGE_RCFILE"] self.proc = multiprocessing.Process( target=start_server, args=(target, self.server, self.queue), kwargs=kw, ) self.proc.start() if self.proc.exitcode is not None: # pragma: no cover raise RuntimeError("%s didn't start" % str(target)) # Get the socket the server is listening on. self.bound_to = self.queue.get(timeout=5) self.sock = self.create_socket() def stop_subprocess(self): if self.proc.exitcode is None: self.proc.terminate() self.sock.close() # This give us one FD back ... 
self.queue.close() self.proc.join() def assertline(self, line, status, reason, version): v, s, r = (x.strip() for x in line.split(None, 2)) self.assertEqual(s, tobytes(status)) self.assertEqual(r, tobytes(reason)) self.assertEqual(v, tobytes(version)) def create_socket(self): return socket.socket(self.server.family, socket.SOCK_STREAM) def connect(self): self.sock.connect(self.bound_to) def make_http_connection(self): raise NotImplementedError # pragma: no cover def send_check_error(self, to_send): self.sock.send(to_send) class TcpTests(SubprocessTests): server = FixtureTcpWSGIServer def make_http_connection(self): return httplib.HTTPConnection(*self.bound_to) class SleepyThreadTests(TcpTests, unittest.TestCase): # test that sleepy thread doesnt block other requests def setUp(self): from waitress.tests.fixtureapps import sleepy self.start_subprocess(sleepy.app) def tearDown(self): self.stop_subprocess() def test_it(self): getline = os.path.join(here, "fixtureapps", "getline.py") cmds = ( [self.exe, getline, "http://%s:%d/sleepy" % self.bound_to], [self.exe, getline, "http://%s:%d/" % self.bound_to], ) r, w = os.pipe() procs = [] for cmd in cmds: procs.append(subprocess.Popen(cmd, stdout=w)) time.sleep(3) for proc in procs: if proc.returncode is not None: # pragma: no cover proc.terminate() proc.wait() # the notsleepy response should always be first returned (it sleeps # for 2 seconds, then returns; the notsleepy response should be # processed in the meantime) result = os.read(r, 10000) os.close(r) os.close(w) self.assertEqual(result, b"notsleepy returnedsleepy returned") class EchoTests(object): def setUp(self): from waitress.tests.fixtureapps import echo self.start_subprocess( echo.app, trusted_proxy="*", trusted_proxy_count=1, trusted_proxy_headers={"x-forwarded-for", "x-forwarded-proto"}, clear_untrusted_proxy_headers=True, ) def tearDown(self): self.stop_subprocess() def _read_echo(self, fp): from waitress.tests.fixtureapps import echo line, headers, body = read_http(fp) return line, headers, echo.parse_response(body) def test_date_and_server(self): to_send = "GET / HTTP/1.0\r\nContent-Length: 0\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, echo = self._read_echo(fp) self.assertline(line, "200", "OK", "HTTP/1.0") self.assertEqual(headers.get("server"), "waitress") self.assertTrue(headers.get("date")) def test_bad_host_header(self): # https://corte.si/posts/code/pathod/pythonservers/index.html to_send = "GET / HTTP/1.0\r\n Host: 0\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "400", "Bad Request", "HTTP/1.0") self.assertEqual(headers.get("server"), "waitress") self.assertTrue(headers.get("date")) def test_send_with_body(self): to_send = "GET / HTTP/1.0\r\nContent-Length: 5\r\n\r\n" to_send += "hello" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, echo = self._read_echo(fp) self.assertline(line, "200", "OK", "HTTP/1.0") self.assertEqual(echo.content_length, "5") self.assertEqual(echo.body, b"hello") def test_send_empty_body(self): to_send = "GET / HTTP/1.0\r\nContent-Length: 0\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, echo = self._read_echo(fp) self.assertline(line, "200", "OK", "HTTP/1.0") self.assertEqual(echo.content_length, "0") 
self.assertEqual(echo.body, b"") def test_multiple_requests_with_body(self): orig_sock = self.sock for x in range(3): self.sock = self.create_socket() self.test_send_with_body() self.sock.close() self.sock = orig_sock def test_multiple_requests_without_body(self): orig_sock = self.sock for x in range(3): self.sock = self.create_socket() self.test_send_empty_body() self.sock.close() self.sock = orig_sock def test_without_crlf(self): data = "Echo\r\nthis\r\nplease" s = tobytes( "GET / HTTP/1.0\r\n" "Connection: close\r\n" "Content-Length: %d\r\n" "\r\n" "%s" % (len(data), data) ) self.connect() self.sock.send(s) fp = self.sock.makefile("rb", 0) line, headers, echo = self._read_echo(fp) self.assertline(line, "200", "OK", "HTTP/1.0") self.assertEqual(int(echo.content_length), len(data)) self.assertEqual(len(echo.body), len(data)) self.assertEqual(echo.body, tobytes(data)) def test_large_body(self): # 1024 characters. body = "This string has 32 characters.\r\n" * 32 s = tobytes( "GET / HTTP/1.0\r\nContent-Length: %d\r\n\r\n%s" % (len(body), body) ) self.connect() self.sock.send(s) fp = self.sock.makefile("rb", 0) line, headers, echo = self._read_echo(fp) self.assertline(line, "200", "OK", "HTTP/1.0") self.assertEqual(echo.content_length, "1024") self.assertEqual(echo.body, tobytes(body)) def test_many_clients(self): conns = [] for n in range(50): h = self.make_http_connection() h.request("GET", "/", headers={"Accept": "text/plain"}) conns.append(h) responses = [] for h in conns: response = h.getresponse() self.assertEqual(response.status, 200) responses.append(response) for response in responses: response.read() for h in conns: h.close() def test_chunking_request_without_content(self): header = tobytes("GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n") self.connect() self.sock.send(header) self.sock.send(b"0\r\n\r\n") fp = self.sock.makefile("rb", 0) line, headers, echo = self._read_echo(fp) self.assertline(line, "200", "OK", "HTTP/1.1") self.assertEqual(echo.body, b"") self.assertEqual(echo.content_length, "0") self.assertFalse("transfer-encoding" in headers) def test_chunking_request_with_content(self): control_line = b"20;\r\n" # 20 hex = 32 dec s = b"This string has 32 characters.\r\n" expected = s * 12 header = tobytes("GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n") self.connect() self.sock.send(header) fp = self.sock.makefile("rb", 0) for n in range(12): self.sock.send(control_line) self.sock.send(s) self.sock.send(b"\r\n") # End the chunk self.sock.send(b"0\r\n\r\n") line, headers, echo = self._read_echo(fp) self.assertline(line, "200", "OK", "HTTP/1.1") self.assertEqual(echo.body, expected) self.assertEqual(echo.content_length, str(len(expected))) self.assertFalse("transfer-encoding" in headers) def test_broken_chunked_encoding(self): control_line = "20;\r\n" # 20 hex = 32 dec s = "This string has 32 characters.\r\n" to_send = "GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n" to_send += control_line + s + "\r\n" # garbage in input to_send += "garbage\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) # receiver caught garbage and turned it into a 400 self.assertline(line, "400", "Bad Request", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) self.assertEqual( sorted(headers.keys()), ["connection", "content-length", "content-type", "date", "server"] ) self.assertEqual(headers["content-type"], "text/plain") # connection has been closed 
self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_broken_chunked_encoding_missing_chunk_end(self): control_line = "20;\r\n" # 20 hex = 32 dec s = "This string has 32 characters.\r\n" to_send = "GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n" to_send += control_line + s # garbage in input to_send += "garbage" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) # receiver caught garbage and turned it into a 400 self.assertline(line, "400", "Bad Request", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) self.assertTrue(b"Chunk not properly terminated" in response_body) self.assertEqual( sorted(headers.keys()), ["connection", "content-length", "content-type", "date", "server"] ) self.assertEqual(headers["content-type"], "text/plain") # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_keepalive_http_10(self): # Handling of Keep-Alive within HTTP 1.0 data = "Default: Don't keep me alive" s = tobytes( "GET / HTTP/1.0\r\nContent-Length: %d\r\n\r\n%s" % (len(data), data) ) self.connect() self.sock.send(s) response = httplib.HTTPResponse(self.sock) response.begin() self.assertEqual(int(response.status), 200) connection = response.getheader("Connection", "") # We sent no Connection: Keep-Alive header # Connection: close (or no header) is default. self.assertTrue(connection != "Keep-Alive") def test_keepalive_http10_explicit(self): # If header Connection: Keep-Alive is explicitly sent, # we want to keept the connection open, we also need to return # the corresponding header data = "Keep me alive" s = tobytes( "GET / HTTP/1.0\r\n" "Connection: Keep-Alive\r\n" "Content-Length: %d\r\n" "\r\n" "%s" % (len(data), data) ) self.connect() self.sock.send(s) response = httplib.HTTPResponse(self.sock) response.begin() self.assertEqual(int(response.status), 200) connection = response.getheader("Connection", "") self.assertEqual(connection, "Keep-Alive") def test_keepalive_http_11(self): # Handling of Keep-Alive within HTTP 1.1 # All connections are kept alive, unless stated otherwise data = "Default: Keep me alive" s = tobytes( "GET / HTTP/1.1\r\nContent-Length: %d\r\n\r\n%s" % (len(data), data) ) self.connect() self.sock.send(s) response = httplib.HTTPResponse(self.sock) response.begin() self.assertEqual(int(response.status), 200) self.assertTrue(response.getheader("connection") != "close") def test_keepalive_http11_explicit(self): # Explicitly set keep-alive data = "Default: Keep me alive" s = tobytes( "GET / HTTP/1.1\r\n" "Connection: keep-alive\r\n" "Content-Length: %d\r\n" "\r\n" "%s" % (len(data), data) ) self.connect() self.sock.send(s) response = httplib.HTTPResponse(self.sock) response.begin() self.assertEqual(int(response.status), 200) self.assertTrue(response.getheader("connection") != "close") def test_keepalive_http11_connclose(self): # specifying Connection: close explicitly data = "Don't keep me alive" s = tobytes( "GET / HTTP/1.1\r\n" "Connection: close\r\n" "Content-Length: %d\r\n" "\r\n" "%s" % (len(data), data) ) self.connect() self.sock.send(s) response = httplib.HTTPResponse(self.sock) response.begin() self.assertEqual(int(response.status), 200) self.assertEqual(response.getheader("connection"), "close") def test_proxy_headers(self): to_send = ( "GET / HTTP/1.0\r\n" "Content-Length: 0\r\n" "Host: www.google.com:8080\r\n" "X-Forwarded-For: 
192.168.1.1\r\n" "X-Forwarded-Proto: https\r\n" "X-Forwarded-Port: 5000\r\n\r\n" ) to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, echo = self._read_echo(fp) self.assertline(line, "200", "OK", "HTTP/1.0") self.assertEqual(headers.get("server"), "waitress") self.assertTrue(headers.get("date")) self.assertIsNone(echo.headers.get("X_FORWARDED_PORT")) self.assertEqual(echo.headers["HOST"], "www.google.com:8080") self.assertEqual(echo.scheme, "https") self.assertEqual(echo.remote_addr, "192.168.1.1") self.assertEqual(echo.remote_host, "192.168.1.1") class PipeliningTests(object): def setUp(self): from waitress.tests.fixtureapps import echo self.start_subprocess(echo.app_body_only) def tearDown(self): self.stop_subprocess() def test_pipelining(self): s = ( "GET / HTTP/1.0\r\n" "Connection: %s\r\n" "Content-Length: %d\r\n" "\r\n" "%s" ) to_send = b"" count = 25 for n in range(count): body = "Response #%d\r\n" % (n + 1) if n + 1 < count: conn = "keep-alive" else: conn = "close" to_send += tobytes(s % (conn, len(body), body)) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) for n in range(count): expect_body = tobytes("Response #%d\r\n" % (n + 1)) line = fp.readline() # status line version, status, reason = (x.strip() for x in line.split(None, 2)) headers = parse_headers(fp) length = int(headers.get("content-length")) or None response_body = fp.read(length) self.assertEqual(int(status), 200) self.assertEqual(length, len(response_body)) self.assertEqual(response_body, expect_body) class ExpectContinueTests(object): def setUp(self): from waitress.tests.fixtureapps import echo self.start_subprocess(echo.app_body_only) def tearDown(self): self.stop_subprocess() def test_expect_continue(self): # specifying Connection: close explicitly data = "I have expectations" to_send = tobytes( "GET / HTTP/1.1\r\n" "Connection: close\r\n" "Content-Length: %d\r\n" "Expect: 100-continue\r\n" "\r\n" "%s" % (len(data), data) ) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line = fp.readline() # continue status line version, status, reason = (x.strip() for x in line.split(None, 2)) self.assertEqual(int(status), 100) self.assertEqual(reason, b"Continue") self.assertEqual(version, b"HTTP/1.1") fp.readline() # blank line line = fp.readline() # next status line version, status, reason = (x.strip() for x in line.split(None, 2)) headers = parse_headers(fp) length = int(headers.get("content-length")) or None response_body = fp.read(length) self.assertEqual(int(status), 200) self.assertEqual(length, len(response_body)) self.assertEqual(response_body, tobytes(data)) class BadContentLengthTests(object): def setUp(self): from waitress.tests.fixtureapps import badcl self.start_subprocess(badcl.app) def tearDown(self): self.stop_subprocess() def test_short_body(self): # check to see if server closes connection when body is too short # for cl header to_send = tobytes( "GET /short_body HTTP/1.0\r\n" "Connection: Keep-Alive\r\n" "Content-Length: 0\r\n" "\r\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line = fp.readline() # status line version, status, reason = (x.strip() for x in line.split(None, 2)) headers = parse_headers(fp) content_length = int(headers.get("content-length")) response_body = fp.read(content_length) self.assertEqual(int(status), 200) self.assertNotEqual(content_length, len(response_body)) self.assertEqual(len(response_body), content_length - 1) 
self.assertEqual(response_body, tobytes("abcdefghi")) # remote closed connection (despite keepalive header); not sure why # first send succeeds self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_long_body(self): # check server doesnt close connection when body is too short # for cl header to_send = tobytes( "GET /long_body HTTP/1.0\r\n" "Connection: Keep-Alive\r\n" "Content-Length: 0\r\n" "\r\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line = fp.readline() # status line version, status, reason = (x.strip() for x in line.split(None, 2)) headers = parse_headers(fp) content_length = int(headers.get("content-length")) or None response_body = fp.read(content_length) self.assertEqual(int(status), 200) self.assertEqual(content_length, len(response_body)) self.assertEqual(response_body, tobytes("abcdefgh")) # remote does not close connection (keepalive header) self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line = fp.readline() # status line version, status, reason = (x.strip() for x in line.split(None, 2)) headers = parse_headers(fp) content_length = int(headers.get("content-length")) or None response_body = fp.read(content_length) self.assertEqual(int(status), 200) class NoContentLengthTests(object): def setUp(self): from waitress.tests.fixtureapps import nocl self.start_subprocess(nocl.app) def tearDown(self): self.stop_subprocess() def test_http10_generator(self): body = string.ascii_letters to_send = ( "GET / HTTP/1.0\r\n" "Connection: Keep-Alive\r\n" "Content-Length: %d\r\n\r\n" % len(body) ) to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") self.assertEqual(headers.get("content-length"), None) self.assertEqual(headers.get("connection"), "close") self.assertEqual(response_body, tobytes(body)) # remote closed connection (despite keepalive header), because # generators cannot have a content-length divined self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_http10_list(self): body = string.ascii_letters to_send = ( "GET /list HTTP/1.0\r\n" "Connection: Keep-Alive\r\n" "Content-Length: %d\r\n\r\n" % len(body) ) to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") self.assertEqual(headers["content-length"], str(len(body))) self.assertEqual(headers.get("connection"), "Keep-Alive") self.assertEqual(response_body, tobytes(body)) # remote keeps connection open because it divined the content length # from a length-1 list self.sock.send(to_send) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") def test_http10_listlentwo(self): body = string.ascii_letters to_send = ( "GET /list_lentwo HTTP/1.0\r\n" "Connection: Keep-Alive\r\n" "Content-Length: %d\r\n\r\n" % len(body) ) to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") self.assertEqual(headers.get("content-length"), None) self.assertEqual(headers.get("connection"), "close") self.assertEqual(response_body, tobytes(body)) # remote closed connection (despite keepalive header), because # lists of length > 1 cannot have their content 
length divined self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_http11_generator(self): body = string.ascii_letters to_send = "GET / HTTP/1.1\r\nContent-Length: %s\r\n\r\n" % len(body) to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb") line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") expected = b"" for chunk in chunks(body, 10): expected += tobytes( "%s\r\n%s\r\n" % (str(hex(len(chunk))[2:].upper()), chunk) ) expected += b"0\r\n\r\n" self.assertEqual(response_body, expected) # connection is always closed at the end of a chunked response self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_http11_list(self): body = string.ascii_letters to_send = "GET /list HTTP/1.1\r\nContent-Length: %d\r\n\r\n" % len(body) to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") self.assertEqual(headers["content-length"], str(len(body))) self.assertEqual(response_body, tobytes(body)) # remote keeps connection open because it divined the content length # from a length-1 list self.sock.send(to_send) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") def test_http11_listlentwo(self): body = string.ascii_letters to_send = "GET /list_lentwo HTTP/1.1\r\nContent-Length: %s\r\n\r\n" % len(body) to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb") line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") expected = b"" for chunk in (body[0], body[1:]): expected += tobytes( "%s\r\n%s\r\n" % (str(hex(len(chunk))[2:].upper()), chunk) ) expected += b"0\r\n\r\n" self.assertEqual(response_body, expected) # connection is always closed at the end of a chunked response self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) class WriteCallbackTests(object): def setUp(self): from waitress.tests.fixtureapps import writecb self.start_subprocess(writecb.app) def tearDown(self): self.stop_subprocess() def test_short_body(self): # check to see if server closes connection when body is too short # for cl header to_send = tobytes( "GET /short_body HTTP/1.0\r\n" "Connection: Keep-Alive\r\n" "Content-Length: 0\r\n" "\r\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) # server trusts the content-length header (5) self.assertline(line, "200", "OK", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, 9) self.assertNotEqual(cl, len(response_body)) self.assertEqual(len(response_body), cl - 1) self.assertEqual(response_body, tobytes("abcdefgh")) # remote closed connection (despite keepalive header) self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_long_body(self): # check server doesnt close connection when body is too long # for cl header to_send = tobytes( "GET /long_body HTTP/1.0\r\n" "Connection: Keep-Alive\r\n" "Content-Length: 0\r\n" "\r\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) content_length = int(headers.get("content-length")) or None self.assertEqual(content_length, 9) self.assertEqual(content_length, len(response_body)) 
self.assertEqual(response_body, tobytes("abcdefghi")) # remote does not close connection (keepalive header) self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") def test_equal_body(self): # check server doesnt close connection when body is equal to # cl header to_send = tobytes( "GET /equal_body HTTP/1.0\r\n" "Connection: Keep-Alive\r\n" "Content-Length: 0\r\n" "\r\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) content_length = int(headers.get("content-length")) or None self.assertEqual(content_length, 9) self.assertline(line, "200", "OK", "HTTP/1.0") self.assertEqual(content_length, len(response_body)) self.assertEqual(response_body, tobytes("abcdefghi")) # remote does not close connection (keepalive header) self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") def test_no_content_length(self): # wtf happens when there's no content-length to_send = tobytes( "GET /no_content_length HTTP/1.0\r\n" "Connection: Keep-Alive\r\n" "Content-Length: 0\r\n" "\r\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line = fp.readline() # status line line, headers, response_body = read_http(fp) content_length = headers.get("content-length") self.assertEqual(content_length, None) self.assertEqual(response_body, tobytes("abcdefghi")) # remote closed connection (despite keepalive header) self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) class TooLargeTests(object): toobig = 1050 def setUp(self): from waitress.tests.fixtureapps import toolarge self.start_subprocess( toolarge.app, max_request_header_size=1000, max_request_body_size=1000 ) def tearDown(self): self.stop_subprocess() def test_request_body_too_large_with_wrong_cl_http10(self): body = "a" * self.toobig to_send = "GET / HTTP/1.0\r\nContent-Length: 5\r\n\r\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb") # first request succeeds (content-length 5) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) # server trusts the content-length header; no pipelining, # so request fulfilled, extra bytes are thrown away # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_wrong_cl_http10_keepalive(self): body = "a" * self.toobig to_send = "GET / HTTP/1.0\r\nContent-Length: 5\r\nConnection: Keep-Alive\r\n\r\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb") # first request succeeds (content-length 5) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) line, headers, response_body = read_http(fp) self.assertline(line, "431", "Request Header Fields Too Large", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_no_cl_http10(self): body = "a" * self.toobig to_send = "GET / HTTP/1.0\r\n\r\n" to_send += body to_send = 
tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) # extra bytes are thrown away (no pipelining), connection closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_no_cl_http10_keepalive(self): body = "a" * self.toobig to_send = "GET / HTTP/1.0\r\nConnection: Keep-Alive\r\n\r\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) # server trusts the content-length header (assumed zero) self.assertline(line, "200", "OK", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) line, headers, response_body = read_http(fp) # next response overruns because the extra data appears to be # header data self.assertline(line, "431", "Request Header Fields Too Large", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_wrong_cl_http11(self): body = "a" * self.toobig to_send = "GET / HTTP/1.1\r\nContent-Length: 5\r\n\r\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb") # first request succeeds (content-length 5) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) # second response is an error response line, headers, response_body = read_http(fp) self.assertline(line, "431", "Request Header Fields Too Large", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_wrong_cl_http11_connclose(self): body = "a" * self.toobig to_send = "GET / HTTP/1.1\r\nContent-Length: 5\r\nConnection: close\r\n\r\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) # server trusts the content-length header (5) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_no_cl_http11(self): body = "a" * self.toobig to_send = "GET / HTTP/1.1\r\n\r\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb") # server trusts the content-length header (assumed 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) # server assumes pipelined requests due to http/1.1, and the first # request was assumed c-l 0 because it had no content-length header, # so entire body looks like the header of the subsequent request # second response is an error response line, headers, response_body = read_http(fp) self.assertline(line, "431", "Request Header Fields Too Large", "HTTP/1.0") cl = 
int(headers["content-length"]) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_no_cl_http11_connclose(self): body = "a" * self.toobig to_send = "GET / HTTP/1.1\r\nConnection: close\r\n\r\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) # server trusts the content-length header (assumed 0) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_chunked_encoding(self): control_line = "20;\r\n" # 20 hex = 32 dec s = "This string has 32 characters.\r\n" to_send = "GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n" repeat = control_line + s to_send += repeat * ((self.toobig // len(repeat)) + 1) to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) # body bytes counter caught a max_request_body_size overrun self.assertline(line, "413", "Request Entity Too Large", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) self.assertEqual(headers["content-type"], "text/plain") # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) class InternalServerErrorTests(object): def setUp(self): from waitress.tests.fixtureapps import error self.start_subprocess(error.app, expose_tracebacks=True) def tearDown(self): self.stop_subprocess() def test_before_start_response_http_10(self): to_send = "GET /before_start_response HTTP/1.0\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "500", "Internal Server Error", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b"Internal Server Error")) self.assertEqual(headers["connection"], "close") # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_before_start_response_http_11(self): to_send = "GET /before_start_response HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "500", "Internal Server Error", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b"Internal Server Error")) self.assertEqual( sorted(headers.keys()), ["connection", "content-length", "content-type", "date", "server"] ) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_before_start_response_http_11_close(self): to_send = tobytes( "GET /before_start_response HTTP/1.1\r\nConnection: close\r\n\r\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "500", "Internal Server Error", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b"Internal Server Error")) 
self.assertEqual( sorted(headers.keys()), ["connection", "content-length", "content-type", "date", "server"], ) self.assertEqual(headers["connection"], "close") # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_after_start_response_http10(self): to_send = "GET /after_start_response HTTP/1.0\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "500", "Internal Server Error", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b"Internal Server Error")) self.assertEqual( sorted(headers.keys()), ["connection", "content-length", "content-type", "date", "server"], ) self.assertEqual(headers["connection"], "close") # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_after_start_response_http11(self): to_send = "GET /after_start_response HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "500", "Internal Server Error", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b"Internal Server Error")) self.assertEqual( sorted(headers.keys()), ["connection", "content-length", "content-type", "date", "server"] ) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_after_start_response_http11_close(self): to_send = tobytes( "GET /after_start_response HTTP/1.1\r\nConnection: close\r\n\r\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "500", "Internal Server Error", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b"Internal Server Error")) self.assertEqual( sorted(headers.keys()), ["connection", "content-length", "content-type", "date", "server"], ) self.assertEqual(headers["connection"], "close") # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_after_write_cb(self): to_send = "GET /after_write_cb HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") self.assertEqual(response_body, b"") # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_in_generator(self): to_send = "GET /in_generator HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") self.assertEqual(response_body, b"") # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) class FileWrapperTests(object): def setUp(self): from waitress.tests.fixtureapps import filewrapper self.start_subprocess(filewrapper.app) def tearDown(self): self.stop_subprocess() def test_filelike_http11(self): to_send = "GET /filelike HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() for t in 
range(0, 2): self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377\330\377" in response_body) # connection has not been closed def test_filelike_nocl_http11(self): to_send = "GET /filelike_nocl HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377\330\377" in response_body) # connection has not been closed def test_filelike_shortcl_http11(self): to_send = "GET /filelike_shortcl HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, 1) self.assertEqual(cl, len(response_body)) ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377" in response_body) # connection has not been closed def test_filelike_longcl_http11(self): to_send = "GET /filelike_longcl HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377\330\377" in response_body) # connection has not been closed def test_notfilelike_http11(self): to_send = "GET /notfilelike HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377\330\377" in response_body) # connection has not been closed def test_notfilelike_iobase_http11(self): to_send = "GET /notfilelike_iobase HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377\330\377" in response_body) # connection has not been closed def test_notfilelike_nocl_http11(self): to_send = "GET /notfilelike_nocl HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377\330\377" in response_body) # connection has been closed (no content-length) self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) 
def test_notfilelike_shortcl_http11(self): to_send = "GET /notfilelike_shortcl HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, 1) self.assertEqual(cl, len(response_body)) ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377" in response_body) # connection has not been closed def test_notfilelike_longcl_http11(self): to_send = "GET /notfilelike_longcl HTTP/1.1\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.1") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body) + 10) ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377\330\377" in response_body) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_filelike_http10(self): to_send = "GET /filelike HTTP/1.0\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377\330\377" in response_body) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_filelike_nocl_http10(self): to_send = "GET /filelike_nocl HTTP/1.0\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377\330\377" in response_body) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_notfilelike_http10(self): to_send = "GET /notfilelike HTTP/1.0\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") cl = int(headers["content-length"]) self.assertEqual(cl, len(response_body)) ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377\330\377" in response_body) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_notfilelike_nocl_http10(self): to_send = "GET /notfilelike_nocl HTTP/1.0\r\n\r\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile("rb", 0) line, headers, response_body = read_http(fp) self.assertline(line, "200", "OK", "HTTP/1.0") ct = headers["content-type"] self.assertEqual(ct, "image/jpeg") self.assertTrue(b"\377\330\377" in response_body) # connection has been closed (no content-length) self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) class TcpEchoTests(EchoTests, TcpTests, unittest.TestCase): pass class TcpPipeliningTests(PipeliningTests, TcpTests, unittest.TestCase): pass class 
TcpExpectContinueTests(ExpectContinueTests, TcpTests, unittest.TestCase): pass class TcpBadContentLengthTests(BadContentLengthTests, TcpTests, unittest.TestCase): pass class TcpNoContentLengthTests(NoContentLengthTests, TcpTests, unittest.TestCase): pass class TcpWriteCallbackTests(WriteCallbackTests, TcpTests, unittest.TestCase): pass class TcpTooLargeTests(TooLargeTests, TcpTests, unittest.TestCase): pass class TcpInternalServerErrorTests( InternalServerErrorTests, TcpTests, unittest.TestCase ): pass class TcpFileWrapperTests(FileWrapperTests, TcpTests, unittest.TestCase): pass if hasattr(socket, "AF_UNIX"): class FixtureUnixWSGIServer(server.UnixWSGIServer): """A version of UnixWSGIServer that relays back what it's bound to. """ family = socket.AF_UNIX # Testing def __init__(self, application, queue, **kw): # pragma: no cover # Coverage doesn't see this as it's ran in a separate process. # To permit parallel testing, use a PID-dependent socket. kw["unix_socket"] = "/tmp/waitress.test-%d.sock" % os.getpid() super(FixtureUnixWSGIServer, self).__init__(application, **kw) queue.put(self.socket.getsockname()) class UnixTests(SubprocessTests): server = FixtureUnixWSGIServer def make_http_connection(self): return UnixHTTPConnection(self.bound_to) def stop_subprocess(self): super(UnixTests, self).stop_subprocess() cleanup_unix_socket(self.bound_to) def send_check_error(self, to_send): # Unlike inet domain sockets, Unix domain sockets can trigger a # 'Broken pipe' error when the socket it closed. try: self.sock.send(to_send) except socket.error as exc: self.assertEqual(get_errno(exc), errno.EPIPE) class UnixEchoTests(EchoTests, UnixTests, unittest.TestCase): pass class UnixPipeliningTests(PipeliningTests, UnixTests, unittest.TestCase): pass class UnixExpectContinueTests(ExpectContinueTests, UnixTests, unittest.TestCase): pass class UnixBadContentLengthTests( BadContentLengthTests, UnixTests, unittest.TestCase ): pass class UnixNoContentLengthTests(NoContentLengthTests, UnixTests, unittest.TestCase): pass class UnixWriteCallbackTests(WriteCallbackTests, UnixTests, unittest.TestCase): pass class UnixTooLargeTests(TooLargeTests, UnixTests, unittest.TestCase): pass class UnixInternalServerErrorTests( InternalServerErrorTests, UnixTests, unittest.TestCase ): pass class UnixFileWrapperTests(FileWrapperTests, UnixTests, unittest.TestCase): pass def parse_headers(fp): """Parses only RFC2822 headers from a file pointer. """ headers = {} while True: line = fp.readline() if line in (b"\r\n", b"\n", b""): break line = line.decode("iso-8859-1") name, value = line.strip().split(":", 1) headers[name.lower().strip()] = value.lower().strip() return headers class UnixHTTPConnection(httplib.HTTPConnection): """Patched version of HTTPConnection that uses Unix domain sockets. 
""" def __init__(self, path): httplib.HTTPConnection.__init__(self, "localhost") self.path = path def connect(self): sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(self.path) self.sock = sock class ConnectionClosed(Exception): pass # stolen from gevent def read_http(fp): # pragma: no cover try: response_line = fp.readline() except socket.error as exc: fp.close() # errno 104 is ENOTRECOVERABLE, In WinSock 10054 is ECONNRESET if get_errno(exc) in (errno.ECONNABORTED, errno.ECONNRESET, 104, 10054): raise ConnectionClosed raise if not response_line: raise ConnectionClosed header_lines = [] while True: line = fp.readline() if line in (b"\r\n", b"\r\n", b""): break else: header_lines.append(line) headers = dict() for x in header_lines: x = x.strip() if not x: continue key, value = x.split(b": ", 1) key = key.decode("iso-8859-1").lower() value = value.decode("iso-8859-1") assert key not in headers, "%s header duplicated" % key headers[key] = value if "content-length" in headers: num = int(headers["content-length"]) body = b"" left = num while left > 0: data = fp.read(left) if not data: break body += data left -= len(data) else: # read until EOF body = fp.read() return response_line, headers, body # stolen from gevent def get_errno(exc): # pragma: no cover """ Get the error code out of socket.error objects. socket.error in <2.5 does not have errno attribute socket.error in 3.x does not allow indexing access e.args[0] works for all. There are cases when args[0] is not errno. i.e. http://bugs.python.org/issue6471 Maybe there are cases when errno is set, but it is not the first argument? """ try: if exc.errno is not None: return exc.errno except AttributeError: pass try: return exc.args[0] except IndexError: return None def chunks(l, n): """ Yield successive n-sized chunks from l. """ for i in range(0, len(l), n): yield l[i : i + n]
aqualogic_mqtt_old.py
""" This is the main program for the Aqualogic inteface. It is meant to run as a systemd daemon. It can also be used stand-alone from the command line. An example systemd.service file is included and can be used to start and stop the service. This program starts the following multiprocessing threads: - PoolCntl This thread connects the RS485 interface to the AquaLogic controller It monitors the serial stream for data packets and puts those packets on the from_pool queue for decoding in the PoolState thread It also monitors the to_pool queue and sends those commands to the serial interface after a detected keep-alive packet. This thread does not monitor the state of the pool controller, or handle resending data if it was not received. - PoolState This thread connects to the mqtt interface to Home Assistant (or other mqtt based home controller). It monitors the mqtt interface for state change requests and updates the state as requested. These state change requests will create a command to the PoolCntl thread via the to_pool queue. It also monitors the from_pool queue and determines if the incoming packets indicate a state change from the pool controller. If the state has changed it publishes the change to the mqtt interfac. - ThreadedLogger This thread creates a single log file that can be used by both worker threads. It is also used by aqualogic_mqtt for logging. Required arguments: -m --mqtt The address to the mqtt server to use for the interface -s --serial The device address to use as the serial interface to the Aqualogic controller -l --log_file Path for the desired log file """ import argparse from core import PoolState, PoolCntl, ThreadedLogger, GracefulKiller from multiprocessing import Process, Queue import time def main(): print("Hello from main") ser = '/dev/ttyUSB0' to_pool = Queue() from_pool = Queue() logger = Queue() pool_cntl = PoolCntl(ser, to_pool, from_pool, logger) killer = GracefulKiller() p = Process(target=pool_cntl.process) p.start p.join print("Type Cntl-C to close program") while True: if not logger.empty(): print(logger.get()) time.sleep(0.1) if killer.kill_now: break print ("End of the program. I was killed gracefully") if __name__ == '__main__': main() # q = Queue() # p = Process(target=f, args=(q,)) # p.start() # print(q.get()) # prints "[42, None, 'hello']" # p.join()
destroy.py
from core.config import Settings from core.providers.aws import BaseAction from core.terraform import PyTerraform from core import constants as K from time import sleep from threading import Thread from datetime import datetime import importlib import sys class Destroy(BaseAction): """ AWS provider for destroy command Attributes: executed_with_error (boolean): this is set to True if any error occurs destroy_start_time (time): Starting time when the execution started destroy_statuses (dict): Available destroy statuses exception (Excpetion obj): exception object if occured terraform_thread (thread): Destroy python threads """ executed_with_error = False destroy_start_time = datetime.now() destroy_statuses = { "tf_destroy_start": 1, "execution_finished": 3 } exception = None terraform_thread = None def __init__(self, args, input_obj): self.args = args super().__init__(input_obj) def execute(self, resources, terraform_with_targets, dry_run): """ This is the starting method where destroy begins. This is the actual method called from the main destroy class Args: resources (list): Resources to be destroyed terraform_with_targets (boolean): If partial destroy is to be done (if --tags is supplied) dry_run (boolean): Decides whether original destroy should be done """ error_response = self.validate_arguments(resources, terraform_with_targets) if not error_response: self._create_terraform_provider_file() self.execute_terraform_destroy(resources, terraform_with_targets, dry_run) self._delete_terraform_provider_file() else: self.exit_with_validation_errors(error_response) def execute_terraform_destroy(self, resources, terraform_with_targets, dry_run): """ Initialises the destroy execution, print the message and call the threads creation method Args: resources (list): Resources to be destroyed terraform_with_targets (boolean): If partial destroy is to be done (if --tags is supplied) dry_run (boolean): Decides whether original destroy should be done """ self.show_step_heading(K.TERRAFORM_DESTROY_STARTED, write_log=False) if not dry_run: self.destroy_start_time = datetime.now() self.current_destroy_status = self.destroy_statuses.get('tf_destroy_start') self.destroy_resources_and_show_progress(resources, terraform_with_targets) self._cleanup_destroy() if self.executed_with_error: raise Exception(self.exception) else: self.show_step_finish(K.TERRAFORM_DESTROY_DRY_RUN) def _cleanup_destroy(self): self._delete_terraform_provider_file() def run_pre_destoy(self, resources): """ Call all resource's pre destroy hook if there is any post destroy activity is to be made Args: resources (list): Resources to be destroyed """ for resource in resources: resource.pre_terraform_destroy() def run_post_destoy(self, resources): """ Call all resource's post_destroy hook if there is any post destroy activity is to be made Args: resources (list): Resources to be destroyed """ for resource in resources: resource.post_terraform_destroy() resource.remove_terraform() def destroy_resources_and_show_progress(self, resources, terraform_with_targets): """ Creates 2 thread 1. For actualy destroy 2. 
For displaying the status of destruction Since python is interpreted language we need to create threads to display the status in one and actual process in another Args: resources (list): Resources to be destroyed terraform_with_targets (boolean): If partial destroy is to be done (if --tags is supplied) dry_run (boolean): Decides whether original destroy should be done """ self.terraform_thread = Thread(target=self.destroy_resources, args=(list(resources), terraform_with_targets)) progressbar_thread = Thread(target=self.show_progress_status, args=(list(resources), terraform_with_targets)) self.terraform_thread.start() progressbar_thread.start() self.terraform_thread.join() progressbar_thread.join() def destroy_resources(self, resources, terraform_with_targets): """ Start destroying the esources by calling PyTerraform class destroy Args: resources (list): Resources to be destroyed terraform_with_targets (boolean): If partial destroy is to be done (if --tags is supplied) """ destroy_resources = resources if terraform_with_targets else None self.run_pre_destoy(resources) # May be timeout causes first destroy to be a failure hence attempt as many times as the value in the setting for attempt in range(Settings.DESTROY_NUM_ATTEMPTS): self.executed_with_error = False self.exception = None try: PyTerraform().terraform_destroy(destroy_resources) self.run_post_destoy(resources) break except Exception as e: self.executed_with_error = True self.exception = e PyTerraform.save_terraform_output() self.current_destroy_status = self.destroy_statuses.get('execution_finished') def show_progress_status(self, resources, terraform_with_targets): """ Show status of the destruction to user by printing messages Args: resources (list): Resources to be destroyed terraform_with_targets (boolean): If partial destroy is to be done (if --tags is supplied) """ sleep(1) # To sleep initaially for pre-destroy to process while self.destroy_statuses.get('execution_finished') != self.current_destroy_status and self.terraform_thread.isAlive(): duration = self.CYAN_ANSI + self.get_duration(datetime.now() - self.destroy_start_time) + self.END_ANSI message = "Time elapsed: %s" % duration self.show_progress_message(message, 1.5) self.erase_printed_line() if self.destroy_statuses.get('execution_finished') == self.current_destroy_status: if self.executed_with_error: self.show_step_finish(K.TERRAFORM_DESTROY_ERROR, write_log=False, color=self.ERROR_ANSI) else: self.show_step_finish(K.TERRAFORM_DESTROY_COMPLETED, write_log=False, color=self.GREEN_ANSI) end_time = datetime.now() self.display_process_duration(self.destroy_start_time, end_time)
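destroy_resources_and_show_progress pairs a worker thread with a progress thread and joins both; the worker flags completion through current_destroy_status while the progress loop also checks isAlive(), a method name that was removed in Python 3.9 in favour of is_alive(). Below is a minimal sketch of the same two-thread pattern, using a threading.Event in place of the status dict; that substitution and both function names are mine, not the module's.

import threading
import time
from datetime import datetime


def long_running_destroy(done_event):
    # Stand-in for PyTerraform().terraform_destroy(resources).
    time.sleep(3)
    done_event.set()


def show_elapsed(done_event, start_time):
    # Print elapsed time until the worker signals completion.
    while not done_event.is_set():
        print("Time elapsed:", datetime.now() - start_time)
        time.sleep(0.5)
    print("Destroy finished")


if __name__ == "__main__":
    done = threading.Event()
    started = datetime.now()
    worker = threading.Thread(target=long_running_destroy, args=(done,))
    progress = threading.Thread(target=show_elapsed, args=(done, started))
    worker.start()
    progress.start()
    worker.join()
    progress.join()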
MegaMind_engine_anonymous.py
import threading import struct import json #from Session import Session #from Session import Extension from bcolors import bcolors #from Sandbox import Sandbox #from Sandbox import Sandbox_pool from time import sleep import os from mypipe import MyPipe #port_start_speech = consts.PortNumber_start_speech_2 #port_end_speech = consts.PortNumber_end_speech_2 session_in_progress = False state = "idle" start_session_signal = False end_session_signal = False listen_event_signal = False recieved_response_signal = False end_session_signal = False first_req_of_session = False def debug_log(*args, **kwargs): print( "YYY "+" ".join(map(str,args))+" YYY", **kwargs) def wait_for_start_session_notice(): session_start_pipe.wait_on_pipe() def wait_for_end_session_notice(): session_end_pipe.wait_on_pipe() def wait_for_keyword_detection(): kwd_pipe.wait_on_pipe() return def wait_for_payload(): data = payload_pipe.read_from_pipe() return data def wait_for_speech_recognition_done(): # with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: # s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # s.bind( (consts.Host,port_end_speech) ) # s.listen() # conn,addr = s.accept() # with conn: # data = conn.recv(4096) # cmd = data.decode() # cmd.replace('.','') # cmd.replace('?','') # cmd.replace('!','') # conn.close() # s.close() # return cmd cmd = speech_recog_end_pipe.read_from_pipe() cmd.replace('.','') cmd.replace('?','') cmd.replace('!','') return cmd def send_cmd_to_sdk(cmd): resp_pipe.write_to_pipe(cmd+'\0') def start_speech_recognition(): speech_recog_start_pipe.write_to_pipe("s") def payload_thread(name): print('payload start') global recieved_response_signal global current_session while True: # print('before wait_for_payload') payload = wait_for_payload() tokens = payload.split('"') # print('after wait_for_payload') # print('Payload is= ' + payload) # print('tokens = ') # print(tokens) caption = ' caption not found' token_id = 'token not found' for i in range(1,len(tokens)): if(tokens[i] == 'caption'): caption = tokens[i+2] if(tokens[i] == 'token'): token_id = tokens[i+2] print( ' caption = ' + bcolors.OKBLUE + caption + bcolors.ENDC ) print( ' token = ' + token_id ) recieved_response_signal = True anonymous_payload_pipe.write_to_pipe(caption) wfr_state_end_pipe.write_to_pipe("s") #while (recieved_response_signal == True): # pass def wait_for_listenning_thread(name): print('wait_for_listenning_thread') global listen_event_signal while True: wait_for_keyword_detection() listen_event_signal = True wfl_state_end_pipe.write_to_pipe("s") while(listen_event_signal == True): pass def start_session_notice_thread(name): print("start_session_notice_thread") while True: wait_for_start_session_notice() print("start_session_notice recieved") if ( state == "idle"): start_session() else: end_session() while( state != "idle"): pass start_session() return def end_session_notice_thread(name): print("end_session_notice_thread") while True: wait_for_end_session_notice() if (( state == "wait_for_listenning") or ( state == "wait_for_response") ): if( state == "wait_for_response"): payload_pipe.write_to_pipe("s") end_session() else: print("somthing is wrong!!") return def start_session(): global start_session_signal global first_req_of_session start_session_signal = True print(bcolors.FAIL + "*************NEW SESSION****************"+ bcolors.ENDC) first_req_of_session = True idle_state_end_pipe.write_to_pipe("s") while (start_session_signal == True): pass return def end_session(): global end_session_signal global 
current_session end_session_signal = True wfl_state_end_pipe.write_to_pipe("s") #wfr_state_end_pipe.write_to_pipe("s") print(bcolors.FAIL + "*************SESSION ENDS****************"+ bcolors.ENDC) while (end_session_signal == True): pass return def local_skill_id_finder(cmd): if 'open' in cmd: return cmd.replace('open ','') else: return "built-in" def get_user_cmd_and_send_it(): global first_req_of_session global current_session global extensions global active_extensions global sandbox_pool start_speech_recognition() cmd = wait_for_speech_recognition_done() cmd = cmd.rstrip() print('=======================================') print( ' Your command is:' + bcolors.OKGREEN + cmd + '\n' + bcolors.ENDC ) cmd = cmd.replace(',','') cmd = cmd.replace('.','') cmd = cmd.replace('!','') cmd = cmd.replace('?','') cmd = cmd.lower() send_cmd_to_sdk(cmd) return def main(): print('Welcome to MegaMind Engine') os.system('rm -rf /tmp/MegaMind') print('Initializing trigers') global kwd_pipe kwd_pipe = MyPipe('keyword_detection') kwd_pipe.make() global resp_pipe resp_pipe = MyPipe('MegaMind_engine_response') resp_pipe.make() global session_start_pipe session_start_pipe = MyPipe('session_start') session_start_pipe.make() global session_end_pipe session_end_pipe = MyPipe('session_end') session_end_pipe.make() global payload_pipe payload_pipe = MyPipe('payload') payload_pipe.make() global anonymous_payload_pipe anonymous_payload_pipe = MyPipe('anonymous_payload') anonymous_payload_pipe.make() global speech_recog_start_pipe speech_recog_start_pipe = MyPipe('speech_recog_start') speech_recog_start_pipe.make() global speech_recog_end_pipe speech_recog_end_pipe = MyPipe('speech_recog_end') speech_recog_end_pipe.make() global idle_state_end_pipe idle_state_end_pipe = MyPipe('idle_state_end') idle_state_end_pipe.make() global wfl_state_end_pipe wfl_state_end_pipe = MyPipe('wfl_state_end') wfl_state_end_pipe.make() global wfr_state_end_pipe wfr_state_end_pipe = MyPipe('wfr_state_end') wfr_state_end_pipe.make() global mute_pipe mute_pipe = MyPipe('mute_pipe') mute_pipe.make() global unmute_pipe unmute_pipe = MyPipe('unmute_pipe') unmute_pipe.make() print('Starting threads') th1 = threading.Thread(target=payload_thread, args=(1,), daemon=True) th1.start() th2 = threading.Thread(target=start_session_notice_thread, args=(1,), daemon=True) th2.start() th3 = threading.Thread(target=end_session_notice_thread, args=(1,), daemon=True) th3.start() th4 = threading.Thread(target=wait_for_listenning_thread, args=(1,), daemon=True) th4.start() global state global start_session_signal global end_session_signal global listen_event_signal global recieved_response_signal global current_session while True: while ( state == "idle" ): idle_state_end_pipe.wait_on_pipe() if( start_session_signal == True ): state = "wait_for_listenning" start_session_signal = False debug_log("idle -> wait_for_listenning") break while ( state == "wait_for_listenning" ): wfl_state_end_pipe.wait_on_pipe() if ( end_session_signal == True): state = "idle" end_session_signal = False debug_log("wait_for_listenning -> idle") break if( listen_event_signal == True): state = "get_cmd" listen_event_signal = False debug_log("wait_for_listenning -> get_cmd") break while ( state == "get_cmd" ): get_user_cmd_and_send_it() state = "wait_for_response" debug_log("get_cmd -> wait_for_response") break while ( state == "wait_for_response"): wfr_state_end_pipe.wait_on_pipe() if( recieved_response_signal == True): recieved_response_signal = False state = "wait_for_listenning" 
debug_log("wait_for_response -> wait_for_listenning") break if( end_session_signal == True ): end_session_signal = False debug_log("wait_for_response -> idle") state = "idle" break if __name__ == '__main__': main()
Tag_Unused.py
import requests import xml.etree.ElementTree as ET import sys import csv import threading import datetime import re import getpass from apigen import get_key requests.packages.urllib3.disable_warnings() def create_new_tag(fw, key, tag): #Create new tag r = requests.post(f"https://{fw}/api/?type=config&action=set&key={key}&xpath=/config/shared/tag/entry[@name='Unused - {tag}']&element=<color>color2</color>", verify=False) if r.status_code == 200: root = ET.fromstring(r.text) if root.get("status") == "success": print("\nCreated tag, proceeding..") else: print("\nCould not create tag, exiting..") sys.exit() else: print("\nCould not create tag, exiting..") sys.exit() def get_unused_rules (fw, key, dgrp, ticket): #Pulls all unused rules r = requests.post(f"https://{fw}/api/?key={key}&type=op&cmd=<show><rule-hit-count><device-group><entry name='{dgrp}'><post-rulebase><entry name='security'><rules><all/></rules></entry></post-rulebase></entry></device-group></rule-hit-count></show>",verify=False) if r.status_code == 200: root = ET.fromstring(r.text) unusedrules = [] # Get all rule names for i in root.findall("./result/rule-hit-count/device-group/entry/rule-base/entry/rules/"): rname = i.get("name") state = i.find("rule-state").text #Append unused rules to list if state == 'Unused': unusedrules.append(rname) #Tag rules and write to csv for rule in unusedrules: unused = rule if any(i in unused for i in ignorelist) is True: pass else: addtag = requests.post(f"https://{fw}/api/?key={key}&type=config&action=set&xpath=/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name='{dgrp}']/post-rulebase/security/rules/entry[@name='{unused}']/tag&element=<member>Unused - {tag}</member>",verify=False) if addtag.status_code == 200: root = ET.fromstring(addtag.text) if root.get("status") == "success": resultWriter.writerow([unused, dgrp]) addcomment = requests.post(f"https://{fw}/api/?key={key}&type=op&cmd=<set><audit-comment><xpath>/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name='{dgrp}']/post-rulebase/security/rules/entry[@name='{unused}']</xpath><comment>{ticket}</comment></audit-comment></set>", verify=False) if __name__ == '__main__': #Defining variables tag = datetime.datetime.now().strftime("%d/%m/%y") print(f"Enter Panorama IP/FQDN:") fw = input("> ") print(f"Enter Username and PW for {fw}") print("Username:") user = input("> ") print("Password:") pw = getpass.getpass("> ") key = get_key.generate(fw, user, pw) print("\nEnter CHG or RITM number") #Regex check to ensure correct service now ticket has been entered while True: ticket = input("> ") audit = bool(re.match("(CHG|RITM|INC)[0-9]{7}", ticket)) if audit == True: break else: print("\nThat's not a valid snow ticket, the format should be CHG/RITM/INC followed by 7 digits") #Pull device groups from text file and append to list dgrptxt = open('dgrp.txt', 'r') dgrp = dgrptxt.readlines() dgrptxt.close() dgrplist = [] for each in dgrp: dgrplist.append(each.strip("\r\n")) #Pulls rules from ignorelist ignoretxt = open('ignorelist.txt', 'r') ignore = ignoretxt.readlines() ignoretxt.close() ignorelist = [] for each in ignore: ignorelist.append(each.strip("\r\n")) #List to manage multithread proclist = [] #Creating csv to add results and send email result_log=("TaggedRules-" + datetime.datetime.now().strftime("%d-%m-%Y") + ".csv") resultcsv = open(result_log,'w', newline='') resultWriter = csv.writer(resultcsv, delimiter=',') resultWriter.writerow(["Tagged Rule","Device Group"]) create_new_tag(fw, key, tag) 
#Creates a new thread for each device group for i in dgrplist: dgrp = i proc = threading.Thread(target=get_unused_rules, args=[fw, key, dgrp, ticket]) proc.start() proclist.append(proc) # Wait for Thread to finish for x in proclist: x.join() print(f"\nTagging complete, please commit to Panorama. You can find results in {result_log}") #Close csv, send email, exit script resultcsv.close() sys.exit()
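The block above spawns one unbounded thread per device group and then joins them all, which works but can hammer the Panorama API when dgrp.txt is long. A hedged alternative sketch using a bounded pool is shown below; the device-group names and the worker body are placeholders, not values from the script.

from concurrent.futures import ThreadPoolExecutor


def tag_unused(dgrp):
    # Placeholder for get_unused_rules(fw, key, dgrp, ticket).
    print("processing device group:", dgrp)


device_groups = ["DG-Internal", "DG-DMZ", "DG-Branch"]   # hypothetical names

with ThreadPoolExecutor(max_workers=5) as pool:
    # map() blocks until every device group has been processed.
    list(pool.map(tag_unused, device_groups))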
lenovo_fix.py
#!/usr/bin/env python3 import configparser import dbus import glob import os import psutil import struct import subprocess import sys from collections import defaultdict from dbus.mainloop.glib import DBusGMainLoop from errno import EACCES, EPERM from mmio import MMIO, MMIOError from multiprocessing import cpu_count from threading import Event, Thread try: from gi.repository import GObject except ImportError: import gobject as GObject SYSFS_POWER_PATH = '/sys/class/power_supply/AC/online' CONFIG_PATH = '/etc/lenovo_fix.conf' VOLTAGE_PLANES = { 'CORE': 0, 'GPU': 1, 'CACHE': 2, 'UNCORE': 3, 'ANALOGIO': 4, } TRIP_TEMP_RANGE = (40, 97) power = {'source': None, 'method': 'polling'} def writemsr(msr, val): n = ['/dev/cpu/{:d}/msr'.format(x) for x in range(cpu_count())] if not os.path.exists(n[0]): try: subprocess.check_call(('modprobe', 'msr')) except subprocess.CalledProcessError: print('[E] Unable to load the msr module.') sys.exit(1) try: for c in n: f = os.open(c, os.O_WRONLY) os.lseek(f, msr, os.SEEK_SET) os.write(f, struct.pack('Q', val)) os.close(f) except (IOError, OSError) as e: if e.errno == EPERM or e.errno == EACCES: print('[E] Unable to write to MSR. Try to disable Secure Boot.') sys.exit(1) else: raise e def is_on_battery(): with open(SYSFS_POWER_PATH) as f: return not bool(int(f.read())) def calc_time_window_vars(t): for Y in range(2**5): for Z in range(2**2): if t <= (2**Y) * (1. + Z / 4.) * 0.000977: return (Y, Z) raise ValueError('Unable to find a good combination!') def undervolt(config): for plane in VOLTAGE_PLANES: writemsr(0x150, calc_undervolt_msr(plane, config.getfloat('UNDERVOLT', plane))) def calc_undervolt_msr(plane, offset): assert offset <= 0 assert plane in VOLTAGE_PLANES offset = int(round(offset * 1.024)) offset = 0xFFE00000 & ((offset & 0xFFF) << 21) return 0x8000001100000000 | (VOLTAGE_PLANES[plane] << 40) | offset def load_config(): config = configparser.ConfigParser() config.read(CONFIG_PATH) # config values sanity check for power_source in ('AC', 'BATTERY'): for option in ( 'Update_Rate_s', 'PL1_Tdp_W', 'PL1_Duration_s', 'PL2_Tdp_W', 'PL2_Duration_S', ): config.set(power_source, option, str(max(0.1, config.getfloat(power_source, option)))) trip_temp = config.getfloat(power_source, 'Trip_Temp_C') valid_trip_temp = min(TRIP_TEMP_RANGE[1], max(TRIP_TEMP_RANGE[0], trip_temp)) if trip_temp != valid_trip_temp: config.set(power_source, 'Trip_Temp_C', str(valid_trip_temp)) print('[!] Overriding invalid "Trip_Temp_C" value in "{:s}": {:.1f} -> {:.1f}'.format( power_source, trip_temp, valid_trip_temp)) for plane in VOLTAGE_PLANES: value = config.getfloat('UNDERVOLT', plane) valid_value = min(0, value) if value != valid_value: config.set('UNDERVOLT', plane, str(valid_value)) print('[!] 
Overriding invalid "UNDERVOLT" value in "{:s}" voltage plane: {:.0f} -> {:.0f}'.format( plane, value, valid_value)) return config def calc_reg_values(config): regs = defaultdict(dict) for power_source in ('AC', 'BATTERY'): # the critical temperature for this CPU is 100 C trip_offset = int(round(100 - config.getfloat(power_source, 'Trip_Temp_C'))) regs[power_source]['MSR_TEMPERATURE_TARGET'] = trip_offset << 24 # 0.125 is the power unit of this CPU PL1 = int(round(config.getfloat(power_source, 'PL1_Tdp_W') / 0.125)) Y, Z = calc_time_window_vars(config.getfloat(power_source, 'PL1_Duration_s')) TW1 = Y | (Z << 5) PL2 = int(round(config.getfloat(power_source, 'PL2_Tdp_W') / 0.125)) Y, Z = calc_time_window_vars(config.getfloat(power_source, 'PL2_Duration_s')) TW2 = Y | (Z << 5) regs[power_source]['MSR_PKG_POWER_LIMIT'] = PL1 | (1 << 15) | (TW1 << 17) | (PL2 << 32) | (1 << 47) | ( TW2 << 49) return regs def set_hwp(pref): # set HWP energy performance hints assert pref in ('performance', 'balance_performance', 'default', 'balance_power', 'power') n = glob.glob('/sys/devices/system/cpu/cpu[0-9]*/cpufreq/energy_performance_preference') for c in n: with open(c, 'wb') as f: f.write(pref.encode()) def power_thread(config, regs, exit_event): try: mchbar_mmio = MMIO(0xfed159a0, 8) except MMIOError: print('[E] Unable to open /dev/mem. Try to disable Secure Boot.') sys.exit(1) while not exit_event.is_set(): # if power['method'] == 'polling': power['source'] = 'BATTERY' if is_on_battery() else 'AC' # set temperature trip point writemsr(0x1a2, regs[power['source']]['MSR_TEMPERATURE_TARGET']) # set PL1/2 on MSR writemsr(0x610, regs[power['source']]['MSR_PKG_POWER_LIMIT']) # set MCHBAR register to the same PL1/2 values mchbar_mmio.write32(0, regs[power['source']]['MSR_PKG_POWER_LIMIT'] & 0xffffffff) mchbar_mmio.write32(4, regs[power['source']]['MSR_PKG_POWER_LIMIT'] >> 32) wait_t = config.getfloat(power['source'], 'Update_Rate_s') enable_hwp_mode = config.getboolean('AC', 'HWP_Mode', fallback=False) if power['source'] == 'AC' and enable_hwp_mode: cpu_usage = float(psutil.cpu_percent(interval=wait_t)) # set full performance mode only when load is greater than this threshold (~ at least 1 core full speed) performance_mode = cpu_usage > 100. / (cpu_count() * 1.25) # check again if we are on AC, since in the meantime we might have switched to BATTERY if not is_on_battery(): set_hwp('performance' if performance_mode else 'balance_performance') else: exit_event.wait(wait_t) def main(): if os.geteuid() != 0: print('[E] No root no party. 
Try again with sudo.') sys.exit(1) power['source'] = 'BATTERY' if is_on_battery() else 'AC' config = load_config() regs = calc_reg_values(config) if not config.getboolean('GENERAL', 'Enabled'): return exit_event = Event() thread = Thread(target=power_thread, args=(config, regs, exit_event)) thread.daemon = True thread.start() undervolt(config) # handle dbus events for applying undervolt on resume from sleep/hybernate def handle_sleep_callback(sleeping): if not sleeping: undervolt(config) def handle_ac_callback(*args): try: power['source'] = 'BATTERY' if args[1]['Online'] == 0 else 'AC' power['method'] = 'dbus' except: power['method'] = 'polling' DBusGMainLoop(set_as_default=True) bus = dbus.SystemBus() # add dbus receiver only if undervolt is enabled in config if any(config.getfloat('UNDERVOLT', plane) != 0 for plane in VOLTAGE_PLANES): bus.add_signal_receiver(handle_sleep_callback, 'PrepareForSleep', 'org.freedesktop.login1.Manager', 'org.freedesktop.login1') bus.add_signal_receiver( handle_ac_callback, signal_name="PropertiesChanged", dbus_interface="org.freedesktop.DBus.Properties", path="/org/freedesktop/UPower/devices/line_power_AC") try: GObject.threads_init() loop = GObject.MainLoop() loop.run() except (KeyboardInterrupt, SystemExit): pass exit_event.set() loop.quit() thread.join(timeout=1) if __name__ == '__main__': main()
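calc_time_window_vars searches for the 5-bit/2-bit pair (Y, Z) that encodes a power-limit time window in units of roughly 0.977 ms for the MSR_PKG_POWER_LIMIT register. The standalone copy below just exercises that search and shows how Y and Z pack into the 7-bit field, without touching any MSR.

def calc_time_window_vars(t):
    # Same search as in lenovo_fix.py: find the smallest (Y, Z) such that
    # (2**Y) * (1 + Z/4) * 0.000977 seconds covers the requested duration t.
    for Y in range(2**5):
        for Z in range(2**2):
            if t <= (2**Y) * (1. + Z / 4.) * 0.000977:
                return (Y, Z)
    raise ValueError('Unable to find a good combination!')


if __name__ == "__main__":
    for seconds in (0.002, 1.0, 28.0):
        Y, Z = calc_time_window_vars(seconds)
        print(seconds, "->", (Y, Z), "packed:", hex(Y | (Z << 5)))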
test_failure_2.py
import logging import os import signal import sys import threading import time import numpy as np import pytest import ray from ray.experimental.internal_kv import _internal_kv_get from ray.ray_constants import DEBUG_AUTOSCALING_ERROR import ray._private.utils from ray.util.placement_group import placement_group import ray.ray_constants as ray_constants from ray.cluster_utils import cluster_not_supported import ray._private.gcs_pubsub as gcs_pubsub from ray._private.test_utils import ( init_error_pubsub, get_error_message, get_log_batch, Semaphore, wait_for_condition, run_string_as_driver_nonblocking, ) def test_warning_for_infeasible_tasks(ray_start_regular, error_pubsub): p = error_pubsub # Check that we get warning messages for infeasible tasks. @ray.remote(num_gpus=1) def f(): pass @ray.remote(resources={"Custom": 1}) class Foo: pass # This task is infeasible. f.remote() errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR) assert len(errors) == 1 assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR # This actor placement task is infeasible. foo = Foo.remote() print(foo) errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR) assert len(errors) == 1 assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR # Placement group cannot be made, but no warnings should occur. total_cpus = ray.cluster_resources()["CPU"] # Occupy one cpu by an actor @ray.remote(num_cpus=1) class A: pass a = A.remote() print(a) @ray.remote(num_cpus=total_cpus) def g(): pass pg = placement_group([{"CPU": total_cpus}], strategy="STRICT_PACK") g.options(placement_group=pg).remote() errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR, timeout=5) assert len(errors) == 0, errors def test_warning_for_infeasible_zero_cpu_actor(shutdown_only): # Check that we cannot place an actor on a 0 CPU machine and that we get an # infeasibility warning (even though the actor creation task itself # requires no CPUs). ray.init(num_cpus=0) p = init_error_pubsub() @ray.remote class Foo: pass # The actor creation should be infeasible. a = Foo.remote() errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR) assert len(errors) == 1 assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR p.close() del a def test_warning_for_too_many_actors(shutdown_only): # Check that if we run a workload which requires too many workers to be # started that we will receive a warning. num_cpus = 2 ray.init(num_cpus=num_cpus) p = init_error_pubsub() @ray.remote class Foo: def __init__(self): time.sleep(1000) # NOTE: We should save actor, otherwise it will be out of scope. actor_group1 = [Foo.remote() for _ in range(num_cpus * 10)] assert len(actor_group1) == num_cpus * 10 errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR) assert len(errors) == 1 assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR actor_group2 = [Foo.remote() for _ in range(num_cpus * 3)] assert len(actor_group2) == num_cpus * 3 errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR) assert len(errors) == 1 assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR p.close() def test_warning_for_too_many_nested_tasks(shutdown_only): # Check that if we run a workload which requires too many workers to be # started that we will receive a warning. 
num_cpus = 2 ray.init(num_cpus=num_cpus) p = init_error_pubsub() remote_wait = Semaphore.remote(value=0) nested_wait = Semaphore.remote(value=0) ray.get( [ remote_wait.locked.remote(), nested_wait.locked.remote(), ] ) @ray.remote(num_cpus=0.25) def f(): time.sleep(1000) return 1 @ray.remote(num_cpus=0.25) def h(nested_waits): nested_wait.release.remote() ray.get(nested_waits) ray.get(f.remote()) @ray.remote(num_cpus=0.25) def g(remote_waits, nested_waits): # Sleep so that the f tasks all get submitted to the scheduler after # the g tasks. remote_wait.release.remote() # wait until every lock is released. ray.get(remote_waits) ray.get(h.remote(nested_waits)) num_root_tasks = num_cpus * 4 # Lock remote task until everything is scheduled. remote_waits = [] nested_waits = [] for _ in range(num_root_tasks): remote_waits.append(remote_wait.acquire.remote()) nested_waits.append(nested_wait.acquire.remote()) [g.remote(remote_waits, nested_waits) for _ in range(num_root_tasks)] errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR) assert len(errors) == 1 assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR p.close() def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only): ray.init(num_cpus=1) @ray.remote def create_remote_function(): @ray.remote def g(): return 1 return ray.get(g.remote()) for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1): ray.get(create_remote_function.remote()) import io log_capture_string = io.StringIO() ch = logging.StreamHandler(log_capture_string) # TODO(rkn): It's terrible to have to rely on this implementation detail, # the fact that the warning comes from ray._private.import_thread.logger. # However, I didn't find a good way to capture the output for all loggers # simultaneously. ray._private.import_thread.logger.addHandler(ch) ray.get(create_remote_function.remote()) start_time = time.time() while time.time() < start_time + 10: log_contents = log_capture_string.getvalue() if len(log_contents) > 0: break ray._private.import_thread.logger.removeHandler(ch) assert "remote function" in log_contents assert ( "has been exported {} times.".format( ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD ) in log_contents ) # Now test the same thing but for actors. @ray.remote def create_actor_class(): # Require a GPU so that the actor is never actually created and we # don't spawn an unreasonable number of processes. @ray.remote(num_gpus=1) class Foo: pass Foo.remote() for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1): ray.get(create_actor_class.remote()) log_capture_string = io.StringIO() ch = logging.StreamHandler(log_capture_string) # TODO(rkn): As mentioned above, it's terrible to have to rely on this # implementation detail. ray._private.import_thread.logger.addHandler(ch) ray.get(create_actor_class.remote()) start_time = time.time() while time.time() < start_time + 10: log_contents = log_capture_string.getvalue() if len(log_contents) > 0: break ray._private.import_thread.logger.removeHandler(ch) assert "actor" in log_contents assert ( "has been exported {} times.".format( ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD ) in log_contents ) # Note that this test will take at least 10 seconds because it must wait for # the monitor to detect enough missed heartbeats. 
def test_warning_for_dead_node(ray_start_cluster_2_nodes, error_pubsub): cluster = ray_start_cluster_2_nodes cluster.wait_for_nodes() p = error_pubsub node_ids = {item["NodeID"] for item in ray.nodes()} # Try to make sure that the monitor has received at least one heartbeat # from the node. time.sleep(0.5) # Kill both raylets. cluster.list_all_nodes()[1].kill_raylet() cluster.list_all_nodes()[0].kill_raylet() # Check that we get warning messages for both raylets. errors = get_error_message(p, 2, ray_constants.REMOVED_NODE_ERROR, 40) # Extract the client IDs from the error messages. This will need to be # changed if the error message changes. warning_node_ids = {error.error_message.split(" ")[5] for error in errors} assert node_ids == warning_node_ids @pytest.mark.skipif( sys.platform == "win32", reason="Killing process on Windows does not raise a signal" ) def test_warning_for_dead_autoscaler(ray_start_regular, error_pubsub): # Terminate the autoscaler process. from ray.worker import _global_node autoscaler_process = _global_node.all_processes[ray_constants.PROCESS_TYPE_MONITOR][ 0 ].process autoscaler_process.terminate() # Confirm that we receive an autoscaler failure error. errors = get_error_message( error_pubsub, 1, ray_constants.MONITOR_DIED_ERROR, timeout=5 ) assert len(errors) == 1 # Confirm that the autoscaler failure error is stored. error = _internal_kv_get(DEBUG_AUTOSCALING_ERROR) assert error is not None def test_raylet_crash_when_get(ray_start_regular): def sleep_to_kill_raylet(): # Don't kill raylet before default workers get connected. time.sleep(2) ray.worker._global_node.kill_raylet() object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8)) ray.internal.free(object_ref) thread = threading.Thread(target=sleep_to_kill_raylet) thread.start() with pytest.raises(ray.exceptions.ReferenceCountingAssertionError): ray.get(object_ref) thread.join() @pytest.mark.parametrize( "ray_start_cluster", [ { "num_nodes": 1, "num_cpus": 2, }, { "num_nodes": 2, "num_cpus": 1, }, ], indirect=True, ) def test_eviction(ray_start_cluster): @ray.remote def large_object(): return np.zeros(10 * 1024 * 1024) obj = large_object.remote() assert isinstance(ray.get(obj), np.ndarray) # Evict the object. ray.internal.free([obj]) # ray.get throws an exception. with pytest.raises(ray.exceptions.ReferenceCountingAssertionError): ray.get(obj) @ray.remote def dependent_task(x): return # If the object is passed by reference, the task throws an # exception. with pytest.raises(ray.exceptions.RayTaskError): ray.get(dependent_task.remote(obj)) @pytest.mark.parametrize( "ray_start_cluster", [ { "num_nodes": 2, "num_cpus": 1, }, { "num_nodes": 1, "num_cpus": 2, }, ], indirect=True, ) def test_serialized_id(ray_start_cluster): @ray.remote def small_object(): # Sleep a bit before creating the object to force a timeout # at the getter. 
time.sleep(1) return 1 @ray.remote def dependent_task(x): return x @ray.remote def get(obj_refs, test_dependent_task): print("get", obj_refs) obj_ref = obj_refs[0] if test_dependent_task: assert ray.get(dependent_task.remote(obj_ref)) == 1 else: assert ray.get(obj_ref) == 1 obj = small_object.remote() ray.get(get.remote([obj], False)) obj = small_object.remote() ray.get(get.remote([obj], True)) obj = ray.put(1) ray.get(get.remote([obj], False)) obj = ray.put(1) ray.get(get.remote([obj], True)) @pytest.mark.xfail(cluster_not_supported, reason="cluster not supported") @pytest.mark.parametrize( "use_actors,node_failure", [(False, False), (False, True), (True, False), (True, True)], ) def test_fate_sharing(ray_start_cluster, use_actors, node_failure): config = { "num_heartbeats_timeout": 10, "raylet_heartbeat_period_milliseconds": 100, } cluster = ray_start_cluster # Head node with no resources. cluster.add_node(num_cpus=0, _system_config=config) ray.init(address=cluster.address) # Node to place the parent actor. node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1}) # Node to place the child actor. cluster.add_node(num_cpus=1, resources={"child": 1}) cluster.wait_for_nodes() @ray.remote def sleep(): time.sleep(1000) @ray.remote(resources={"child": 1}) def probe(): return # TODO(swang): This test does not pass if max_restarts > 0 for the # raylet codepath. Add this parameter once the GCS actor service is enabled # by default. @ray.remote class Actor(object): def __init__(self): return def start_child(self, use_actors): if use_actors: child = Actor.options(resources={"child": 1}).remote() ray.get(child.sleep.remote()) else: ray.get(sleep.options(resources={"child": 1}).remote()) def sleep(self): time.sleep(1000) def get_pid(self): return os.getpid() # Returns whether the "child" resource is available. def child_resource_available(): p = probe.remote() ready, _ = ray.wait([p], timeout=1) return len(ready) > 0 # Test fate sharing if the parent process dies. def test_process_failure(use_actors): a = Actor.options(resources={"parent": 1}).remote() pid = ray.get(a.get_pid.remote()) a.start_child.remote(use_actors=use_actors) # Wait for the child to be scheduled. wait_for_condition(lambda: not child_resource_available()) # Kill the parent process. os.kill(pid, 9) wait_for_condition(child_resource_available) # Test fate sharing if the parent node dies. def test_node_failure(node_to_kill, use_actors): a = Actor.options(resources={"parent": 1}).remote() a.start_child.remote(use_actors=use_actors) # Wait for the child to be scheduled. wait_for_condition(lambda: not child_resource_available()) # Kill the parent process. cluster.remove_node(node_to_kill, allow_graceful=False) node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1}) wait_for_condition(child_resource_available) return node_to_kill if node_failure: test_node_failure(node_to_kill, use_actors) else: test_process_failure(use_actors) @pytest.mark.parametrize( "ray_start_regular", [{"_system_config": {"gcs_rpc_server_reconnect_timeout_s": 100}}], indirect=True, ) @pytest.mark.skipif( gcs_pubsub.gcs_pubsub_enabled(), reason="Logs are streamed via GCS pubsub when it is enabled, so logs " "cannot be delivered after GCS is killed.", ) def test_gcs_server_failiure_report(ray_start_regular, log_pubsub): # Get gcs server pid to send a signal. 
all_processes = ray.worker._global_node.all_processes gcs_server_process = all_processes["gcs_server"][0].process gcs_server_pid = gcs_server_process.pid # TODO(mwtian): make sure logs are delivered after GCS is restarted. if sys.platform == "win32": sig = 9 else: sig = signal.SIGBUS os.kill(gcs_server_pid, sig) # wait for 30 seconds, for the 1st batch of logs. batches = get_log_batch(log_pubsub, 1, timeout=30) assert gcs_server_process.poll() is not None if sys.platform != "win32": # Windows signal handler does not run when process is terminated assert len(batches) == 1 assert batches[0]["pid"] == "gcs_server", batches def test_list_named_actors_timeout(monkeypatch, shutdown_only): with monkeypatch.context() as m: # defer for 3s m.setenv( "RAY_testing_asio_delay_us", "ActorInfoGcsService.grpc_server.ListNamedActors" "=3000000:3000000", ) ray.init(_system_config={"gcs_server_request_timeout_seconds": 1}) @ray.remote class A: pass a = A.options(name="hi").remote() print(a) with pytest.raises(ray.exceptions.GetTimeoutError): ray.util.list_named_actors() def test_raylet_node_manager_server_failure(ray_start_cluster_head, log_pubsub): cluster = ray_start_cluster_head redis_port = int(cluster.address.split(":")[1]) # Reuse redis port to make node manager grpc server fail to start. with pytest.raises(Exception): cluster.add_node(wait=False, node_manager_port=redis_port) # wait for max 10 seconds. def matcher(log_batch): return log_batch["pid"] == "raylet" and any( "Failed to start the grpc server." in line for line in log_batch["lines"] ) match = get_log_batch(log_pubsub, 1, timeout=10, matcher=matcher) assert len(match) > 0 def test_gcs_server_crash_cluster(ray_start_cluster): # Test the GCS server failures will crash the driver. cluster = ray_start_cluster GCS_RECONNECTION_TIMEOUT = 5 node = cluster.add_node( num_cpus=0, _system_config={"gcs_rpc_server_reconnect_timeout_s": GCS_RECONNECTION_TIMEOUT}, ) script = """ import ray import time ray.init(address="auto") time.sleep(60) """ # Get gcs server pid to send a signal. all_processes = node.all_processes gcs_server_process = all_processes["gcs_server"][0].process gcs_server_pid = gcs_server_process.pid proc = run_string_as_driver_nonblocking(script) # Wait long enough to start the driver. time.sleep(5) start = time.time() print(gcs_server_pid) os.kill(gcs_server_pid, signal.SIGKILL) wait_for_condition(lambda: proc.poll() is None, timeout=10) # Make sure the driver was exited within the timeout instead of hanging. # * 2 for avoiding flakiness. assert time.time() - start < GCS_RECONNECTION_TIMEOUT * 2 # Make sure all processes are cleaned up after GCS is crashed. # Currently, not every process is fate shared with GCS. # It seems like log monitor, ray client server, and Redis # are not fate shared. # TODO(sang): Fix it. # wait_for_condition(lambda: not node.any_processes_alive()) if __name__ == "__main__": import pytest sys.exit(pytest.main(["-v", __file__]))
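Several of the tests above lean on wait_for_condition (imported from ray._private.test_utils) to poll until a cluster-side effect becomes visible. A generic stand-in for that polling helper, with a name and defaults assumed rather than taken from Ray, looks like this:

import time


def wait_for_condition_sketch(predicate, timeout=10, interval=0.1):
    # Poll a boolean predicate until it returns True or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    raise TimeoutError("condition not met within %s seconds" % timeout)


if __name__ == "__main__":
    start = time.time()
    wait_for_condition_sketch(lambda: time.time() - start > 0.5)
    print("condition met")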
ex2_nolock.py
import multiprocessing

# python -m timeit -s "import ex2_nolock" "ex2_nolock.run_workers()"
# 12ms


def work(value, max_count):
    for n in range(max_count):
        value.value += 1


def run_workers():
    NBR_PROCESSES = 4
    MAX_COUNT_PER_PROCESS = 1000
    total_expected_count = NBR_PROCESSES * MAX_COUNT_PER_PROCESS
    processes = []
    value = multiprocessing.Value('i', 0)
    for process_nbr in range(NBR_PROCESSES):
        p = multiprocessing.Process(target=work, args=(value, MAX_COUNT_PER_PROCESS))
        p.start()
        processes.append(p)

    # wait for the processes to finish
    for p in processes:
        p.join()

    # print the final value
    print("Expecting to see a count of {}".format(total_expected_count))
    print("We have counted to {}".format(value.value))


if __name__ == "__main__":
    run_workers()
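The file above intentionally omits locking, so the printed count usually falls short of the expected total. As a companion (not part of the original), the same counter guarded by the synchronized wrapper's built-in lock yields the exact total, at the cost of extra synchronization overhead compared with the 12 ms timing noted in the comment above.

import multiprocessing


def work_locked(value, max_count):
    for _ in range(max_count):
        with value.get_lock():   # Value() ships with a lock unless lock=False
            value.value += 1


def run_workers_locked():
    NBR_PROCESSES = 4
    MAX_COUNT_PER_PROCESS = 1000
    value = multiprocessing.Value('i', 0)
    processes = [multiprocessing.Process(target=work_locked,
                                         args=(value, MAX_COUNT_PER_PROCESS))
                 for _ in range(NBR_PROCESSES)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    print("Expecting", NBR_PROCESSES * MAX_COUNT_PER_PROCESS, "got", value.value)


if __name__ == "__main__":
    run_workers_locked()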
apctrl.py
#!/usr/bin/env python2 """ This module was made to wrap the hostapd """ import os import threading import ctypes from roguehostapd.config.hostapdconfig import HostapdConfig import roguehostapd.config.hostapdconfig as hostapdconfig class KarmaData(ctypes.Structure): """ Handle the hostapd return mac/ssid data """ pass KarmaData._fields_ = [ ("is_assoc", ctypes.c_ubyte), ("ssid_len", ctypes.c_size_t), ("ssid", ctypes.c_ubyte * 32), ("mac_addr", ctypes.c_ubyte * 6), ("next_data", ctypes.POINTER(KarmaData))] class Hostapd(object): """ Hostapd wrapper class """ def __init__(self): """ Contruct the class :param self: A Hostapd object :type self: Hostapd :return: None :rtype: None """ self.config_obj = None self.hostapd_thread = None self.hostapd_lib = None self.config_obj = HostapdConfig() @staticmethod def _parse_karma_data(karma_data): """ get the associated clients' mac address and essid :param self: A Hostapd object :type self: Hostapd :param karma_data: A KarmaData object :type karma_data: KarmaData :return: A list of tuple of essid and mac address tuple :rtype: list """ ret = [] if karma_data: current = karma_data while current: if current.contents.is_assoc: # convert ssid_len to integer ssid_len = int(current.contents.ssid_len) # convert mac address to string mac_addr = current.contents.mac_addr mac_l = [format(mac_addr[i], 'x') for i in range(6)] mac_str = ':'.join(mac_l) # convert ssid to string ssid_buf = current.contents.ssid ssid_list = [ssid_buf[i] for i in range(ssid_len)] ssid = ''.join(map(chr, ssid_list)) ret.append((mac_str, ssid)) current = current.contents.next_data return ret def get_karma_data(self): """ get the data for the KARMA attack victims from hostapd :param self: A Hostapd object :type self: Hostapd :return: A list of tuple of essid and mac address tuple :rtype: list """ karma_data = self.hostapd_lib.get_assoc_karma_data() mac_ssid_pairs = self._parse_karma_data(karma_data) return mac_ssid_pairs def is_alive(self): """ API for check if the hostapd thread is running :param self: A Hostapd object :type self: Hostapd :return: True if the hostapd is running else False :rtype: bool """ return self.hostapd_thread.is_alive() def create_hostapd_conf_file(self, hostapd_config, options): """ Create the roguehostapd configuration file :param self: A Hostapd object :type self: Hostapd :param hostapd_config: Hostapd configuration for hostapd.conf :type hostapd_config: dict :param options: Hostapd command line options :type options: dict :return: None :rtype: None """ self.config_obj.init_config() self.config_obj.write_configs(hostapd_config, options) def start(self, hostapd_config, options): """ Start the hostapd process :param self: A Hostapd object :type self: Hostapd :param hostapd_config: Hostapd configuration for hostapd.conf :type hostapd_config: dict :param options: Hostapd command line options :type options: dict :return: None :rtype: None ..note: the start function uses ctypes to load the shared library of hostapd and use it to call the main function to lunch the AP """ # update the hostapd configuration based on user input self.create_hostapd_conf_file(hostapd_config, options) # get the hostapd command to lunch the hostapd hostapd_cmd = [hostapdconfig.HOSTAPD_EXECUTION_PATH, hostapdconfig.ROGUEHOSTAPD_RUNTIME_CONFIGPATH] for key in self.config_obj.options: if self.config_obj.options[key]: hostapd_cmd += self.config_obj.options[key] num_of_args = len(hostapd_cmd) str_arr_type = ctypes.c_char_p * num_of_args hostapd_cmd = str_arr_type(*hostapd_cmd) # get the hostapd 
shared library self.hostapd_lib = ctypes.cdll.LoadLibrary( hostapdconfig.HOSTAPD_LIBPATH) # init hostapd lib info self.hostapd_lib.get_assoc_karma_data.restype = ctypes.POINTER( KarmaData) # start the hostapd thread self.hostapd_thread = threading.Thread( target=self.hostapd_lib.main, args=(len(hostapd_cmd), hostapd_cmd)) self.hostapd_thread.start() def stop(self): """ Stop the hostapd :param self: A Hostapd object :type self: Hostapd :return: None :rtype: None ..note: the stop function uses the eloop_terminate function in hostapd shared library to stop AP. """ self.hostapd_lib.eloop_terminate() if self.hostapd_thread.is_alive(): self.hostapd_thread.join(5) if os.path.isfile(hostapdconfig.ROGUEHOSTAPD_RUNTIME_CONFIGPATH): os.remove(hostapdconfig.ROGUEHOSTAPD_RUNTIME_CONFIGPATH) if os.path.isfile(hostapdconfig.ROGUEHOSTAPD_DENY_MACS_CONFIGPATH): os.remove(hostapdconfig.ROGUEHOSTAPD_DENY_MACS_CONFIGPATH) if __name__ == '__main__': HOSTAPD_CONFIG_DICT = { 'ssid': 'test', 'interface': 'wlan0', 'karma_enable': 1, 'deny_macs': ['00:00:00:11:22:33'] } HOSTAPD_OPTION_DICT = { 'debug_verbose': True, 'key_data': True, 'timestamp': False, 'version': False, 'mute': True, 'eloop_term_disable': True} HOSTAPD_OBJ = Hostapd() HOSTAPD_OBJ.start(HOSTAPD_CONFIG_DICT, HOSTAPD_OPTION_DICT) import time while True: try: time.sleep(1) except KeyboardInterrupt: HOSTAPD_OBJ.stop() break
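Hostapd.start() builds a C-style argv with ctypes and hands the library's main() to a Python thread. The toy below mirrors that call pattern on a Linux/macOS system against libc (which is always present) instead of the hostapd shared object; the command-line contents are hypothetical.

import ctypes
import ctypes.util
import threading

# Load a library that exists everywhere instead of hostapdconfig.HOSTAPD_LIBPATH.
libc = ctypes.CDLL(ctypes.util.find_library("c"))

# Build a C-style argv the same way Hostapd.start() does before calling main().
cmd = [b"hostapd", b"/tmp/hostapd.conf"]          # hypothetical command line
argv = (ctypes.c_char_p * len(cmd))(*cmd)

# Run a library call on a worker thread, mirroring the hostapd_thread usage.
t = threading.Thread(target=libc.puts, args=(argv[0],))
t.start()
t.join()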
stego-maker-gui.py
from Tkinter import * import ttk import pvd.pvd import tkMessageBox import tkFileDialog as tfd from PIL import ImageTk, Image import matplotlib.pyplot as plt import pvd.bit_planes.bit_planes as pvd_bits import numpy as np import threading """ AUTHOR: Himanshu Sharma """ class Page(Frame): # Page class is a standard class to define frames in the Tk() window. def __init__(self, *args, **kwargs): Frame.__init__(self, *args, **kwargs) def show(self): self.lift() class BitPlanePage(Page): # This Page shows the bit planes. Inherits Page class. def __init__(self, *args, **kwargs): Page.__init__(self, *args, **kwargs) self.configure(background='black') self.header = Label(self, text="Bit-Plane Generator", font=("consolas", 30), bg = "#00FF87", fg = "#FF003A").pack(fill=BOTH) self.image = Label(self, text="", font=('courier', 20), fg="#83FF00", bg='black') self.image.pack() self.title_header = Label(self, text="Enter plane number (1-8)", font=("consolas", 15), fg = "#00FF87", bg='black').pack() self.plane = ttk.Entry(self, width=50) self.plane.pack(pady=10) self.choose = Button(self, text="Browse", command=self.browse, width=30) self.choose.pack() self.gen_bit_plane_button = Button(self, text="Generate", command=self.generate_bit_plane, width=30, fg="black", bg="red") self.gen_bit_plane_button.pack() self.gen_bit_plane_button.config(state="disabled") self.goback = Button(self, text="Go Back", command=lambda: main.hp.show(), width=30) self.goback.pack() def browse(self): try: f = tfd.askopenfile() self.image['text'] = f.name if self.image['text'] != "": self.gen_bit_plane_button.config(state="normal") except AttributeError: pass def generate_bit_plane(self): try: if int(self.plane.get()) in range(1, 9): bitplane = pvd_bits.generateBitPlane(self.image['text'], int(self.plane.get())) plt.imshow(bitplane, cmap='gray') plt.show() else: tkMessageBox.showinfo("Alert", "Bit plane numbers lie in range 1 to 8 and they are integers.") except: tkMessageBox.showwarning("Alert", "Something is wrong.") class StegoGeneratorPage(Page): # This page is for Stego generation. 
def __init__(self, *args, **kwargs): Page.__init__(self, *args, **kwargs) self.configure(background='black') self.header = Label(self, text="Stego-Generator", font=("consolas", 30), bg = "#00FF87", fg = "#FF003A").pack(fill=BOTH) self.file_chosen = Label(self, text="", font=('courier', 20), fg="#83FF00", bg='black') self.file_chosen.pack(pady=5) self.message_chosen = Label(self, text="", font=('courier', 20), fg="#FF3A00", bg='black') self.message_chosen.pack(pady=5) self.block_size = ttk.Entry(self, width=50) self.block_size.pack(pady=5) self.block_size.insert(END, "Block size goes here...") self.browse_button = Button(self, text="Browse Cover Image", command=self.select_cover, width=25) self.browse_button.pack() self.message_file = Button(self, text="Browse Message Text File", command=self.select_message, width=25) self.message_file.pack() self.embedd_button = Button(self, text="Embedd", command=lambda: threading.Thread(target=self.hide).start(), width=25 , bg = "red", fg="black") self.embedd_button.pack() self.embedd_button.config(state="disabled") self.goback_button = Button(self, text="Go Back", command=self.goback, width=25) self.goback_button.pack() Status_Static = Label(self, text="Status", font=('courier', 15), fg="#FF0017", bg='black').pack() self.status = Label(self, text="Nothing happening right now...", fg="#00FF87", font=('courier', 12), bg='black') self.status.pack() self.selected_image = Label(self, bg='black') self.selected_image.pack(fill=BOTH, expand=YES) def goback(self): main.hp.show() def resize_photo(self, image, width): w, h = image.size asp = w*h**(-1) new_height = width*asp**(-1) image = image.resize((width, int(new_height)), Image.ANTIALIAS) return image def select_cover(self): try: f = tfd.askopenfile() self.file_chosen['text'] = f.name img = Image.open(f.name) self.img = self.resize_photo(img, root.winfo_width()-10) self.img = ImageTk.PhotoImage(self.img) self.selected_image.configure(image = self.img) if self.message_chosen['text'] != "": self.embedd_button.config(state="normal") except AttributeError: pass def select_message(self): try: f = tfd.askopenfile() self.message_chosen['text'] = f.name if self.file_chosen['text'] != "": self.embedd_button.config(state="normal") except AttributeError: pass def hide(self): f = open(self.message_chosen['text'], 'r') message = f.read() f.close() try: block = int(self.block_size.get()) self.status['text'] = "Process initiated. It will take few seconds." key = pvd.pvd.pixelate(self.file_chosen['text'], message, block) self.status['text'] = "Done" tkMessageBox.showinfo("Alert", "Cover image has been encrypted in the same folder. Stego-image: final.final.png") except: tkMessageBox.showwarning("Alert", "Something is wrong!!") class StegoDecoderPage(Page): # this page is for decoding the stego image. 
def __init__(self, *args, **kwargs): Page.__init__(self, *args, **kwargs) self.configure(background='black') self.header = Label(self, text="Stego-Exposer", font=("consolas", 30), bg = "#00FF87", fg = "#FF003A").pack(fill=BOTH) self.file_chosen = Label(self, text="", font=('courier', 20), fg="#83FF00", bg='black') self.file_chosen.pack() self.key_chosen = Label(self, text="", font=('courier', 20), fg="#FF3A00", bg='black') self.key_chosen.pack() self.choose_file = Button(self, text="Choose Stego-Image", command=self.choose_stego, width=25) self.choose_file.pack() self.choose_key = Button(self, text="Choose Key File", command=self.choose_key_file, width=25) self.choose_key.pack() self.decode_button = Button(self, text="Decode Image", command=lambda: threading.Thread(target=self.decode).start(), width=25 , bg = "red", fg="black") self.decode_button.pack() self.decode_button.config(state="disabled") self.show_image = Button(self, text="Show Image", command=self.display, width=25 , bg = "green", fg="white") self.show_image.pack() self.show_image.config(state="disabled") self.goback_button = Button(self, text="Go Back", command=lambda: main.hp.show(), width=25) self.goback_button.pack() self.clear = Button(self, text="Clear", command=lambda: self.area.delete('1.0', END), width=25) self.clear.pack() self.heading = Label(self, text="Decoded text will be displayed here.", font=('courier', 15), fg="#00FF87" , bg='black').pack() scrollbar = Scrollbar(self) scrollbar.pack(side = RIGHT, fill = Y) self.area = Text(self, yscrollcommand = scrollbar.set, bg="#837E7D", fg='black') self.area.pack(expand=True, fill='both') scrollbar.config(command = self.area.yview) def choose_stego(self): try: f = tfd.askopenfile() self.file_chosen['text'] = f.name self.img = Image.open(self.file_chosen['text']) self.show_image.config(state="normal") if self.key_chosen['text'] != "": self.decode_button.config(state="normal") except AttributeError: pass def choose_key_file(self): try: f = tfd.askopenfile() self.key_chosen['text'] = f.name if self.file_chosen['text'] != "": self.decode_button.config(state="normal") self.show_image.config(state="normal") except AttributeError: pass def display(self): plt.imshow(self.img) plt.show() def decode(self): self.message = pvd.pvd.decode(self.file_chosen['text'], self.key_chosen['text']) self.area.delete('1.0', END) self.area.insert(END, self.message) class DevPage(Page): def __init__(self, *args, **kwargs): Page.__init__(self, *args, **kwargs) self.configure(background='black') scrollbar = Scrollbar(self) scrollbar.pack(side = RIGHT, fill = Y) self.area = Text(self, yscrollcommand = scrollbar.set, bg="black", fg='white', bd=0) self.area.pack(expand=True, fill='both') scrollbar.config(command = self.area.yview) self.area.tag_configure("center", justify='center') abtText = open('about.txt', 'r') text = abtText.read() abtText.close() self.area.insert(END, text) self.area.configure(state="disabled") self.area.tag_add("center", 1.0, "end") self.goback_button = Button(self, text="Go Back", command=lambda: main.hp.show(), width=25) self.goback_button.pack() class HomePage(Page): # Homepage. 
def __init__(self, *args, **kwargs): Page.__init__(self, *args, **kwargs) self.configure(background='black') self.heading = Label(self, text = "Image Steganographer", bg = 'black', font=('consolas', 30), fg="#00FF6C").pack(pady=10) self.stego_generator_button = Button(self, text="Stego-Generator", command=lambda: main.sgp.show(), width=19 , bg="white", fg="red", height=5) self.stego_generator_button.pack(pady=10) self.stego_generator_button.place(x=80, y=90) self.stego_decoder_button = Button(self, text="Stego-Exposer", command=lambda: main.sdp.show(), width=19 , bg="white", fg="red", height=5) self.stego_decoder_button.pack(pady=10) self.stego_decoder_button.place(x=420, y=90) self.stego_decoder_button = Button(self, text="Bit Plane Generator", command=lambda: main.bpp.show(), width=19 , bg="white", fg="red", height=5) self.stego_decoder_button.pack(pady=10) self.stego_decoder_button.place(x=80, y=230) self.devbutton = Button(self, text="About this Software", command=lambda: main.dev.show(), width=19 , bg="white", fg="red", height=5) self.devbutton.pack(pady=10) self.devbutton.place(x=420, y=230) self.icon_label = Label(self, bg='black') self.icon_label.pack() self.icon_label.place(x=245, y=400) icon = Image.open('logo.png') icon = ImageTk.PhotoImage(icon) self.icon_label.image = icon self.icon_label.configure(image=icon) class MainView(Frame): def __init__(self, *args, **kwargs): Frame.__init__(self, *args, **kwargs) self.hp = HomePage() self.sgp = StegoGeneratorPage() self.sdp = StegoDecoderPage() self.bpp = BitPlanePage() self.dev = DevPage() self.container = Frame(self) self.container.pack(side='top', fill='both', expand=True) self.hp.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1) self.sgp.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1) self.sdp.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1) self.bpp.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1) self.dev.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1) self.hp.show() if __name__ == '__main__': root = Tk() root.configure(background='black') main = MainView(root) main.pack(side = 'top', fill = 'both', expand = True) root.wm_geometry('700x650') root.resizable(height=0, width=0) root.title('Steganographer') root.mainloop()
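Each long operation above is pushed onto a threading.Thread so the Tkinter mainloop stays responsive, with the worker reporting back by writing to label widgets. A stripped-down sketch of that pattern follows (Python 2 Tkinter, to match the imports above; slow_job is a stand-in for pvd.pvd.pixelate). Updating widgets from a worker thread is tolerated by Tk in simple cases like this, but a Queue polled with after() is the more robust design.

import threading
import time
from Tkinter import Tk, Label, Button   # Python 2, as in the file above


def slow_job(status_label):
    # Stand-in for pvd.pvd.pixelate(...) / pvd.pvd.decode(...).
    status_label['text'] = "Working..."
    time.sleep(2)
    status_label['text'] = "Done"


root = Tk()
status = Label(root, text="Idle")
status.pack()
Button(root, text="Run",
       command=lambda: threading.Thread(target=slow_job, args=(status,)).start()).pack()
root.mainloop()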
receive_test.py
"""FPS_receive_test.py -- receive (text, image) pairs & print FPS stats A test program to provide FPS statistics as different imagenode algorithms are being tested. This program receives images OR images that have been jpg compressed, depending on the setting of the JPG option. It computes and prints FPS statistics. It is designed to be the receiver for the imagenode.py program or one of the test programs in the tests/unit_tests folder. Be sure to run this program and the sending program in virtual environments using Python 3.6 or newer. 1. Edit the options in this python program, such as the JPG option. Save it. 2. Set the yaml options on the imagenode sending RPi in the imagenode.yaml file at the home directory. Be sure that the jpg setting on the RPi matches the setting of JPG below. (If using one of the test programs, use git pull to bring a copy of the test program to the sending RPi) 2. Run this program in its own terminal window on the mac: python receive_test.py. This 'receive the images' program must be running before starting the RPi image sending program. 2. Run the imagenode image sending program on the RPi: python imagenode.py # OR run one of /tests/unit_tests programs on the RPi A cv2.imshow() window will only appear on the Mac that is receiving the tramsmitted images if the "SHOW_IMAGES" option below is set to True. The receiving program will run until the "TEST_DURATION" number of seconds is reached or until Ctrl-C is pressed. When the receiving program ends, it will compute and print FPS statistics and it will stop receiving images and sending ZMQ "REP" replies. That should cause the sending program on the RPi to stall and stop. Or you can end the sending program running on the RPi by pressing Ctrl-C. """ ######################################################################## # EDIT THES OPTIONS BEFORE RUNNING PROGRAM JPG = True # or False if receiving images SHOW_IMAGES = True TEST_DURATION = 30 # seconds or 0 to keep going until Ctrl-C ######################################################################## import cv2 import sys import signal import imagezmq import traceback import numpy as np from time import sleep from imutils.video import FPS from threading import Event, Thread from collections import defaultdict # instantiate image_hub image_hub = imagezmq.ImageHub() def receive_image(): text, image = image_hub.recv_image() return text, image def receive_jpg(): text, jpg_buffer = image_hub.recv_jpg() image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1) return text, image if JPG: receive_tuple = receive_jpg receive_type = 'jpg' else: receive_tuple = receive_image receive_type = 'native OpenCV' image_count = 0 sender_image_counts = defaultdict(int) # dict for counts by sender first_image = True text = None image = None if TEST_DURATION <= 0: TEST_DURATION = 999999 # a large number so Ctrl-C is only stopping method def receive_images_forever(): global image_count, sender_image_counts, first_image, text, image, fps keep_going = Event() keep_going.set() def timer(duration): sleep(duration) keep_going.clear() sleep(10) # allow cleanup finally time while keep_going.is_set(): # receive images until timer expires or Ctrl-C text, image = receive_tuple() if first_image: print('First Image Received. 
Starting FPS timer.') fps = FPS().start() # start FPS timer after first image is received Thread(target=timer, daemon=True, args=(TEST_DURATION,)).start() first_image = False fps.update() image_count += 1 # global count of all images received sender_image_counts[text] += 1 # count images for each RPi name if SHOW_IMAGES: cv2.imshow(text, image) # display images 1 window per unique text cv2.waitKey(1) image_hub.send_reply(b'OK') # REP reply try: print('FPS Test Program: ', __file__) print('Option settings:') print(' Receive Image Type:', receive_type) print(' Show Images:', SHOW_IMAGES) print(' Test Duration:', TEST_DURATION, ' seconds') receive_images_forever() sys.exit() except (KeyboardInterrupt, SystemExit): pass # Ctrl-C was pressed to end program; FPS stats computed below except Exception as ex: print('Python error with no Exception handler:') print('Traceback error:', ex) traceback.print_exc() finally: # stop the timer and display FPS information print() print('Total Number of Images received: {:,g}'.format(image_count)) if first_image: # never got images from any sender print('Never got any images from imagenode. Ending program.') sys.exit() fps.stop() print('Number of Images received for each text message type:') for text_message in sender_image_counts: print(' ', text_message, ': {:,g}'.format(sender_image_counts[text_message])) if JPG: compressed_size = len(image) print('Size of last jpg buffer received: {:,g} bytes'.format(compressed_size)) else: compressed_size = 1 image_size = image.shape print('Dimensions of last image received: ', image_size) uncompressed_size = 1 for dimension in image_size: uncompressed_size *= dimension print(' = {:,} bytes'.format(uncompressed_size)) print('Compressed to Uncompressed ratio: {:.8f}'.format(compressed_size / uncompressed_size)) print('Elasped time: {:,.2f} seconds'.format(fps.elapsed())) print('Approximate FPS: {:,.2f}'.format(fps.fps())) cv2.destroyAllWindows() # closes the windows opened by cv2.imshow() image_hub.close() # closes ZMQ socket and context sys.exit()
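For completeness, here is a minimal sender-side sketch that pairs with the hub above when JPG is True: it compresses a frame with cv2.imencode and ships it through imagezmq's REQ/REP sender. The connect address is hypothetical and the black test frame stands in for a real camera capture.

import cv2
import imagezmq
import numpy as np

# Hypothetical hub address; imagenode normally points this at the Mac running the hub.
sender = imagezmq.ImageSender(connect_to='tcp://192.168.1.100:5555')

frame = np.zeros((480, 640, 3), dtype='uint8')   # stand-in for a camera frame
ret_code, jpg_buffer = cv2.imencode('.jpg', frame,
                                    [int(cv2.IMWRITE_JPEG_QUALITY), 95])
reply = sender.send_jpg('test-rpi', jpg_buffer)  # blocks until the hub replies
print(reply)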
test_base.py
import asyncio import fcntl import logging import os import sys import threading import time import uvloop import unittest import weakref from unittest import mock from uvloop._testbase import UVTestCase, AIOTestCase class _TestBase: def test_close(self): self.assertFalse(self.loop._closed) self.assertFalse(self.loop.is_closed()) self.loop.close() self.assertTrue(self.loop._closed) self.assertTrue(self.loop.is_closed()) # it should be possible to call close() more than once self.loop.close() self.loop.close() # operation blocked when the loop is closed f = asyncio.Future() self.assertRaises(RuntimeError, self.loop.run_forever) self.assertRaises(RuntimeError, self.loop.run_until_complete, f) def test_handle_weakref(self): wd = weakref.WeakValueDictionary() h = self.loop.call_soon(lambda: None) wd['h'] = h # Would fail without __weakref__ slot. def test_call_soon_1(self): calls = [] def cb(inc): calls.append(inc) self.loop.stop() self.loop.call_soon(cb, 10) h = self.loop.call_soon(cb, 100) self.assertIn('.cb', repr(h)) h.cancel() self.assertIn('cancelled', repr(h)) self.loop.call_soon(cb, 1) self.loop.run_forever() self.assertEqual(calls, [10, 1]) def test_call_soon_2(self): waiter = self.loop.create_future() waiter_r = weakref.ref(waiter) self.loop.call_soon(lambda f: f.set_result(None), waiter) self.loop.run_until_complete(waiter) del waiter self.assertIsNone(waiter_r()) def test_call_soon_3(self): waiter = self.loop.create_future() waiter_r = weakref.ref(waiter) self.loop.call_soon(lambda f=waiter: f.set_result(None)) self.loop.run_until_complete(waiter) del waiter self.assertIsNone(waiter_r()) def test_call_soon_base_exc(self): def cb(): raise KeyboardInterrupt() self.loop.call_soon(cb) with self.assertRaises(KeyboardInterrupt): self.loop.run_forever() self.assertFalse(self.loop.is_closed()) def test_calls_debug_reporting(self): def run_test(debug, meth, stack_adj): context = None def handler(loop, ctx): nonlocal context context = ctx self.loop.set_debug(debug) self.loop.set_exception_handler(handler) def cb(): 1 / 0 meth(cb) self.assertIsNone(context) self.loop.run_until_complete(asyncio.sleep(0.05)) self.assertIs(type(context['exception']), ZeroDivisionError) self.assertTrue(context['message'].startswith( 'Exception in callback')) if debug: tb = context['source_traceback'] self.assertEqual(tb[-1 + stack_adj].name, 'run_test') else: self.assertFalse('source_traceback' in context) del context for debug in (True, False): for meth_name, meth, stack_adj in ( ('call_soon', self.loop.call_soon, 0), ('call_later', # `-1` accounts for lambda lambda *args: self.loop.call_later(0.01, *args), -1) ): with self.subTest(debug=debug, meth_name=meth_name): run_test(debug, meth, stack_adj) def test_now_update(self): async def run(): st = self.loop.time() time.sleep(0.05) return self.loop.time() - st delta = self.loop.run_until_complete(run()) self.assertTrue(delta > 0.049 and delta < 0.6) def test_call_later_1(self): calls = [] def cb(inc=10, stop=False): calls.append(inc) self.assertTrue(self.loop.is_running()) if stop: self.loop.call_soon(self.loop.stop) self.loop.call_later(0.05, cb) # canceled right away h = self.loop.call_later(0.05, cb, 100, True) self.assertIn('.cb', repr(h)) h.cancel() self.assertIn('cancelled', repr(h)) self.loop.call_later(0.05, cb, 1, True) self.loop.call_later(1000, cb, 1000) # shouldn't be called started = time.monotonic() self.loop.run_forever() finished = time.monotonic() self.assertEqual(calls, [10, 1]) self.assertFalse(self.loop.is_running()) self.assertLess(finished - 
started, 0.1) self.assertGreater(finished - started, 0.04) def test_call_later_2(self): # Test that loop.call_later triggers an update of # libuv cached time. async def main(): await asyncio.sleep(0.001) time.sleep(0.01) await asyncio.sleep(0.01) started = time.monotonic() self.loop.run_until_complete(main()) delta = time.monotonic() - started self.assertGreater(delta, 0.019) def test_call_later_3(self): # a memory leak regression test waiter = self.loop.create_future() waiter_r = weakref.ref(waiter) self.loop.call_later(0.01, lambda f: f.set_result(None), waiter) self.loop.run_until_complete(waiter) del waiter self.assertIsNone(waiter_r()) def test_call_later_4(self): # a memory leak regression test waiter = self.loop.create_future() waiter_r = weakref.ref(waiter) self.loop.call_later(0.01, lambda f=waiter: f.set_result(None)) self.loop.run_until_complete(waiter) del waiter self.assertIsNone(waiter_r()) def test_call_later_negative(self): calls = [] def cb(arg): calls.append(arg) self.loop.stop() self.loop.call_later(-1, cb, 'a') self.loop.run_forever() self.assertEqual(calls, ['a']) def test_call_later_rounding(self): # Refs #233, call_later() and call_at() shouldn't call cb early def cb(): self.loop.stop() for i in range(8): self.loop.call_later(0.06 + 0.01, cb) # 0.06999999999999999 started = int(round(self.loop.time() * 1000)) self.loop.run_forever() finished = int(round(self.loop.time() * 1000)) self.assertGreaterEqual(finished - started, 69) def test_call_at(self): if os.environ.get('TRAVIS_OS_NAME'): # Time seems to be really unpredictable on Travis. raise unittest.SkipTest('time is not monotonic on Travis') i = 0 def cb(inc): nonlocal i i += inc self.loop.stop() at = self.loop.time() + 0.05 self.loop.call_at(at, cb, 100).cancel() self.loop.call_at(at, cb, 10) started = time.monotonic() self.loop.run_forever() finished = time.monotonic() self.assertEqual(i, 10) self.assertLess(finished - started, 0.07) self.assertGreater(finished - started, 0.045) def test_check_thread(self): def check_thread(loop, debug): def cb(): pass loop.set_debug(debug) if debug: msg = ("Non-thread-safe operation invoked on an " "event loop other than the current one") with self.assertRaisesRegex(RuntimeError, msg): loop.call_soon(cb) with self.assertRaisesRegex(RuntimeError, msg): loop.call_later(60, cb) with self.assertRaisesRegex(RuntimeError, msg): loop.call_at(loop.time() + 60, cb) else: loop.call_soon(cb) loop.call_later(60, cb) loop.call_at(loop.time() + 60, cb) def check_in_thread(loop, event, debug, create_loop, fut): # wait until the event loop is running event.wait() try: if create_loop: loop2 = self.new_loop() try: asyncio.set_event_loop(loop2) check_thread(loop, debug) finally: asyncio.set_event_loop(None) loop2.close() else: check_thread(loop, debug) except Exception as exc: loop.call_soon_threadsafe(fut.set_exception, exc) else: loop.call_soon_threadsafe(fut.set_result, None) def test_thread(loop, debug, create_loop=False): event = threading.Event() fut = asyncio.Future(loop=loop) loop.call_soon(event.set) args = (loop, event, debug, create_loop, fut) thread = threading.Thread(target=check_in_thread, args=args) thread.start() loop.run_until_complete(fut) thread.join() # raise RuntimeError if the thread has no event loop test_thread(self.loop, True) # check disabled if debug mode is disabled test_thread(self.loop, False) # raise RuntimeError if the event loop of the thread is not the called # event loop test_thread(self.loop, True, create_loop=True) # check disabled if debug mode is disabled 
test_thread(self.loop, False, create_loop=True) def test_run_once_in_executor_plain(self): called = [] def cb(arg): called.append(arg) async def runner(): await self.loop.run_in_executor(None, cb, 'a') self.loop.run_until_complete(runner()) self.assertEqual(called, ['a']) def test_set_debug(self): self.loop.set_debug(True) self.assertTrue(self.loop.get_debug()) self.loop.set_debug(False) self.assertFalse(self.loop.get_debug()) def test_run_until_complete_type_error(self): self.assertRaises( TypeError, self.loop.run_until_complete, 'blah') def test_run_until_complete_loop(self): task = asyncio.Future() other_loop = self.new_loop() self.addCleanup(other_loop.close) self.assertRaises( ValueError, other_loop.run_until_complete, task) def test_run_until_complete_error(self): async def foo(): raise ValueError('aaa') with self.assertRaisesRegex(ValueError, 'aaa'): self.loop.run_until_complete(foo()) def test_run_until_complete_loop_orphan_future_close_loop(self): if self.implementation == 'asyncio' and sys.version_info < (3, 6, 2): raise unittest.SkipTest('unfixed asyncio') async def foo(delay): await asyncio.sleep(delay) def throw(): raise KeyboardInterrupt self.loop.call_soon(throw) try: self.loop.run_until_complete(foo(0.1)) except KeyboardInterrupt: pass # This call fails if run_until_complete does not clean up # done-callback for the previous future. self.loop.run_until_complete(foo(0.2)) def test_debug_slow_callbacks(self): logger = logging.getLogger('asyncio') self.loop.set_debug(True) self.loop.slow_callback_duration = 0.2 self.loop.call_soon(lambda: time.sleep(0.3)) with mock.patch.object(logger, 'warning') as log: self.loop.run_until_complete(asyncio.sleep(0)) self.assertEqual(log.call_count, 1) # format message msg = log.call_args[0][0] % log.call_args[0][1:] self.assertIn('Executing <Handle', msg) self.assertIn('test_debug_slow_callbacks', msg) def test_debug_slow_timer_callbacks(self): logger = logging.getLogger('asyncio') self.loop.set_debug(True) self.loop.slow_callback_duration = 0.2 self.loop.call_later(0.01, lambda: time.sleep(0.3)) with mock.patch.object(logger, 'warning') as log: self.loop.run_until_complete(asyncio.sleep(0.02)) self.assertEqual(log.call_count, 1) # format message msg = log.call_args[0][0] % log.call_args[0][1:] self.assertIn('Executing <TimerHandle', msg) self.assertIn('test_debug_slow_timer_callbacks', msg) def test_debug_slow_task_callbacks(self): logger = logging.getLogger('asyncio') self.loop.set_debug(True) self.loop.slow_callback_duration = 0.2 async def foo(): time.sleep(0.3) with mock.patch.object(logger, 'warning') as log: self.loop.run_until_complete(foo()) self.assertEqual(log.call_count, 1) # format message msg = log.call_args[0][0] % log.call_args[0][1:] self.assertIn('Executing <Task finished', msg) self.assertIn('test_debug_slow_task_callbacks', msg) def test_default_exc_handler_callback(self): self.loop.set_exception_handler(None) self.loop._process_events = mock.Mock() def zero_error(fut): fut.set_result(True) 1 / 0 logger = logging.getLogger('asyncio') # Test call_soon (events.Handle) with mock.patch.object(logger, 'error') as log: fut = asyncio.Future() self.loop.call_soon(zero_error, fut) fut.add_done_callback(lambda fut: self.loop.stop()) self.loop.run_forever() log.assert_called_with( self.mock_pattern('Exception in callback.*zero'), exc_info=mock.ANY) # Test call_later (events.TimerHandle) with mock.patch.object(logger, 'error') as log: fut = asyncio.Future() self.loop.call_later(0.01, zero_error, fut) fut.add_done_callback(lambda fut: 
self.loop.stop()) self.loop.run_forever() log.assert_called_with( self.mock_pattern('Exception in callback.*zero'), exc_info=mock.ANY) def test_set_exc_handler_custom(self): self.loop.set_exception_handler(None) logger = logging.getLogger('asyncio') def run_loop(): def zero_error(): self.loop.stop() 1 / 0 self.loop.call_soon(zero_error) self.loop.run_forever() errors = [] def handler(loop, exc): errors.append(exc) self.loop.set_debug(True) if hasattr(self.loop, 'get_exception_handler'): # Available since Python 3.5.2 self.assertIsNone(self.loop.get_exception_handler()) self.loop.set_exception_handler(handler) if hasattr(self.loop, 'get_exception_handler'): self.assertIs(self.loop.get_exception_handler(), handler) run_loop() self.assertEqual(len(errors), 1) self.assertRegex(errors[-1]['message'], 'Exception in callback.*zero_error') self.loop.set_exception_handler(None) with mock.patch.object(logger, 'error') as log: run_loop() log.assert_called_with( self.mock_pattern('Exception in callback.*zero'), exc_info=mock.ANY) self.assertEqual(len(errors), 1) def test_set_exc_handler_broken(self): logger = logging.getLogger('asyncio') def run_loop(): def zero_error(): self.loop.stop() 1 / 0 self.loop.call_soon(zero_error) self.loop.run_forever() def handler(loop, context): raise AttributeError('spam') self.loop._process_events = mock.Mock() self.loop.set_exception_handler(handler) with mock.patch.object(logger, 'error') as log: run_loop() log.assert_called_with( self.mock_pattern('Unhandled error in exception handler'), exc_info=mock.ANY) def test_set_task_factory_invalid(self): with self.assertRaisesRegex( TypeError, 'task factory must be a callable or None'): self.loop.set_task_factory(1) self.assertIsNone(self.loop.get_task_factory()) def test_set_task_factory(self): self.loop._process_events = mock.Mock() class MyTask(asyncio.Task): pass async def coro(): pass factory = lambda loop, coro: MyTask(coro, loop=loop) self.assertIsNone(self.loop.get_task_factory()) self.loop.set_task_factory(factory) self.assertIs(self.loop.get_task_factory(), factory) task = self.loop.create_task(coro()) self.assertTrue(isinstance(task, MyTask)) self.loop.run_until_complete(task) self.loop.set_task_factory(None) self.assertIsNone(self.loop.get_task_factory()) task = self.loop.create_task(coro()) self.assertTrue(isinstance(task, asyncio.Task)) self.assertFalse(isinstance(task, MyTask)) self.loop.run_until_complete(task) def _compile_agen(self, src): try: g = {} exec(src, globals(), g) except SyntaxError: # Python < 3.6 raise unittest.SkipTest() else: return g['waiter'] def test_shutdown_asyncgens_01(self): finalized = list() if not hasattr(self.loop, 'shutdown_asyncgens'): raise unittest.SkipTest() waiter = self._compile_agen( '''async def waiter(timeout, finalized): try: await asyncio.sleep(timeout) yield 1 finally: await asyncio.sleep(0) finalized.append(1) ''') async def wait(): async for _ in waiter(1, finalized): pass t1 = self.loop.create_task(wait()) t2 = self.loop.create_task(wait()) self.loop.run_until_complete(asyncio.sleep(0.1)) t1.cancel() t2.cancel() self.loop.run_until_complete(self.loop.shutdown_asyncgens()) self.assertEqual(finalized, [1, 1]) for t in {t1, t2}: try: self.loop.run_until_complete(t) except asyncio.CancelledError: pass def test_shutdown_asyncgens_02(self): if not hasattr(self.loop, 'shutdown_asyncgens'): raise unittest.SkipTest() logged = 0 def logger(loop, context): nonlocal logged self.assertIn('asyncgen', context) expected = 'an error occurred during closing of asynchronous' if 
expected in context['message']: logged += 1 waiter = self._compile_agen('''async def waiter(timeout): try: await asyncio.sleep(timeout) yield 1 finally: 1 / 0 ''') async def wait(): async for _ in waiter(1): pass t = self.loop.create_task(wait()) self.loop.run_until_complete(asyncio.sleep(0.1)) self.loop.set_exception_handler(logger) self.loop.run_until_complete(self.loop.shutdown_asyncgens()) self.assertEqual(logged, 1) # Silence warnings t.cancel() self.loop.run_until_complete(asyncio.sleep(0.1)) def test_shutdown_asyncgens_03(self): if not hasattr(self.loop, 'shutdown_asyncgens'): raise unittest.SkipTest() waiter = self._compile_agen('''async def waiter(): yield 1 yield 2 ''') async def foo(): # We specifically want to hit _asyncgen_finalizer_hook # method. await waiter().asend(None) self.loop.run_until_complete(foo()) self.loop.run_until_complete(asyncio.sleep(0.01)) def test_inf_wait_for(self): async def foo(): await asyncio.sleep(0.1) return 123 res = self.loop.run_until_complete( asyncio.wait_for(foo(), timeout=float('inf'))) self.assertEqual(res, 123) class TestBaseUV(_TestBase, UVTestCase): def test_loop_create_future(self): fut = self.loop.create_future() self.assertTrue(isinstance(fut, asyncio.Future)) self.assertIs(fut._loop, self.loop) fut.cancel() def test_loop_call_soon_handle_cancelled(self): cb = lambda: False # NoQA handle = self.loop.call_soon(cb) self.assertFalse(handle.cancelled()) handle.cancel() self.assertTrue(handle.cancelled()) handle = self.loop.call_soon(cb) self.assertFalse(handle.cancelled()) self.run_loop_briefly() self.assertFalse(handle.cancelled()) def test_loop_call_later_handle_cancelled(self): cb = lambda: False # NoQA handle = self.loop.call_later(0.01, cb) self.assertFalse(handle.cancelled()) handle.cancel() self.assertTrue(handle.cancelled()) handle = self.loop.call_later(0.01, cb) self.assertFalse(handle.cancelled()) self.run_loop_briefly(delay=0.05) self.assertFalse(handle.cancelled()) def test_loop_std_files_cloexec(self): # See https://github.com/MagicStack/uvloop/issues/40 for details. for fd in {0, 1, 2}: flags = fcntl.fcntl(fd, fcntl.F_GETFD) self.assertFalse(flags & fcntl.FD_CLOEXEC) def test_default_exc_handler_broken(self): logger = logging.getLogger('asyncio') _context = None class Loop(uvloop.Loop): _selector = mock.Mock() _process_events = mock.Mock() def default_exception_handler(self, context): nonlocal _context _context = context # Simulates custom buggy "default_exception_handler" raise ValueError('spam') loop = Loop() self.addCleanup(loop.close) self.addCleanup(lambda: asyncio.set_event_loop(None)) asyncio.set_event_loop(loop) def run_loop(): def zero_error(): loop.stop() 1 / 0 loop.call_soon(zero_error) loop.run_forever() with mock.patch.object(logger, 'error') as log: run_loop() log.assert_called_with( 'Exception in default exception handler', exc_info=True) def custom_handler(loop, context): raise ValueError('ham') _context = None loop.set_exception_handler(custom_handler) with mock.patch.object(logger, 'error') as log: run_loop() log.assert_called_with( self.mock_pattern('Exception in default exception.*' 'while handling.*in custom'), exc_info=True) # Check that original context was passed to default # exception handler. 
self.assertIn('context', _context) self.assertIs(type(_context['context']['exception']), ZeroDivisionError) def test_big_call_later_timeout(self): OK, NOT_OK = 0, 0 async def sleep(delay_name, delay): nonlocal OK, NOT_OK try: await asyncio.sleep(delay) except asyncio.CancelledError: OK += 1 except Exception: NOT_OK += 1 async def main(): tests = [ sleep("infinity", float("inf")), sleep("sys.maxsize", float(sys.maxsize)), sleep("sys.maxsize", sys.maxsize), sleep("2**55", 2**55), sleep("2**54", 2**54), ] tasks = [self.loop.create_task(test) for test in tests] await asyncio.sleep(0.1) for task in tasks: task.cancel() await task self.loop.run_until_complete(main()) self.assertEqual(OK, 5) self.assertEqual(NOT_OK, 0) class TestBaseAIO(_TestBase, AIOTestCase): pass class TestPolicy(unittest.TestCase): def test_uvloop_policy(self): try: asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) loop = asyncio.new_event_loop() try: self.assertIsInstance(loop, uvloop.Loop) finally: loop.close() finally: asyncio.set_event_loop_policy(None) @unittest.skipUnless(hasattr(asyncio, '_get_running_loop'), 'No asyncio._get_running_loop') def test_running_loop_within_a_loop(self): async def runner(loop): loop.run_forever() try: asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) loop = asyncio.new_event_loop() outer_loop = asyncio.new_event_loop() try: with self.assertRaisesRegex(RuntimeError, 'while another loop is running'): outer_loop.run_until_complete(runner(loop)) finally: loop.close() outer_loop.close() finally: asyncio.set_event_loop_policy(None) @unittest.skipUnless(hasattr(asyncio, '_get_running_loop'), 'No asyncio._get_running_loop') def test_get_event_loop_returns_running_loop(self): class Policy(asyncio.DefaultEventLoopPolicy): def get_event_loop(self): raise NotImplementedError loop = None old_policy = asyncio.get_event_loop_policy() try: asyncio.set_event_loop_policy(Policy()) loop = uvloop.new_event_loop() self.assertIs(asyncio._get_running_loop(), None) async def func(): self.assertIs(asyncio.get_event_loop(), loop) self.assertIs(asyncio._get_running_loop(), loop) loop.run_until_complete(func()) finally: asyncio.set_event_loop_policy(old_policy) if loop is not None: loop.close() self.assertIs(asyncio._get_running_loop(), None)
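As a standalone illustration of what these tests exercise, the sketch below (not part of the test suite) installs uvloop through the event loop policy and runs the same call_soon / call_later / run_until_complete patterns; the callback and coroutine names are invented for the example.

# uvloop_basics_sketch.py -- minimal sketch of the loop API covered above.
import asyncio
import uvloop

def main():
    # Use uvloop's loop implementation for plain asyncio code,
    # as TestPolicy.test_uvloop_policy does.
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    loop = asyncio.new_event_loop()
    try:
        calls = []

        def cb(arg):
            calls.append(arg)
            loop.stop()

        # call_soon runs on the next loop iteration; call_later after a delay.
        loop.call_soon(cb, 'soon')
        handle = loop.call_later(60, cb, 'never')  # cancelled before it can fire
        handle.cancel()
        loop.run_forever()
        assert calls == ['soon']

        # run_until_complete drives a coroutine to completion.
        async def delayed(value, delay):
            await asyncio.sleep(delay)
            return value

        assert loop.run_until_complete(delayed(42, 0.01)) == 42
    finally:
        loop.close()
        asyncio.set_event_loop_policy(None)

if __name__ == '__main__':
    main()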
test_smtpserver.py
# Copyright The IETF Trust 2014-2019, All Rights Reserved
# -*- coding: utf-8 -*-

from __future__ import absolute_import, print_function, unicode_literals

import smtpd
import threading
import asyncore
import six

import debug                            # pyflakes:ignore


class AsyncCoreLoopThread(object):

    def wrap_loop(self, exit_condition, timeout=1.0, use_poll=False, map=None):
        if map is None:
            map = asyncore.socket_map
        while map and not exit_condition:
            asyncore.loop(timeout=1.0, use_poll=False, map=map, count=1)

    def start(self):
        """Start the listening service"""
        self.exit_condition = []
        kwargs = {'exit_condition': self.exit_condition, 'timeout': 1.0}
        self.thread = threading.Thread(target=self.wrap_loop, kwargs=kwargs)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop the listening service"""
        self.exit_condition.append(True)
        self.thread.join()


class SMTPTestChannel(smtpd.SMTPChannel):

    # mail_options = ['BODY=8BITMIME', 'SMTPUTF8']

    def smtp_RCPT(self, arg):
        if (six.PY2 and not self._SMTPChannel__mailfrom) or (six.PY3 and not self.mailfrom):
            self.push(str('503 Error: need MAIL command'))
            return
        if six.PY2:
            address = self._SMTPChannel__getaddr('TO:', arg) if arg else None
        else:
            arg = self._strip_command_keyword('TO:', arg)
            address, __ = self._getaddr(arg)
        if not address:
            self.push(str('501 Syntax: RCPT TO: <address>'))
            return
        if "poison" in address:
            self.push(str('550 Error: Not touching that'))
            return
        if six.PY2:
            self._SMTPChannel__rcpttos.append(address)
        else:
            self.rcpt_options = []
            self.rcpttos.append(address)
        self.push(str('250 Ok'))


class SMTPTestServer(smtpd.SMTPServer):

    def __init__(self, localaddr, remoteaddr, inbox):
        if inbox is not None:
            self.inbox = inbox
        else:
            self.inbox = []
        smtpd.SMTPServer.__init__(self, localaddr, remoteaddr)

    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            conn, addr = pair
            #channel = SMTPTestChannel(self, conn, addr)
            SMTPTestChannel(self, conn, addr)

    def process_message(self, peer, mailfrom, rcpttos, data, mail_options=[], rcpt_options=[]):
        self.inbox.append(data)


class SMTPTestServerDriver(object):
    def __init__(self, localaddr, remoteaddr, inbox=None):
        self.localaddr = localaddr
        self.remoteaddr = remoteaddr
        if inbox is not None:
            self.inbox = inbox
        else:
            self.inbox = []
        self.thread_driver = None

    def start(self):
        self.smtpserver = SMTPTestServer(self.localaddr, self.remoteaddr, self.inbox)
        self.thread_driver = AsyncCoreLoopThread()
        self.thread_driver.start()

    def stop(self):
        if self.thread_driver:
            self.thread_driver.stop()
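A usage sketch (assumed, not part of the module) showing how SMTPTestServerDriver is typically driven from a test: start it, deliver one message with smtplib, then inspect the shared inbox list. The port 2025 and the addresses are arbitrary choices; note that smtpd and asyncore were removed from the standard library in Python 3.12, so this only applies on older interpreters.

# demo_smtp_test_server.py -- illustrative usage of the classes above.
import smtplib
from email.mime.text import MIMEText

def _demo():
    inbox = []
    driver = SMTPTestServerDriver(('localhost', 2025), None, inbox)
    driver.start()
    try:
        msg = MIMEText('Hello from the test suite')
        msg['Subject'] = 'test'
        msg['From'] = 'sender@example.com'
        msg['To'] = 'recipient@example.com'
        client = smtplib.SMTP('localhost', 2025)
        try:
            client.sendmail('sender@example.com', ['recipient@example.com'], msg.as_string())
        finally:
            client.quit()
        assert len(inbox) == 1   # process_message() appended the raw message data
        # A recipient containing "poison" is refused with a 550 reply, so
        # smtplib would raise SMTPRecipientsRefused for such an address.
    finally:
        driver.stop()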
installwizard.py
from functools import partial import threading from kivy.app import App from kivy.clock import Clock from kivy.lang import Builder from kivy.properties import ObjectProperty, StringProperty, OptionProperty from kivy.core.window import Window from kivy.uix.button import Button from kivy.utils import platform from kivy.uix.widget import Widget from kivy.core.window import Window from kivy.clock import Clock from kivy.utils import platform from electrum_ltc_gui.kivy.uix.dialogs import EventsDialog from electrum_ltc_gui.kivy.i18n import _ from electrum_ltc.base_wizard import BaseWizard from password_dialog import PasswordDialog # global Variables app = App.get_running_app() is_test = (platform == "linux") test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve" test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL" Builder.load_string(''' #:import Window kivy.core.window.Window #:import _ reddelectrum_gui.kivy.i18n._ <WizardTextInput@TextInput> border: 4, 4, 4, 4 font_size: '15sp' padding: '15dp', '15dp' background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1) foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1) hint_text_color: self.foreground_color background_active: 'atlas://gui/kivy/theming/light/create_act_text_active' background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active' size_hint_y: None height: '48sp' <WizardButton@Button>: root: None size_hint: 1, None height: '48sp' on_press: if self.root: self.root.dispatch('on_press', self) on_release: if self.root: self.root.dispatch('on_release', self) <BigLabel@Label> color: .854, .925, .984, 1 size_hint: 1, None text_size: self.width, None height: self.texture_size[1] bold: True <-WizardDialog> text_color: .854, .925, .984, 1 value: '' #auto_dismiss: False size_hint: None, None canvas.before: Color: rgba: 0, 0, 0, .9 Rectangle: size: Window.size Color: rgba: .239, .588, .882, 1 Rectangle: size: Window.size crcontent: crcontent # add electrum icon BoxLayout: orientation: 'vertical' if self.width < self.height else 'horizontal' padding: min(dp(27), self.width/32), min(dp(27), self.height/32),\ min(dp(27), self.width/32), min(dp(27), self.height/32) spacing: '10dp' GridLayout: id: grid_logo cols: 1 pos_hint: {'center_y': .5} size_hint: 1, None height: self.minimum_height Label: color: root.text_color text: 'ELECTRUM' size_hint: 1, None height: self.texture_size[1] if self.opacity else 0 font_size: '33sp' font_name: 'gui/kivy/data/fonts/tron/Tr2n.ttf' GridLayout: cols: 1 id: crcontent spacing: '1dp' Widget: size_hint: 1, 0.3 GridLayout: rows: 1 spacing: '12dp' size_hint: 1, None height: self.minimum_height WizardButton: id: back text: _('Back') root: root WizardButton: id: next text: _('Next') root: root disabled: root.value == '' <WizardMultisigDialog> value: 'next' Widget size_hint: 1, 1 Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: _("Choose the number of signatures needed to unlock funds in your wallet") Widget size_hint: 1, 1 GridLayout: orientation: 'vertical' cols: 2 spacing: '14dp' size_hint: 1, 1 height: self.minimum_height Label: color: root.text_color text: _('From %d cosigners')%n.value Slider: id: n range: 2, 5 step: 1 value: 2 Label: color: root.text_color text: _('Require %d signatures')%m.value Slider: id: m range: 1, n.value step: 1 value: 2 <WizardChoiceDialog> message : '' Widget: 
size_hint: 1, 1 Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: root.message Widget size_hint: 1, 1 GridLayout: row_default_height: '48dp' orientation: 'vertical' id: choices cols: 1 spacing: '14dp' size_hint: 1, None <MButton@Button>: size_hint: 1, None height: '33dp' on_release: self.parent.update_amount(self.text) <WordButton@Button>: size_hint: None, None padding: '5dp', '5dp' text_size: None, self.height width: self.texture_size[0] height: '30dp' on_release: self.parent.new_word(self.text) <SeedButton@Button>: height: dp(100) border: 4, 4, 4, 4 halign: 'justify' valign: 'top' font_size: '18dp' text_size: self.width - dp(24), self.height - dp(12) color: .1, .1, .1, 1 background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top' background_down: self.background_normal size_hint_y: None <SeedLabel@Label>: font_size: '12sp' text_size: self.width, None size_hint: 1, None height: self.texture_size[1] halign: 'justify' valign: 'middle' border: 4, 4, 4, 4 <RestoreSeedDialog> message: '' word: '' BigLabel: text: "ENTER YOUR SEED PHRASE" GridLayout cols: 1 padding: 0, '12dp' orientation: 'vertical' spacing: '12dp' size_hint: 1, None height: self.minimum_height SeedButton: id: text_input_seed text: '' on_text: Clock.schedule_once(root.on_text) on_release: root.options_dialog() SeedLabel: text: root.message BoxLayout: id: suggestions height: '35dp' size_hint: 1, None new_word: root.on_word BoxLayout: id: line1 update_amount: root.update_text size_hint: 1, None height: '30dp' MButton: text: 'Q' MButton: text: 'W' MButton: text: 'E' MButton: text: 'R' MButton: text: 'T' MButton: text: 'Y' MButton: text: 'U' MButton: text: 'I' MButton: text: 'O' MButton: text: 'P' BoxLayout: id: line2 update_amount: root.update_text size_hint: 1, None height: '30dp' Widget: size_hint: 0.5, None height: '33dp' MButton: text: 'A' MButton: text: 'S' MButton: text: 'D' MButton: text: 'F' MButton: text: 'G' MButton: text: 'H' MButton: text: 'J' MButton: text: 'K' MButton: text: 'L' Widget: size_hint: 0.5, None height: '33dp' BoxLayout: id: line3 update_amount: root.update_text size_hint: 1, None height: '30dp' Widget: size_hint: 1, None MButton: text: 'Z' MButton: text: 'X' MButton: text: 'C' MButton: text: 'V' MButton: text: 'B' MButton: text: 'N' MButton: text: 'M' MButton: text: ' ' MButton: text: '<' <AddXpubDialog> title: '' message: '' BigLabel: text: root.title GridLayout cols: 1 padding: 0, '12dp' orientation: 'vertical' spacing: '12dp' size_hint: 1, None height: self.minimum_height SeedButton: id: text_input text: '' on_text: Clock.schedule_once(root.check_text) SeedLabel: text: root.message GridLayout rows: 1 spacing: '12dp' size_hint: 1, None height: self.minimum_height IconButton: id: scan height: '48sp' on_release: root.scan_xpub() icon: 'atlas://gui/kivy/theming/light/camera' size_hint: 1, None WizardButton: text: _('Paste') on_release: root.do_paste() WizardButton: text: _('Clear') on_release: root.do_clear() <ShowXpubDialog> xpub: '' message: _('Here is your master public key. 
Share it with your cosigners.') BigLabel: text: "MASTER PUBLIC KEY" GridLayout cols: 1 padding: 0, '12dp' orientation: 'vertical' spacing: '12dp' size_hint: 1, None height: self.minimum_height SeedButton: id: text_input text: root.xpub SeedLabel: text: root.message GridLayout rows: 1 spacing: '12dp' size_hint: 1, None height: self.minimum_height WizardButton: text: _('QR code') on_release: root.do_qr() WizardButton: text: _('Copy') on_release: root.do_copy() WizardButton: text: _('Share') on_release: root.do_share() <ShowSeedDialog> spacing: '12dp' value: 'next' BigLabel: text: "PLEASE WRITE DOWN YOUR SEED PHRASE" GridLayout: id: grid cols: 1 pos_hint: {'center_y': .5} size_hint_y: None height: self.minimum_height orientation: 'vertical' spacing: '12dp' SeedButton: text: root.seed_text on_release: root.options_dialog() SeedLabel: text: root.message <LineDialog> BigLabel: text: root.title SeedLabel: text: root.message TextInput: id: passphrase_input multiline: False size_hint: 1, None height: '27dp' SeedLabel: text: root.warning ''') class WizardDialog(EventsDialog): ''' Abstract dialog to be used as the base for all Create Account Dialogs ''' crcontent = ObjectProperty(None) def __init__(self, wizard, **kwargs): super(WizardDialog, self).__init__(**kwargs) self.wizard = wizard self.ids.back.disabled = not wizard.can_go_back() self.app = App.get_running_app() self.run_next = kwargs['run_next'] _trigger_size_dialog = Clock.create_trigger(self._size_dialog) Window.bind(size=_trigger_size_dialog, rotation=_trigger_size_dialog) _trigger_size_dialog() self._on_release = False def _size_dialog(self, dt): app = App.get_running_app() if app.ui_mode[0] == 'p': self.size = Window.size else: #tablet if app.orientation[0] == 'p': #portrait self.size = Window.size[0]/1.67, Window.size[1]/1.4 else: self.size = Window.size[0]/2.5, Window.size[1] def add_widget(self, widget, index=0): if not self.crcontent: super(WizardDialog, self).add_widget(widget) else: self.crcontent.add_widget(widget, index=index) def on_dismiss(self): app = App.get_running_app() if app.wallet is None and not self._on_release: app.stop() def get_params(self, button): return (None,) def on_release(self, button): self._on_release = True self.close() if not button: self.parent.dispatch('on_wizard_complete', None) return if button is self.ids.back: self.wizard.go_back() return params = self.get_params(button) self.run_next(*params) class WizardMultisigDialog(WizardDialog): def get_params(self, button): m = self.ids.m.value n = self.ids.n.value return m, n class WizardChoiceDialog(WizardDialog): def __init__(self, wizard, **kwargs): super(WizardChoiceDialog, self).__init__(wizard, **kwargs) self.message = kwargs.get('message', '') choices = kwargs.get('choices', []) layout = self.ids.choices layout.bind(minimum_height=layout.setter('height')) for action, text in choices: l = WizardButton(text=text) l.action = action l.height = '48dp' l.root = self layout.add_widget(l) def on_parent(self, instance, value): if value: app = App.get_running_app() self._back = _back = partial(app.dispatch, 'on_back') def get_params(self, button): return (button.action,) class LineDialog(WizardDialog): title = StringProperty('') message = StringProperty('') warning = StringProperty('') def __init__(self, wizard, **kwargs): WizardDialog.__init__(self, wizard, **kwargs) self.ids.next.disabled = False def get_params(self, b): return (self.ids.passphrase_input.text,) class ShowSeedDialog(WizardDialog): seed_text = StringProperty('') message = _("If you forget your 
PIN or lose your device, your seed phrase will be the only way to recover your funds.") ext = False def on_parent(self, instance, value): if value: app = App.get_running_app() self._back = _back = partial(self.ids.back.dispatch, 'on_release') def options_dialog(self): from seed_options import SeedOptionsDialog def callback(status): self.ext = status d = SeedOptionsDialog(self.ext, callback) d.open() def get_params(self, b): return (self.ext,) class WordButton(Button): pass class WizardButton(Button): pass class RestoreSeedDialog(WizardDialog): def __init__(self, wizard, **kwargs): super(RestoreSeedDialog, self).__init__(wizard, **kwargs) self._test = kwargs['test'] from reddelectrum.mnemonic import Mnemonic from reddelectrum.old_mnemonic import words as old_wordlist self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist)) self.ids.text_input_seed.text = test_seed if is_test else '' self.message = _('Please type your seed phrase using the virtual keyboard.') self.title = _('Enter Seed') self.ext = False def options_dialog(self): from seed_options import SeedOptionsDialog def callback(status): self.ext = status d = SeedOptionsDialog(self.ext, callback) d.open() def get_suggestions(self, prefix): for w in self.words: if w.startswith(prefix): yield w def on_text(self, dt): self.ids.next.disabled = not bool(self._test(self.get_text())) text = self.ids.text_input_seed.text if not text: last_word = '' elif text[-1] == ' ': last_word = '' else: last_word = text.split(' ')[-1] enable_space = False self.ids.suggestions.clear_widgets() suggestions = [x for x in self.get_suggestions(last_word)] if last_word in suggestions: b = WordButton(text=last_word) self.ids.suggestions.add_widget(b) enable_space = True for w in suggestions: if w != last_word and len(suggestions) < 10: b = WordButton(text=w) self.ids.suggestions.add_widget(b) i = len(last_word) p = set() for x in suggestions: if len(x)>i: p.add(x[i]) for line in [self.ids.line1, self.ids.line2, self.ids.line3]: for c in line.children: if isinstance(c, Button): if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': c.disabled = (c.text.lower() not in p) and last_word elif c.text == ' ': c.disabled = not enable_space def on_word(self, w): text = self.get_text() words = text.split(' ') words[-1] = w text = ' '.join(words) self.ids.text_input_seed.text = text + ' ' self.ids.suggestions.clear_widgets() def get_text(self): ti = self.ids.text_input_seed text = unicode(ti.text).strip() text = ' '.join(text.split()) return text def update_text(self, c): c = c.lower() text = self.ids.text_input_seed.text if c == '<': text = text[:-1] else: text += c self.ids.text_input_seed.text = text def on_parent(self, instance, value): if value: tis = self.ids.text_input_seed tis.focus = True #tis._keyboard.bind(on_key_down=self.on_key_down) self._back = _back = partial(self.ids.back.dispatch, 'on_release') app = App.get_running_app() def on_key_down(self, keyboard, keycode, key, modifiers): if keycode[0] in (13, 271): self.on_enter() return True def on_enter(self): #self._remove_keyboard() # press next next = self.ids.next if not next.disabled: next.dispatch('on_release') def _remove_keyboard(self): tis = self.ids.text_input_seed if tis._keyboard: tis._keyboard.unbind(on_key_down=self.on_key_down) tis.focus = False def get_params(self, b): return (self.get_text(), False, self.ext) class ConfirmSeedDialog(RestoreSeedDialog): def get_params(self, b): return (self.get_text(),) def options_dialog(self): pass class ShowXpubDialog(WizardDialog): def __init__(self, wizard, 
**kwargs): WizardDialog.__init__(self, wizard, **kwargs) self.xpub = kwargs['xpub'] self.ids.next.disabled = False def do_copy(self): self.app._clipboard.copy(self.xpub) def do_share(self): self.app.do_share(self.xpub, _("Master Public Key")) def do_qr(self): from qr_dialog import QRDialog popup = QRDialog(_("Master Public Key"), self.xpub, True) popup.open() class AddXpubDialog(WizardDialog): def __init__(self, wizard, **kwargs): WizardDialog.__init__(self, wizard, **kwargs) self.is_valid = kwargs['is_valid'] self.title = kwargs['title'] self.message = kwargs['message'] def check_text(self, dt): self.ids.next.disabled = not bool(self.is_valid(self.get_text())) def get_text(self): ti = self.ids.text_input return unicode(ti.text).strip() def get_params(self, button): return (self.get_text(),) def scan_xpub(self): def on_complete(text): self.ids.text_input.text = text self.app.scan_qr(on_complete) def do_paste(self): self.ids.text_input.text = test_xpub if is_test else unicode(self.app._clipboard.paste()) def do_clear(self): self.ids.text_input.text = '' class InstallWizard(BaseWizard, Widget): ''' events:: `on_wizard_complete` Fired when the wizard is done creating/ restoring wallet/s. ''' __events__ = ('on_wizard_complete', ) def on_wizard_complete(self, wallet): """overriden by main_window""" pass def waiting_dialog(self, task, msg): '''Perform a blocking task in the background by running the passed method in a thread. ''' def target(): # run your threaded function try: task() except Exception as err: self.show_error(str(err)) # on completion hide message Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1) app.show_info_bubble( text=msg, icon='atlas://gui/kivy/theming/light/important', pos=Window.center, width='200sp', arrow_pos=None, modal=True) t = threading.Thread(target = target) t.start() def terminate(self, **kwargs): self.dispatch('on_wizard_complete', self.wallet) def choice_dialog(self, **kwargs): choices = kwargs['choices'] if len(choices) > 1: WizardChoiceDialog(self, **kwargs).open() else: f = kwargs['run_next'] f(choices[0][0]) def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open() def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open() def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open() def confirm_seed_dialog(self, **kwargs): kwargs['title'] = _('Confirm Seed') kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it') ConfirmSeedDialog(self, **kwargs).open() def restore_seed_dialog(self, **kwargs): RestoreSeedDialog(self, **kwargs).open() def add_xpub_dialog(self, **kwargs): kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.') AddXpubDialog(self, **kwargs).open() def add_cosigner_dialog(self, **kwargs): kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index'] kwargs['message'] = _('Please paste your cosigners master public key, or scan it using the camera button.') AddXpubDialog(self, **kwargs).open() def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open() def show_error(self, msg): Clock.schedule_once(lambda dt: app.show_error(msg)) def password_dialog(self, message, callback): popup = PasswordDialog() popup.init(message, callback) popup.open() def request_password(self, run_next): def callback(pin): if pin: self.run('confirm_password', pin, run_next) else: run_next(None) self.password_dialog('Choose a PIN code', callback) def confirm_password(self, pin, run_next): def callback(conf): if conf == pin: run_next(pin, 
False) else: self.show_error(_('PIN mismatch')) self.run('request_password', run_next) self.password_dialog('Confirm your PIN code', callback) def action_dialog(self, action, run_next): f = getattr(self, action) f()
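The waiting_dialog method above captures a common Kivy pattern: run a blocking task in a worker thread and push the outcome back to the UI thread with Clock.schedule_once. The sketch below restates that pattern in isolation; it is illustrative only, and the task and callback names are invented rather than taken from Electrum.

# background_task_sketch.py -- the thread + Clock.schedule_once pattern in isolation.
import threading
from kivy.clock import Clock

def run_in_background(task, on_done, on_error):
    """Run `task` in a worker thread; deliver its result or error on the main thread."""
    def worker():
        try:
            result = task()
        except Exception as err:
            # A negative timeout schedules the callback before the next frame.
            Clock.schedule_once(lambda dt: on_error(err), -1)
        else:
            Clock.schedule_once(lambda dt: on_done(result), -1)
    threading.Thread(target=worker, daemon=True).start()

# Hypothetical usage inside a wizard-style dialog:
#   run_in_background(
#       task=lambda: do_blocking_work(),                    # invented blocking call
#       on_done=lambda result: app.info_bubble.hide(now=True),
#       on_error=lambda err: app.show_error(str(err)))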
views.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals import torch CUDA_LAUNCH_BLOCKING = "1" import os os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3" import multiprocessing import random from threading import Thread import botocore from django.contrib import auth from django.contrib.auth import authenticate from django.core.files.uploadedfile import UploadedFile from django.shortcuts import render from django.template import RequestContext from django.utils.datetime_safe import datetime from django.views.decorators.csrf import csrf_exempt import json from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer as wn from django.http import HttpResponse, JsonResponse import re from openpyxl import load_workbook from openpyxl.writer.excel import save_virtual_workbook from apollo4.ComputeOptimalParameters import getOptimalParameterForMNB_alpha, getOptimalParameterForLR_alpha, \ getOptimalParameterForSVM_alpha, getOptimalParameterForOVRMNB_alpha, getOptimalParameterForOVRLR_alpha, \ getOptimalParameterForOVRSVM_alpha, getOptimalParametersForDeepLearning, getBestModelAndHyperParameters from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import SGDClassifier from sklearn.calibration import CalibratedClassifierCV from sklearn.multiclass import OneVsRestClassifier from sklearn.cluster import MiniBatchKMeans from sklearn.decomposition import LatentDirichletAllocation from sklearn.model_selection import KFold from sklearn import preprocessing from sklearn import metrics from sklearn.neighbors import KNeighborsClassifier import pandas as pd import numpy as np import copy from dateutil.relativedelta import relativedelta import os, sys, signal from os.path import splitext import uuid import apollo4.globals import boto3 from elasticsearch import Elasticsearch from apollo4.connection import MyConnection import pickle from sklearn.metrics.pairwise import cosine_similarity from sklearn.decomposition import TruncatedSVD from zipfile import ZipFile import io from django.contrib.auth.decorators import login_required from django.conf import settings from apollo4.secondviews import Process_All_Files from apollo4.DeepLearningModel import DeepLearningModel from collections import Counter HOST_URLS = ["https://search-apollo4-5xxq4s5dugv4fenh4e2bbt3xmi.us-east-1.es.amazonaws.com"] # In production or deploying to aws uncomment this line and comment local server settings. # if os.name == 'nt': # # # In local server uncomment the below code and comment above es_conn. 
# # HOST_URLS = ["https://search-apollolocal-yrsi6dysaadh7xyeotkeeoybqu.us-east-1.es.amazonaws.com"] # es_conn = Elasticsearch(HOST_URLS, connection_class=MyConnection, # proxies={'https': 'http://root:Samsung1%21@105.128.219.200:8080'},timeout=30) # os.environ["HTTPS_PROXY"] = "https://root:Samsung1%21@105.128.219.200:8080" # os.environ["HTTP_PROXY"] = "http://root:Samsung1%21@105.128.219.200:8080" # else: es_conn = Elasticsearch(HOST_URLS, timeout=60) print('osname', os.name) TYPE_NAME_USER = '_doc' AWS_STORAGE_BUCKET_NAME = 'apollo4' response1 = None response2 = None response = None response4 = None responseTrain = None stopwords = ["i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "you're", "you've", "you'll", "you'd", "your", "yours", "yourself", "yourselves", "he", "him", "his", "himself", "she", "she's", "her", "hers", "herself", "it", "it's", "its", "itself", "they", "them", "their", "theirs", "themselves", "what", "which", "who", "whom", "this", "that", "that'll", "these", "those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has", "had", "having", "do", "does", "did", "doing", "a", "an", "the", "and", "but", "if", "or", "because", "as", "until", "while", "of", "at", "by", "for", "with", "about", "against", "between", "into", "through", "during", "before", "after", "above", "below", "to", "from", "up", "down", "in", "out", "on", "off", "over", "under", "again", "further", "then", "once", "here", "there", "when", "where", "why", "how", "all", "any", "both", "each", "few", "more", "most", "other", "some", "such", "no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "s", "t", "can", "will", "just", "don", "don't", "should", "should've", "now", "d", "ll", "m", "o", "re", "ve", "y", "ain", "aren", "aren't", "couldn", "couldn't", "didn", "didn't", "doesn", "doesn't", "hadn", "hadn't", "hasn", "hasn't", "haven", "haven't", "isn", "isn't", "ma", "mightn", "mightn't", "mustn", "mustn't", "needn", "needn't", "shan", "shan't", "shouldn", "shouldn't", "wasn", "wasn't", "weren", "weren't", "won", "won't", "wouldn", "wouldn't"] @csrf_exempt def redirectChange(request): try: if request.method == "POST": get_value = request.body get_value = get_value.decode("utf-8") get_value = json.loads(get_value) redirectURL = get_value['redirectURL'] settings.LOGIN_REDIRECT_URL = redirectURL if redirectURL == '/': settings.LOGIN_REDIRECT_URL = '/home' return HttpResponse('done') except Exception as e: return HttpResponse( "Error running the program.") def index(request): return render(request, 'registration/login.html') @csrf_exempt def testing_data_upload_view(request): # global training_data index_name = 'testingfiledata' try: if request.method == 'POST': # userName = request.user.username # # es_conn.indices.create(index=index_name) # testing_data = request.FILES.getlist('testFile') # # testing_data = open(request.FILES.get('file').temporary_file_path(), 'r').read() # finalTestingData = Process_All_Files(testing_data) # query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} # es_conn.delete_by_query(index=index_name, body=query) # if len(testing_data) > 0: # datafile = { # 'username': userName, # 'testing_data': finalTestingData # } # # es_conn.create(index=index_name, doc_type='_doc', body=datafile, id=uuid.uuid4()) return HttpResponse('sucess') except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def training_data_upload_view(request): # global training_data index_name = 
'trainingfiledata' try: if request.method == 'POST': # userName = request.user.username; # # training_data = open(request.FILES.get('file').temporary_file_path(), 'r').read() # # es_conn.indices.create(index=index_name) # training_data = request.FILES.getlist('trainFile') # # training_data = request.FILES.getlist('file').read().decode("ISO-8859-1") # finalTrainingData = Process_All_Files(training_data) # # res = es_conn.update(index=index_name,body={"doc": {"match_all": {}}}) # query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} # es_conn.delete_by_query(index=index_name, body=query) # if len(training_data) > 0: # datafile = { # 'username': userName, # 'training_data': finalTrainingData # } # # es_conn.create(index=index_name, doc_type='_doc', body=datafile, id=uuid.uuid4()) return HttpResponse('sucess') except Exception as e: return HttpResponse( "Error running the program.") # need to work on this def xls_response(request): workbook = '' response1 = '' try: if request.method == "GET": userName = request.user.username; index_name_globals = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} res = es_conn.search(index=index_name_globals, body=query) testingDataType = res['hits']['hits'][0]['_source']['testingDataType'] trainingDataType = res['hits']['hits'][0]['_source']['trainingDataType'] testingFileName = res['hits']['hits'][0]['_source']['testingFileName'] if testingDataType == 'Patent': workbook = load_workbook('./static/template_patent.xlsx') elif testingDataType == 'Journal': workbook = load_workbook('./static/template_journal.xlsx') if trainingDataType == 'Patent': workbook_training_data = load_workbook('./static/template_patent_training_data.xlsx') elif trainingDataType == 'Journal': workbook_training_data = load_workbook('./static/template_journal_training_data.xlsx') resultsSheet = workbook["Results"] rawDataSheet = workbook["Raw_Data"] resultsSheet_training_data = workbook_training_data["Results"] rawDataSheet_training_data = workbook_training_data["Raw_Data"] # Write results for predicted probabilities and class names to the sheet containing raw data # In excel, the indices start from 1, instead of 0 index_name_apollo = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} res = es_conn.search(index=index_name_apollo, body=query) model_prob_all = res['hits']['hits'][0]['_source']['model_prob_all'] model_predicted = res['hits']['hits'][0]['_source']['model_predicted'] model_predicted = json.loads(model_predicted) model_predicted_training = res['hits']['hits'][0]['_source']['model_predicted_training'] model_predicted_training = json.loads(model_predicted_training) file_test_proc = res['hits']['hits'][0]['_source']['file_test_proc'] file_sample_proc = res['hits']['hits'][0]['_source']['file_sample_proc'] file_test_open = res['hits']['hits'][0]['_source']['file_test_open'] file_sample_open_training = res['hits']['hits'][0]['_source']['file_sample_open_training'] if testingDataType == 'Patent': # Now, the first line is header, so remove the first line for row_index in np.arange(len(file_test_proc)): doc = file_test_proc[row_index].split('\t') for column_index in np.arange(8): resultsSheet.cell(row=row_index + 2, column=column_index + 1).value = doc[column_index].strip( '\"') resultsSheet.cell(row=row_index + 2, column=9).value = model_predicted[row_index].strip('\"') elif testingDataType == 'Journal': for row_index in np.arange(len(file_test_open)): doc = 
file_test_open[row_index].split('\t') for column_index in np.arange(6): resultsSheet.cell(row=row_index + 2, column=column_index + 1).value = doc[column_index].strip( '\"') resultsSheet.cell(row=row_index + 2, column=7).value = model_predicted[row_index].strip('\"') if trainingDataType == 'Patent': # Write predictions on training data in a separate file: for row_index in np.arange(len(file_sample_proc)): doc = file_sample_proc[row_index].split('\t') for column_index in np.arange(8): resultsSheet_training_data.cell(row=row_index + 2, column=column_index + 1).value = doc[ column_index].strip('\"') resultsSheet_training_data.cell(row=row_index + 2, column=9).value = model_predicted_training[ row_index].strip('\"') elif trainingDataType == 'Journal': # Write predictions on training data in a separate file: for row_index in np.arange(len(file_sample_open_training)): doc = file_sample_open_training[row_index].split('\t') for column_index in np.arange(7): resultsSheet_training_data.cell(row=row_index + 2, column=column_index + 1).value = doc[ column_index].strip('\"') resultsSheet_training_data.cell(row=row_index + 2, column=8).value = model_predicted_training[ row_index].strip('\"') # In the Raw_Data sheet, write the class names starting from column B model_prob_all = json.loads(model_prob_all) column_header_index = 2 key_value = 'runDocumentClassifier/' key_value += userName + '/' s3 = boto3.client('s3') model = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') model = model['Body'].read() model = pickle.loads(model) for cls in model.classes_: rawDataSheet.cell(row=1, column=column_header_index).value = cls.strip('\r') column_header_index += 1 # Wirte all the probabilities for each class assgined by the model in the Raw_Data sheet for row_index in np.arange(len(model_prob_all)): for column_index in np.arange(len(model_prob_all[row_index])): # The first column in template excel file is formula for 'OTHERS', # hence start writing the probability values from second column in the excel sheet rawDataSheet.cell(row=row_index + 2, column=column_index + 2).value = model_prob_all[ row_index][column_index] # workbook.save(re.sub('.txt', '_Threshold_Analysis.xlsx',gui_parameters['testing_file_name'])) # thresholdAnalysisResultFileName = re.sub('.txt', '_Threshold_Analysis.xlsx', testingFileName) # response1 = HttpResponse(content_type='application/ms-excel') response1 = HttpResponse(content=save_virtual_workbook(workbook), content_type='application/vnd.ms-excel') response1['Content-Disposition'] = 'attachment; filename=' + thresholdAnalysisResultFileName # workbook.save(response1); return response1 except Exception as e: return HttpResponse( "Error running the program.") def training_data_xls_response(request): # global response2 response2 = '' try: if request.method == "GET": userName = request.user.username; index_name_globals = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} res = es_conn.search(index=index_name_globals, body=query) testingDataType = res['hits']['hits'][0]['_source']['testingDataType'] trainingDataType = res['hits']['hits'][0]['_source']['trainingDataType'] testingFileName = res['hits']['hits'][0]['_source']['testingFileName'] if testingDataType == 'Patent': workbook = load_workbook('./static/template_patent.xlsx') elif testingDataType == 'Journal': workbook = load_workbook('./static/template_journal.xlsx') if trainingDataType == 'Patent': workbook_training_data = 
load_workbook('./static/template_patent_training_data.xlsx') elif trainingDataType == 'Journal': workbook_training_data = load_workbook('./static/template_journal_training_data.xlsx') resultsSheet = workbook["Results"] rawDataSheet = workbook["Raw_Data"] resultsSheet_training_data = workbook_training_data["Results"] rawDataSheet_training_data = workbook_training_data["Raw_Data"] # Write results for predicted probabilities and class names to the sheet containing raw data # In excel, the indices start from 1, instead of 0 index_name_apollo = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} res = es_conn.search(index=index_name_apollo, body=query) model_prob_all_training = res['hits']['hits'][0]['_source']['model_prob_all_training'] model_predicted = res['hits']['hits'][0]['_source']['model_predicted'] model_predicted = json.loads(model_predicted) model_predicted_training = res['hits']['hits'][0]['_source']['model_predicted_training'] model_predicted_training = json.loads(model_predicted_training) file_test_proc = res['hits']['hits'][0]['_source']['file_test_proc'] file_sample_proc = res['hits']['hits'][0]['_source']['file_sample_proc'] file_test_open = res['hits']['hits'][0]['_source']['file_test_open'] file_sample_open_training = res['hits']['hits'][0]['_source']['file_sample_open_training'] if testingDataType == 'Patent': for row_index in np.arange(len(file_test_proc)): doc = file_test_proc[row_index].split('\t') for column_index in np.arange(8): resultsSheet.cell(row=row_index + 2, column=column_index + 1).value = doc[column_index].strip( '\"') resultsSheet.cell(row=row_index + 2, column=9).value = model_predicted[row_index].strip('\"') elif testingDataType == 'Journal': for row_index in np.arange(len(file_test_open)): doc = file_test_open[row_index].split('\t') for column_index in np.arange(6): resultsSheet.cell(row=row_index + 2, column=column_index + 1).value = doc[column_index].strip( '\"') resultsSheet.cell(row=row_index + 2, column=7).value = model_predicted[row_index].strip('\"') if trainingDataType == 'Patent': # Write predictions on training data in a separate file: for row_index in np.arange(len(file_sample_proc)): doc = file_sample_proc[row_index].split('\t') for column_index in np.arange(8): resultsSheet_training_data.cell(row=row_index + 2, column=column_index + 1).value = doc[ column_index].strip('\"') resultsSheet_training_data.cell(row=row_index + 2, column=9).value = model_predicted_training[ row_index].strip('\"') elif trainingDataType == 'Journal': # Write predictions on training data in a separate file: for row_index in np.arange(len(file_sample_open_training)): doc = file_sample_open_training[row_index].split('\t') for column_index in np.arange(7): resultsSheet_training_data.cell(row=row_index + 2, column=column_index + 1).value = doc[ column_index].strip('\"') resultsSheet_training_data.cell(row=row_index + 2, column=8).value = model_predicted_training[ row_index].strip('\"') # In the Raw_Data sheet, write the class names starting from column B model_prob_all_training = json.loads(model_prob_all_training) column_header_index = 2 key_value = 'runDocumentClassifier/' key_value += userName + '/' s3 = boto3.client('s3') model = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') model = model['Body'].read() model = pickle.loads(model) for cls in model.classes_: rawDataSheet.cell(row=1, column=column_header_index).value = cls.strip('\r') column_header_index += 1 # Write all the probabilities for 
each class assgined by the model in the Raw_Data sheet for row_index in np.arange(len(model_prob_all_training)): for column_index in np.arange(len(model_prob_all_training[row_index])): # The first column in template excel file is formula for 'OTHERS', # hence start writing the probability values from second column in the excel sheet rawDataSheet_training_data.cell(row=row_index + 2, column=column_index + 2).value = \ model_prob_all_training[row_index][column_index] # workbook_training_data.save( # re.sub('.txt', '_Threshold_Analysis_Training_Data.xlsx', gui_parameters['testing_file_name'])) thresholdAnalysisTrainingDataResultFileName = re.sub('.txt', '_Threshold_Analysis_Training_Data.xlsx', testingFileName) # # response2 = HttpResponse(content_type='application/ms-excel') response2 = HttpResponse(content=save_virtual_workbook(workbook_training_data), content_type='application/vnd.ms-excel') response2['Content-Disposition'] = 'attachment; filename=' + thresholdAnalysisTrainingDataResultFileName # workbook.save(response2); return response2 except Exception as e: return HttpResponse( "Error running the program. Please contact the IP Group Analytics Team () to resolve the issue. Please provide the error details below in your email. \nPlease provide all the steps to reproduce this issue. \n" + "-" * 40 + "\n" + str( e) + "\n" + "-" * 40) # need to change till here ####################file download functions starts######################## ####################file download functions ends######################## @csrf_exempt def data(request): # global patentOrJournalTrainingData index_name = 'patentorjournal' try: if request.method == "POST": userName = request.user.username; get_value = request.body get_value = get_value.decode("utf-8") patentOrJournalTrainingData = '' if "identification number\ttitle\tabstract\tclaims\tapplication number\tapplication date\tcurrent assignee\tupc" in get_value.lower(): patentOrJournalTrainingData = 'Patent' elif "meta data\ttitle\tabstract\tauthor\taffiliation\tpublished year" in get_value.lower(): patentOrJournalTrainingData = 'Journal' elif 'nasca' in get_value.lower(): patentOrJournalTrainingData = 'NASCA File Error.' else: patentOrJournalTrainingData = 'Training File Error.' 
return HttpResponse(patentOrJournalTrainingData) except Exception as e: return HttpResponse( "Error running the program.") @login_required(login_url='/login/') def home(request): return render(request, 'apollo4/home.html') @csrf_exempt @login_required(login_url='/login/') def svl(request): return render(request, 'apollo4/superVisedLearning.html') @csrf_exempt @login_required(login_url='/login/') def usvl(request): return render(request, 'apollo4/unSuperVised.html') @csrf_exempt @login_required(login_url='/login/') def em(request): return render(request, 'apollo4/existingModel.html') @csrf_exempt @login_required(login_url='/login/') def emus(request): return render(request, 'apollo4/existingModelUnsupervised.html') @csrf_exempt def da(request): return render(request, 'apollo4/dataannotate.html') @csrf_exempt @login_required(login_url='/login/') def il(request): return render(request, 'apollo4/incrementalLearning.html') @csrf_exempt @login_required(login_url='/login/') def ilu(request): return render(request, 'apollo4/incrementalLearningUnsupervised.html') @csrf_exempt @login_required(login_url='/login/') def ps(request): return render(request, 'apollo4/patentScoring.html') @csrf_exempt @login_required(login_url='/login/') def da(request): return render(request, 'apollo4/dataannotate.html') def um(request): return render(request, 'apollo4/user_manual.html') def logout(request): auth.logout(request) return render(request, 'registration/login.html') @csrf_exempt def fetch_update(request): try: if request.method == 'POST': userName = request.user.username; index_name_gloabals = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} res = es_conn.search(index=index_name_gloabals, body=query) data = res['hits']['hits'][0]['_source'] return JsonResponse({'data': data}) except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def userRunModelTrack(request): try: if request.method == 'POST': try: userName = request.user.username; userTrack = request.body.decode('utf-8'); userTrack = json.loads(userTrack); testingFileName = userTrack['testing_file_name'] trainingFileName = userTrack['training_file_name'] testingDataType = userTrack['testing_data_type'] trainingDataType = userTrack['training_data_type'] str_model_name = userTrack['model'] automaticMode = userTrack['automatic_mode'] additional_stopwords = userTrack['additional_stopwords'] target_performance_measure = userTrack['target_performance_measure'] except Exception as e: print('parsing went wrong', e) time = datetime.now() time = time.strftime("%I:%M%p on %B %d, %Y") update_fields = { 'username': userName, 'testingFileName': testingFileName, 'trainingFileName': trainingFileName, 'testingDataType': testingDataType, 'trainingDataType': trainingDataType, 'str_model_name': str_model_name, 'automaticMode': automaticMode, 'additional_stopwords': additional_stopwords, 'target_performance_measure': target_performance_measure, 'time': time + ' UTC Time' } index_name = 'userrunmodeltrack' if es_conn.indices.exists(index_name): es_conn.create(index=index_name, doc_type='_doc', body=update_fields, id=uuid.uuid4()) else: save_response = es_conn.indices.create(index=index_name, ignore=400) es_conn.create(index=index_name, doc_type='_doc', body=update_fields, id=uuid.uuid4()) return HttpResponse('sucess') except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def runSupervisedSaving(request): try: if request.method == 'POST': userName = 
request.user.username; update_fields = { 'username': userName, 'progressbar_maximum': 0, # progress bar max_value 'progressbar_value': 0, # progress bar value 'saved_project_status': 0, 'progressbarlabel_text': '', # progress field 'progress_text': '', # progress text 'trainingDataNumInstances': None, # Training examples 'trainingDataNumClasses': None, # training classes 'trainingDataStatistics': None, # trainig data table 'trainingDataPerformance': None, # model evaluation before +/- 'trainingDataPerformancesStandardDeviations': None, # model evaluation after +/- 'str_parameter_name': None, # Hyper parameter left before = 'optimal_model_parameter': None, # Hyper parameter right after = 'testingDataNumInstances': None, # Testing examples 'testingDataStatistics': None, # testing data table 'final_progress_value': 0, 'user_defined_stopwords': '', 'testingFileName': '', 'trainingFileName': '', 'excel_status_code': 0, 'current_tab': 0, 'newclassString': '', 'errorString': '', 'file_test_proc': '', 'model_predicted': '', 'file_test_open': '', 'file_sample_proc': '', 'model_predicted_training': '', 'file_sample_open_training': '', 'model': '', 'model_prob_all_training': '', 'model_prob_all': '', 'testingFileName': '', 'trainingFileName': '', 'testingDataType': '', 'trainingDataType': '', 'model_isotonic_calibration': None, 'model_sigmoid_calibration': None, 'tfidfVectorizer': None } index_name_gloabals = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} es_conn.delete_by_query(index=index_name_gloabals, body=query) es_conn.create(index=index_name_gloabals, doc_type='_doc', body=update_fields, id=uuid.uuid4()) return HttpResponse('sucess') except Exception as e: return HttpResponse( "Error running the program.") ####################runDocumentClassifierSupervised starts######################## @csrf_exempt def runDocumentClassifierSupervised(request): relevanceThreshold = 0.0 file_others = [] file_classified = [] model_classified = [] global responsesupervised try: # Assuming that the client-side has already selected the options before running the program. # Assuming that the request from the client side will have all the fields necessary for running the program. 
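            # Example of the JSON document expected in request.POST['inputData'] (illustrative only --
            # the field names are taken from the parsing code below, the values are made up):
            # {
            #     "testing_file_name":  "unlabeled_documents.txt",
            #     "training_file_name": "labeled_documents.txt",
            #     "testing_data_type":  "Patent",            # or "Journal"
            #     "training_data_type": "Patent",            # or "Journal"
            #     "model": "Support vector Machine",         # or "automatic", "Multinomial Naive Bayes", ...
            #     "automatic_mode": "True",                  # sent as a string and compared to 'True' below
            #     "additional_stopwords": "foo,bar",         # comma-separated user stop words
            #     "target_performance_measure": "F1 (default)"
            # }
            # The training and testing files themselves arrive as multipart uploads under the
            # keys 'trainFile' and 'testFile'.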
if request.method == "GET": return responsesupervised if request.method == "POST": try: userName = request.user.username; finalTrainingData = request.FILES.getlist('trainFile') finalTestingData = request.FILES.getlist('testFile') gui_parameters = request.POST.getlist('inputData')[0] gui_parameters = json.loads(gui_parameters); testingFileName = gui_parameters['testing_file_name'] trainingFileName = gui_parameters['training_file_name'] testingDataType = gui_parameters['testing_data_type'] trainingDataType = gui_parameters['training_data_type'] str_model_name = gui_parameters['model'] except Exception as e: print('parsing went wrong', e) # Thread(target=runSupervisedSaving,args=(userName,)).start() # set the progress bar values training_data = Process_All_Files(finalTrainingData) testing_data = Process_All_Files(finalTestingData) index_name_gloabals = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] if gui_parameters['automatic_mode'] == 'True': progressbar_maximum = 235 else: progressbar_maximum = 100 # apollo4.globals.progressbar_maximum = 100 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_maximum": progressbar_maximum, "current_tab": 1, "testingFileName": testingFileName, "trainingFileName": trainingFileName, "testingDataType": testingDataType, "trainingDataType": trainingDataType}}) # apollo4.globals.progressbar_value = 0 # 1. supervised learning # Set the text in progressbarlabel programRunStartTime = datetime.now() programRunStartTimeLabel = 'Progress: Program run started at ' + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + ' (UTC time). ' progressbarlabel_text = '' progressbarlabel_text = programRunStartTimeLabel progress_text = '' progress_text = progress_text + '-' * 75 + '\n' + "Program run started at " + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + " (UTC time).\n" + \ '-' * 75 + '\n' + "Starting document classification process..." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": 0, 'progressbarlabel_text': progressbarlabel_text, 'progress_text': progress_text}}) # The code for loading and pre-processing the data is different for patent and journal data if gui_parameters['training_data_type'] == 'Patent': file_sample_open = training_data file_sample_open = file_sample_open.split('\n') # split by new line file_sample_open = list(filter(None, file_sample_open)) # delete empty lines # Now, the first line is header, so remove the first line file_sample_open = file_sample_open[1:] progress_text = progress_text + "\nFound " + str( len(file_sample_open)) + " documents! 
\nPreprocessing documents...\n" progressbar_value = 5 # Set value of progressbar to 5 once the training dataset is loaded # Build the stop words stops = stopwords aux_stops = './static/AuxStops.txt' aux_stops = open(aux_stops, 'r').read() aux_stops = re.sub("[^a-zA-Z ]", " ", aux_stops) # remove non-alphanumeric aux_stops = " ".join(aux_stops.split()) # split by any whitespace and rejoin w/ space aux_stops = aux_stops.split(' ') aux_stops = list(filter(None, aux_stops)) # append auxiliary stops stops = stops + aux_stops # append user-provided stop words user_defined_stopwords = (" ".join(gui_parameters['additional_stopwords'].lower().split(','))).split() es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text, "progressbar_value": progressbar_value, "user_defined_stopwords": user_defined_stopwords}}) stops = stops + user_defined_stopwords # Bulid stopterm dictionary stopterms = {} for stop in stops: if stop in stopterms: stopterms[stop] += 1 else: stopterms[stop] = 1 # Preprocess the sample file # MS: Need to check whether we will receive the file or the text from the GUI. # Baesd on that, change the function below. (file_sample_proc, file_sample_stem, temp) = preprocess_collection(file_sample_open, stopterms, True, progress_text) file_sample = list(filter(None, file_sample_stem)) title_samples = [doc.split('\t')[1].strip('\r').strip('\n') for doc in file_sample] abstract_samples = [doc.split('\t')[2].strip('\r').strip('\n') for doc in file_sample] claim_samples = [doc.split('\t')[3].strip('\r').strip('\n') for doc in file_sample] label_samples = [doc.split('\t')[8].lower().strip('\r').strip('\n') for doc in file_sample] labels = sorted(list(set(label_samples))) labels.append(u'others') train_data = [' '.join(doc) for doc in zip(title_samples, abstract_samples, claim_samples)] train_target = label_samples es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text, "progressbar_value": progressbar_value, "user_defined_stopwords": user_defined_stopwords, "file_sample_proc": file_sample_proc}}) # End patent training data elif gui_parameters['training_data_type'] == 'Journal': # MS: Need to check whether we will receive the file or the text from the GUI. # Baesd on that, change the function below. file_sample_open = training_data file_sample_open = file_sample_open.split('\n') # split by new line file_sample_open = list(filter(None, file_sample_open)) # delete empty lines # Now, the first line is header, so remove the first line file_sample_open = file_sample_open[1:] progress_text = progress_text + "\nFound " + str( len(file_sample_open)) + " documents! 
\nPreprocessing documents...\n" # Set value of progressbar to 5 once the training dataset is loaded progressbar_value = 5 # Remove the duplicated documents based on "title" file_sample_open_training = dedup_collection_journal(file_sample_open, 1, 2) # Preprocessing for scoupus data file_sample_open = preprocess_collection_journal(file_sample_open_training) # Take the stopwords from the GUI and add them to the stopwords list user_defined_stopwords = gui_parameters['additional_stopwords'].lower() file_sample_data = [' '.join([stop_and_stem_journal(doc.split('\t')[1], user_defined_stopwords) , stop_and_stem_journal(doc.split('\t')[2], user_defined_stopwords) ]) for doc in file_sample_open] # Training Phase label_samples = [doc.split('\t')[-1].lower().strip('\r').strip('\n') for doc in file_sample_open] labels = sorted(list(set(label_samples))) labels.append(u'others') train_data = file_sample_data train_target = label_samples # end journal training data preprocessing es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text, "progressbar_value": progressbar_value, "user_defined_stopwords": user_defined_stopwords, "file_sample_open_training": file_sample_open_training}}) progress_text = progress_text + "Removed duplicates and preprocessed " + str( len(train_data)) + " documents." # Set value of progressbar to 10 once the training dataset is preprocessed progressbar_value += 5 trainingDataNumInstances = len(train_target) trainingDataNumClasses = len(set(train_target)) es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text, "progressbar_value": progressbar_value, "trainingDataNumInstances": trainingDataNumInstances, "trainingDataNumClasses": trainingDataNumClasses}}) # Display information about training examples class distribution numInstancesTrainingData = len(train_data) trainingDataStats = [] leastDocumentsForAClass = 5 # initialize to 5 for label in set(train_target): distribution = str(np.round(train_target.count(label) * 100.0 / len(train_target) * 1.0, 2)) + '%' trainingDataStats.append([label, train_target.count(label), distribution]) if train_target.count(label) < leastDocumentsForAClass: leastDocumentsForAClass = train_target.count(label) # Update the treeview with the distribution of instances in the training data trainingDataStatistics = trainingDataStats # Make sure that there are at least 5 documents for each class: # this is required to perform 5-fold cross validation if leastDocumentsForAClass < 5: progress_text = progress_text + "*" * 50 + "\nThe program requires at least 5 training examples for each class. Please provide at least 5 training examples for each class and re-run the program." + '\n' + "*" * 50 errorString = 'The program requires at least 5 training examples for each class. Please provide at least 5 training examples for each class and re-run the program.' final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text, "errorString": errorString, "final_progress_value": final_progress_value}}) # Display a messagebox in the GUI return HttpResponse( "The program requires at least 5 training examples for each class. Please provide at least 5 training examples for each class and re-run the program.") if len(set(train_target)) < 3 and 'One vs Rest' in str_model_name: # Display a messagebox in the GUI errorString = 'One vs Rest models are supported for only more than two classes in the data. There are less than three classes. 
Please select a model that is NOT One vs Rest.' final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"errorString": errorString, "final_progress_value": final_progress_value}}) return HttpResponse( "One vs Rest models are supported for only more than two classes in the data. There are less than three classes. Please select a model that is NOT One vs Rest.") progress_text = progress_text + "\nStarting model training..." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"trainingDataStatistics": json.dumps(trainingDataStatistics), "progress_text": progress_text}}) cv = CountVectorizer() tfidf = TfidfTransformer() # Changed the n-grams to (1,5) in the line below, and max_df from 0.5 to 0.8, based on side-experiments tfidf_vect = TfidfVectorizer(analyzer='word', ngram_range=(1, 5), min_df=2, max_df=0.8, max_features=200000, stop_words='english', use_idf=True) # tf-idf with params train_tfidf = tfidf_vect.fit_transform(train_data) # Set value of progressbar to 15 once the training dataset is vectorized progressbar_value += 5 tfidfVectorizer = tfidf_vect # Model and model parameters svm_alpha = 1.0 # default value mnb_alpha = 0.001 # default value lrl2_alpha = 1.0 # default value svm_kernel = 'linear' # default value class_weight = None # default value batchSize = 16 maxSequenceLength = 256 automatic_mode = False optimal_model_parameter = -1 progress_text = progress_text + "\nOptimizing model parameters..." str_parameter_name = 'Alpha = ' key_value = 'runDocumentClassifier/' key_value += userName + '/' s3 = boto3.client('s3') tfidf = pickle.dumps(tfidfVectorizer) s3.put_object(Body=tfidf, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'tfidf_vect.pkl') es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"optimal_model_parameter": optimal_model_parameter, "progress_text": progress_text, "str_parameter_name": str_parameter_name, "progressbar_value": progressbar_value}}) if gui_parameters['automatic_mode'] == True: str_model_name = 'automatic' if str_model_name == 'automatic': # determine the best model based on evaluating several models automatic_mode = True progress_text = progress_text + "\nEvaluating all the models with optimal parameter settings..." chosen_model, optimal_model_parameter, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getBestModelAndHyperParameters( train_tfidf, train_data, train_target, gui_parameters['target_performance_measure']) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations # set model to chosen_model str_model_name = chosen_model str_parameter_name = 'Alpha = ' progress_text = progress_text + "\n***************************************************************************" + \ "\nThe BEST model based on 5-fold cross validation on training data is: " + str_model_name + \ "\nPlease refer to the detailed results for each model in the PROGRESS frame above." 
+ \ "\n***************************************************************************" es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"trainingDataPerformances": trainingDataPerformances, 'trainingDataPerformancesStandardDeviation': trainingDataPerformancesStandardDeviation, "progress_text": progress_text, "str_parameter_name": str_parameter_name}}) if str_model_name == 'Multinomial Naive Bayes': # Get optimal alpha for the model mnb_alpha = -1 if automatic_mode == False: mnb_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForMNB_alpha( train_tfidf.todense(), train_data, train_target, gui_parameters['target_performance_measure']) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations optimal_model_parameter = mnb_alpha else: # The best model is already computed and best parameter is already determined mnb_alpha = optimal_model_parameter model = MultinomialNB(alpha=mnb_alpha).partial_fit(train_tfidf.todense(), train_target, classes=np.unique(train_target)) trainedModel = model trainedModelName = 'Multinomial_Naive_Bayes_Alpha=' + str( mnb_alpha) + '_' + gui_parameters['target_performance_measure'].replace(' (default)', '') es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"trainingDataPerformance": list(trainingDataPerformances), "trainingDataPerformancesStandardDeviations": list( trainingDataPerformancesStandardDeviation), "optimal_model_parameter": optimal_model_parameter, "trainedModelName": trainedModelName}}) elif str_model_name == 'Logistics Regression': # Get optimal alpha for the model lrl2_alpha = -1 if automatic_mode == False: lrl2_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForLR_alpha( train_tfidf.toarray(), train_data, train_target, gui_parameters['target_performance_measure']) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations optimal_model_parameter = lrl2_alpha else: lrl2_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # output of the model is dependent on the interaction between alpha and the number of epochs (n_iter) # When alpha is very small, n_iter must be large to compensate for the slower learning rate # When using SGD, the partial_fit method has to be applied on different batches of the training data, # and we need to epoch multiple times model = SGDClassifier(loss='log', penalty='l2', alpha=lrl2_alpha, class_weight=class_weight, random_state=random_state) train_tfidf_dense = train_tfidf.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_data)) for n in np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [train_target[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=np.unique(train_target)) trainedModel = model trainedModelName = 'Logistic_Regression_Alpha=' + str( lrl2_alpha) + '_' + gui_parameters['target_performance_measure'].replace(' (default)', '') es_conn.update(index=index_name_gloabals, id=id, 
body={"doc": {"trainingDataPerformance": list(trainingDataPerformances), "trainingDataPerformancesStandardDeviations": list( trainingDataPerformancesStandardDeviation), "optimal_model_parameter": optimal_model_parameter, "trainedModelName": trainedModelName}}) elif str_model_name == 'Support vector Machine': # Get optimal alpha for the model, performance of 5-fold CV, and standard deviation of performance if automatic_mode == False: svm_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForSVM_alpha( train_tfidf.toarray(), train_data, train_target, gui_parameters['target_performance_measure']) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations optimal_model_parameter = svm_alpha else: svm_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # output of the model is dependent on the interaction between alpha and the number of epochs (n_iter) # When alpha is very small, n_iter must be large to compensate for the slower learning rate # When using SGD, the partial_fit method has to be applied on different batches of the training data, # and we need to epoch multiple times model = SGDClassifier(loss='hinge', penalty='l2', alpha=svm_alpha, class_weight=class_weight, random_state=random_state) train_tfidf_dense = train_tfidf.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_data)) for n in np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [train_target[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=np.unique(train_target)) trainedModel = model trainedModelName = 'Support_Vector_Machine_Alpha=' + str( svm_alpha) + '_' + gui_parameters['target_performance_measure'].replace(' (default)', '') es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"trainingDataPerformance": list(trainingDataPerformances), "trainingDataPerformancesStandardDeviations": list( trainingDataPerformancesStandardDeviation), "optimal_model_parameter": optimal_model_parameter, "trainedModelName": trainedModelName}}) if str_model_name == 'One vs Rest(Multinomial Naive Bayes)': # Get optimal alpha for the model mnb_alpha = -1 if automatic_mode == False: mnb_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForOVRMNB_alpha( train_tfidf.todense(), train_data, train_target, gui_parameters['target_performance_measure']) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations optimal_model_parameter = mnb_alpha else: # The best model is already computed and best parameter is already determined mnb_alpha = optimal_model_parameter model = OneVsRestClassifier(MultinomialNB(alpha=mnb_alpha)).partial_fit(train_tfidf.todense(), train_target, classes=np.unique(train_target)) trainedModel = model trainedModelName = 'One_vs_Rest_Multinomial_Naive_Bayes_Alpha=' + str( mnb_alpha) + '_' + gui_parameters['target_performance_measure'].replace(' (default)', '') es_conn.update(index=index_name_gloabals, id=id, body={"doc": 
{"trainingDataPerformance": list(trainingDataPerformances), "trainingDataPerformancesStandardDeviations": list( trainingDataPerformancesStandardDeviation), "optimal_model_parameter": optimal_model_parameter, "trainedModelName": trainedModelName}}) elif str_model_name == 'One vs Rest(Logistic Regression)': # Get optimal alpha for the model lrl2_alpha = -1 if automatic_mode == False: lrl2_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForOVRLR_alpha( train_tfidf.toarray(), train_data, train_target, gui_parameters['target_performance_measure']) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations optimal_model_parameter = lrl2_alpha else: lrl2_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # output of the model is dependent on the interaction between alpha and the number of epochs (n_iter) # When alpha is very small, n_iter must be large to compensate for the slower learning rate # When using SGD, the partial_fit method has to be applied on different batches of the training data, and we need to epoch multiple times model = OneVsRestClassifier( SGDClassifier(loss='log', penalty='l2', alpha=lrl2_alpha, class_weight=class_weight, random_state=random_state)) train_tfidf_dense = train_tfidf.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_data)) for n in np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [train_target[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=np.unique(train_target)) trainedModel = model trainedModelName = 'One_vs_Rest_Logistic_Regression_Alpha=' + str( lrl2_alpha) + '_' + gui_parameters['target_performance_measure'].replace(' (default)', '') es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"trainingDataPerformance": list(trainingDataPerformances), "trainingDataPerformancesStandardDeviations": list( trainingDataPerformancesStandardDeviation), "optimal_model_parameter": optimal_model_parameter, "trainedModelName": trainedModelName}}) elif str_model_name == 'One vs Rest(Support vector Machine)': # Get optimal alpha for the model, performance of 5-fold CV, and standard deviation of performance if automatic_mode == False: svm_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForOVRSVM_alpha( train_tfidf.toarray(), train_data, train_target, gui_parameters['target_performance_measure']) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations optimal_model_parameter = svm_alpha else: svm_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # output of the model is dependent on the interaction between alpha and the number of epochs (n_iter) # When alpha is very small, n_iter must be large to compensate for the slower learning rate # When using SGD, the partial_fit method has to be applied on different batches of the training data, # and we need to epoch multiple times model = OneVsRestClassifier( 
SGDClassifier(loss='hinge', penalty='l2', alpha=svm_alpha, class_weight=class_weight, random_state=random_state)) train_tfidf_dense = train_tfidf.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_data)) for n in np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [train_target[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=np.unique(train_target)) trainedModel = model # needs to be clarified######################################################################################################### trainedModelName = 'One_vs_Rest_Support_Vector_Machine_Alpha=' + str( svm_alpha) + '_' + gui_parameters['target_performance_measure'].replace(' (default)', '') ################################################################################################################################# es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"trainingDataPerformance": list(trainingDataPerformances), "trainingDataPerformancesStandardDeviations": list( trainingDataPerformancesStandardDeviation), "optimal_model_parameter": optimal_model_parameter, "trainedModelName": trainedModelName}}) else: # Get optimal alpha for the model, performance of 5-fold CV, and standard deviation of performance if str_model_name == 'BERT': dl_model_type = 'bert' dl_model_name = 'bert-base-cased' elif str_model_name == 'RoBERTa': dl_model_type = 'roberta' dl_model_name = 'roberta-base' elif str_model_name == 'XLNet': dl_model_type = 'xlnet' dl_model_name = 'xlnet-base-cased' if automatic_mode == False: # Need to set the output directory, where the temporary results for the model training will be stored torch.cuda.empty_cache() DEEP_LEARNING_OUTPUT_DIR = './DeepLearningOutputs/' + userName + '/' if not os.path.exists(DEEP_LEARNING_OUTPUT_DIR): os.makedirs(DEEP_LEARNING_OUTPUT_DIR) print(123) cudaExist = torch.cuda.is_available() deeperror = { 'username': userName, 'error': cudaExist, } index_name = 'deeplearningerror' if es_conn.indices.exists(index_name): es_conn.create(index=index_name, doc_type=TYPE_NAME_USER, body=deeperror, id=uuid.uuid4()) else: save_response = es_conn.indices.create(index=index_name, ignore=400) es_conn.create(index=index_name, doc_type=TYPE_NAME_USER, body=deeperror, id=uuid.uuid4()) optimal_parameters, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParametersForDeepLearning( dl_model_type, dl_model_name, training_data, gui_parameters['target_performance_measure'], DEEP_LEARNING_OUTPUT_DIR) print(345) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations optimal_model_parameter = str(optimal_parameters[0]) + '_' + str(optimal_parameters[1]) batchSize = optimal_parameters[0] maxSequenceLength = optimal_parameters[1] random_state = np.random.RandomState(seed=87654) # output of the model is dependent on the interaction between alpha and the number of epochs (n_iter) # When alpha is very small, n_iter must be large to compensate for the slower learning rate # When using SGD, the partial_fit method has to be applied on different batches of the training data, # and we need 
to epoch multiple times total_labels = len(Counter(train_target)) # Need to set the output directory, where the temporary results for the model training will be stored torch.cuda.empty_cache() DEEP_LEARNING_OUTPUT_DIR = './DeepLearningOutputs/' + userName + '/' if not os.path.exists(DEEP_LEARNING_OUTPUT_DIR): os.makedirs(DEEP_LEARNING_OUTPUT_DIR) # Create a TransformerModel trainedModel = DeepLearningModel(dl_model_type, dl_model_name, batchSize, maxSequenceLength, num_epochs=30, random_state=4987, output_dir=DEEP_LEARNING_OUTPUT_DIR) trainedModel.fit(training_data) trainedModelName = 'BERT_BatchSize=' + str(batchSize) + '_MaxSequenceLength=' + str( maxSequenceLength) # needs to be clarified######################################################################################################### trainedModelName = 'BERT_BatchSize=' + str(batchSize) + '_MaxSequenceLength=' + str( maxSequenceLength) + '_' + gui_parameters['target_performance_measure'].replace(' (default)', '') ################################################################################################################################# es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"trainingDataPerformance": list(trainingDataPerformances), "trainingDataPerformancesStandardDeviations": list( trainingDataPerformancesStandardDeviation), "optimal_model_parameter": optimal_model_parameter, "trainedModelName": trainedModelName}}) # Prediction Phase trainedModel = pickle.dumps(trainedModel) s3.put_object(Body=trainedModel, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') # The code for patent and journal testing data is different because it required different preprocessing if gui_parameters['testing_data_type'] == 'Patent': file_test_open = testing_data if 'Deep Learning' not in str_model_name: file_test_open = file_test_open.split('\n') # split by new line file_test_open = list(filter(None, file_test_open)) # delete empty lines # Now, the first line is header, so remove the first line file_test_open = file_test_open[1:] progress_text = progress_text + "\nPreprocessing unlabeled data..." # Build the stop words stops = stopwords aux_stops = './static/AuxStops.txt' aux_stops = open(aux_stops, 'r').read() aux_stops = re.sub("[^a-zA-Z ]", " ", aux_stops) # remove non-alphanumeric aux_stops = " ".join(aux_stops.split()) # split by any whitespace and rejoin w/ space aux_stops = aux_stops.split(' ') aux_stops = list(filter(None, aux_stops)) # append auxiliary stops stops = stops + aux_stops # Bulid stopterm dictionary stopterms = {} for stop in stops: if stop in stopterms: stopterms[stop] += 1 else: stopterms[stop] = 1 (file_test_proc, file_test_stem, temp) = preprocess_collection(file_test_open, stopterms, False, progress_text) file_test_proc = list(filter(None, file_test_proc)) es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"file_test_proc": file_test_proc}}) file_test = list(filter(None, file_test_stem)) title_test = [doc.split('\t')[1] for doc in file_test] abstract_test = [doc.split('\t')[2] for doc in file_test] claim_test = [doc.split('\t')[3] for doc in file_test] test_data = [' '.join(doc) for doc in zip(title_test, abstract_test, claim_test)] progressbar_value += 5 progress_text = progress_text + "\nMaking predictions on unlabeled data..." 
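                    # Descriptive note: the unlabeled documents are vectorised below with
                    # tfidf_vect.transform(), i.e. the vectorizer that was fitted on the training
                    # data above, so the test matrix shares the training feature space
                    # (re-fitting it here would break the model's column alignment).
                    # The predictions are widened with astype('U128'), presumably so that the
                    # longer label 'others' can later be written into the same array without truncation.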
es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"progress_text": progress_text, "progressbar_value": progressbar_value}}) # convert text data to tfidf test_data_tfidf = tfidf_vect.transform(test_data) model_predicted = model.predict(test_data_tfidf.todense()) model_predicted = model_predicted.astype('U128') else: progressbar_value += 5 progress_text = progress_text + "\nMaking predictions on unlabeled data..." es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"progress_text": progress_text, "progressbar_value": progressbar_value}}) model_predicted = model.predict(testing_data) model_predicted = model_predicted.astype('U128') elif gui_parameters['testing_data_type'] == 'Journal': file_test_open = testing_data if 'Deep Learning' not in str_model_name: file_test_open = file_test_open.split('\n') # split by new line file_test_open = list(filter(None, file_test_open)) # delete empty lines # Now, the first line is header, so remove the first line file_test_open = file_test_open[1:] progress_text = progress_text + "\nPreprocessing unlabeled data..." # Remove the duplicated document based on "title" file_test_open = dedup_collection_journal(file_test_open, 1, 2) # preprocessing for scoupus data file_test_proc = preprocess_collection_journal(file_test_open) user_defined_stopwords = [] test_data = [' '.join([stop_and_stem_journal(doc.split('\t')[1], user_defined_stopwords) , stop_and_stem_journal(doc.split('\t')[2], user_defined_stopwords) ]) for doc in file_test_proc] progressbar_value += 5 progress_text = progress_text + "\nMaking predictions on unlabeled data..." # convert text data to tfidf test_data_tfidf = tfidf_vect.transform(test_data) model_predicted = model.predict(test_data_tfidf.todense()) model_predicted = model_predicted.astype('U128') else: progressbar_value += 5 progress_text = progress_text + "\nMaking predictions on unlabeled data..." 
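                    # Descriptive note: in this transformer branch the raw testing_data returned
                    # by Process_All_Files is passed straight to model.predict() below -- no
                    # TF-IDF transform is applied here; the DeepLearningModel wrapper presumably
                    # handles its own tokenisation.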
es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"progress_text": progress_text, "progressbar_value": progressbar_value}}) model_predicted = model.predict(testing_data) model_predicted = model_predicted.astype('U128') # end journal test data preprocessing model_predicted_training = model.predict(train_tfidf.todense()) model_predicted_training = model_predicted_training.astype('U128') model_isotonic_calibration = None model_sigmoid_calibration = None es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text, "progressbar_value": progressbar_value, "model_predicted": json.dumps(model_predicted.tolist()), "model_predicted_training": json.dumps(model_predicted_training.tolist()), "file_test_open": file_test_open, "file_test_proc": file_test_proc}}) model_prob = None # The 'predict_proba 'function cannot be used for SGD with hinge loss, # hence we need calibrate the probability estimates for SGD with hinge loss if str_model_name == 'Support vector Machine' or str_model_name == 'One vs Rest(Support vector Machine)': # calibrate probabilities that will be used by the excel sheet if len(train_target) > 500: model_isotonic_calibration = CalibratedClassifierCV(model, cv="prefit", method='isotonic') model_isotonic_calibration.fit(train_tfidf.todense(), train_target) model_isotonic_calibrations = pickle.dumps(model_isotonic_calibration) s3.put_object(Body=model_isotonic_calibrations, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_isotonic_calibration.pkl') model_prob = model_isotonic_calibration.predict_proba(test_data_tfidf.todense()) model_prob_all_training = model_isotonic_calibration.predict_proba( train_tfidf.todense()) else: model_sigmoid_calibration = CalibratedClassifierCV(model, cv="prefit", method='sigmoid') model_sigmoid_calibrations = pickle.dumps(model_sigmoid_calibration) s3.put_object(Body=model_sigmoid_calibrations, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_sigmoid_calibration.pkl') model_sigmoid_calibration.fit(train_tfidf.todense(), train_target) model_prob = model_sigmoid_calibration.predict_proba(test_data_tfidf.todense()) model_prob_all_training = model_sigmoid_calibration.predict_proba( train_tfidf.todense()) else: if 'Deep Learning' in str_model_name: model_prob = model.predict_proba(testing_data) model_prob_all_training = model.predict_proba(training_data) else: model_prob = model.predict_proba(test_data_tfidf.todense()) model_prob_all_training = model.predict_proba(train_tfidf.todense()) # classify the patent with rel. threshold < th to "others" class model_prob_all = copy.copy(model_prob) model_prob[model_prob < relevanceThreshold] = 0.0 model_prob_new = np.sum(model_prob, axis=1) model_predicted[model_prob_new == 0] = 'others' testingDataNumInstances = len(model_predicted) es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"testingDataNumInstances": testingDataNumInstances, "model_prob_all_training": json.dumps(model_prob_all_training.tolist()), "model_prob_all": json.dumps(model_prob_all.tolist())}}) # Update the treeview with the distribution of instances in the training data testingDataStatistics = [] for label in set(model_predicted): distribution = str( np.round(model_predicted.tolist().count(label) * 100.0 / len(model_predicted) * 1.0, 2)) + '%' testingDataStatistics.append( [label, model_predicted.tolist().count(label), distribution]) progressbar_value += 10 progress_text = progress_text + "\nSaving results..." 
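# --- Illustrative sketch of the relevance-threshold rule applied above (hypothetical helper,
# --- not called anywhere; assumes numpy is imported as np, as elsewhere in this module).
# --- Note that with relevanceThreshold = 0.0, as set at the top of this view, no probability
# --- is ever zeroed out, so in practice no document is re-labelled 'others' by this rule.
def _apply_relevance_threshold(probs, predicted, threshold):
    """Re-label a document as 'others' when all of its class probabilities fall below threshold."""
    probs = np.array(probs, dtype=float)
    predicted = np.array(predicted, dtype=object)
    probs[probs < threshold] = 0.0                 # zero out low-confidence class probabilities
    predicted[probs.sum(axis=1) == 0] = 'others'   # nothing survived the threshold -> 'others'
    return predicted
# e.g. _apply_relevance_threshold([[0.1, 0.2], [0.6, 0.3]], ['cls_a', 'cls_b'], 0.5)
#      -> array(['others', 'cls_a'], dtype=object)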
excel_status_code = 200 testLabels = labels progressbar_value += 10 programRunEndTime = datetime.now() timeDifference = relativedelta(programRunEndTime, programRunStartTime) programRunStartTimeLabel = "Program run took %d days %d hours %d minutes %d seconds." % ( timeDifference.days, timeDifference.hours, timeDifference.minutes, timeDifference.seconds) progressbarlabel_text = programRunStartTimeLabel # textboxProgress.config(state=NORMAL) progress_text = progress_text + \ "\nPatent classification completed!\nPlease check all the results in the file: " + re.sub( '.txt', '_Threshold_Analysis.xlsx', gui_parameters['testing_file_name']) + \ "\n" + programRunStartTimeLabel progressbar_value += 5 final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"testingDataStatistics": json.dumps(testingDataStatistics), 'progressbar_value': progressbar_value, "progress_text": progress_text, "excel_status_code": excel_status_code, "final_progress_value": final_progress_value, "progressbarlabel_text": progressbarlabel_text}}) if testingDataType == 'Patent': # Need to be clarified Prediction_Results_patent_FileName = re.sub('.txt', '_Prediction_Results.txt', testingFileName); # with open(Prediction_Results_patent_FileName, 'w') as fout: fout = '' for doc, model_i in zip(file_test_proc, model_predicted): doc = doc.strip('\r').strip('\n') doc = doc.replace('"', ' ') fout += '\t'.join([doc, model_i.strip('\t').strip('\r').strip('\n')]) + '\n' if model_i == 'others': file_others.append(doc) responsesupervised = HttpResponse(file_others, content_type='text/plain') responsesupervised['Content-Disposition'] = 'attachment; filename=' + Prediction_Results_patent_FileName else: file_classified.append(doc) responsesupervised = HttpResponse(file_classified, content_type='text/plain') responsesupervised['Content-Disposition'] = 'attachment; filename=' + Prediction_Results_patent_FileName model_classified.append(model_i) elif testingDataType == 'Journal': # Need to be clarified Prediction_Results_journal_FileName = re.sub('.txt', '_Prediction_Results.txt', testingFileName); fout = '' # with open(Prediction_Results_journal_FileName, 'w') as fout: for doc, model_i in zip(file_test_proc, model_predicted): doc = doc.strip('\r').strip('\n') doc = doc.replace('"', ' ') fout += '\t'.join([doc, model_i.strip('\t').strip('\r').strip('\n')]) + '\n' if model_i == 'others': file_others.append(doc) responsesupervised = HttpResponse(file_others, content_type='text/plain') responsesupervised['Content-Disposition'] = 'attachment; filename=' + Prediction_Results_journal_FileName else: file_classified.append(doc) responsesupervised = HttpResponse(file_classified, content_type='text/plain') responsesupervised['Content-Disposition'] = 'attachment; filename=' + Prediction_Results_journal_FileName model_classified.append(model_i) # model_predicted_index = [labels.index(cls) for cls in model_predicted.tolist()] # model_classified_index = [labels.index(cls) for cls in model_classified] return HttpResponse('successfully executed') except Exception as e: errorString = 'Error while running the program please contact IP Group Analytics team.' 
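        # Assumption (descriptive comment): final_progress_value is set to 200 on both the
        # success and the error paths of this view; the polling front end (which reads it via
        # fetch_update above) appears to treat 200 as "run finished", so setting it here stops
        # the progress polling even though the run failed.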
final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"final_progress_value": final_progress_value, "errorString": errorString}}) print(sys.exc_info()) return HttpResponse("Error running the program {}".format(sys.exc_info()[-1].tb_lineno) + str(e)) def preprocess_collection(file_open, stopterms, printInfo, progress_text): # apollo4.globals.progress_text master = [] # list for used application numbers repeat = [] # list for duplicate application numbers file_temp = [] # updated collection file counter = 0 num_docs = len(file_open) for index, doc in enumerate(file_open, start=1): try: apn = doc.split("\t") apn = apn[4].upper() if apn not in master: file_temp.append(doc) master.append(apn) counter = counter + 1 elif apn in master: repeat.append(apn) except Exception as e: progress_text = progress_text + "*" * 50 + "\n" + "ERROR: The document number %d in the file could not be processed" % index + "\n" + "-" * 50 continue # step through collection docs (ie lines) file_proc = [] file_stem = [] file_stop_words_removed = [] design_count = 0 # counter for design cases utility_count = 0 # counter for utility cases for file_index, file_line in enumerate(file_temp, start=1): file_line = file_line.split("\t") # split by tab # take correct col number docs only try: no = str(file_index) file_t = file_line[1] # title file_a = file_line[2] # abstract file_c = file_line[3] # claims apn = file_line[4].lower() apd = file_line[5] asgn = file_line[6].lower() if len(file_line) > 7: upc = file_line[7].lower() if len(file_line) > 8: label = file_line[8].lower() # solve the issue if label has tab except Exception as e: progress_text = progress_text + "*" * 50 + "\n" + "\n" + "ERROR: The document number %d in the file could not be processed" % file_index + "\n" + "-" * 50 if apn.startswith("us2"): # filter out design cases progress_text = progress_text + "*" * 50 + "\n" + "Design patent found! 
App_No: %r\tUPC: %r" % ( apn, upc) + '\n' + "-" * 50 design_count = design_count + 1 elif apn.startswith("us"): # filter out non-apn lines (ie not patent data) utility_count = utility_count + 1 # stop and stem title, abstract, claim file_t_stem = stop_and_stem(file_t, stopterms) file_a_stem = stop_and_stem(file_a, stopterms) file_c_stem = stop_and_stem(file_c, stopterms) # remove stopwords from the title, abstract, claim file_t_stop = remove_stopwords(file_t, stopterms) file_a_stop = remove_stopwords(file_a, stopterms) file_c_stop = remove_stopwords(file_c, stopterms) # Output the orginal clean version of utility patent file_new_line = '\t'.join(file_line) file_proc.append(file_new_line) # Output the preprocessed version of utility patent if len(file_line) > 7: proc_doc = [no, file_t_stem, file_a_stem, file_c_stem, apd, apn, asgn, upc] proc_doc_stop = [no, file_t_stop, file_a_stop, file_c_stop, apd, apn, asgn, upc] else: proc_doc = [no, file_t_stem, file_a_stem, file_c_stem, apd, apn, asgn] proc_doc_stop = [no, file_t_stop, file_a_stop, file_c_stop, apd, apn, asgn] if len(file_line) > 8: # solve the issue if label has tab proc_doc.append(label) proc_doc_stop.append(label) proc_doc = '\t'.join(proc_doc) proc_doc_stop = '\t'.join(proc_doc_stop) file_stem.append(proc_doc) file_stop_words_removed.append(proc_doc_stop) output = (file_proc, file_stem, file_stop_words_removed) return output def dedup_collection_journal(file_open, uid, abstract_id): try: new_file_list = [] new_item_list = [] for doc in file_open: item = '\t'.join(doc.split('\t')[uid: abstract_id + 1]) if item not in new_item_list: new_item_list.append(item) new_file_list.append(doc) return new_file_list except Exception as e: return HttpResponse( "Error running the program.") def preprocess_collection_journal(file_open): # Sentence Level Preprocessing try: file_proc = [] for file_index, file_line in enumerate(file_open, start=1): doc = file_line.split('\t') abstract = doc[2] # remove copyright info try: copyright_str = [u'\ufffd', 'copyright', 'ieee', 'acm', 'rights reserved', 'press', 'all rights reserved'] sent_list = list(filter(None, abstract.split('. '))) new_sent_list = [sent for sent in sent_list if all(x not in sent.decode('utf-8').lower() for x in copyright_str)] new_abstract = '. 
'.join(new_sent_list) doc[2] = new_abstract except: pass file_proc.append('\t'.join(doc)) return file_proc except Exception as e: return HttpResponse( "Error running the program.") def stop_and_stem_journal(file_t, user_defined_stopwords): ''' Input: file_t: a text string, stopterms: a dictionary of stop terms Output: file_stem: a list of stopped and stemmed terms ''' try: stopterms = build_stopterms_journal(user_defined_stopwords) # remove the patent specific terms file_t = file_t.lower() # lowercase all file_t = re.sub("[^a-zA-Z0-9 ]", " ", file_t) # remove non-alphanumeric file_t = re.sub("\s[0-9]+$", '', file_t) file_t = re.sub("\s[0-9]+\s", ' ', file_t) file_t = re.sub("^[0-9]+\s", '', file_t) file_t = re.sub("androids*", "antroid", file_t) file_t = re.sub("andes", "antes", file_t) file_t = re.sub("and[0-9a-z]+", "", file_t) file_t = re.sub("antroid", "android", file_t) file_t = re.sub("antes", "andes", file_t) file_t = re.sub("including[0-9a-z]+", "", file_t) file_t = re.sub("wherein[0-9a-z]+", "", file_t) file_t = re.sub("comprising[0-9a-z]+", "", file_t) formula_chk0 = re.findall(" formula | math ", file_t) formula_chk1 = re.findall(" tail ", file_t) formula_chk2 = re.findall(" lead ", file_t) if len(formula_chk0) > 0 and len(formula_chk1) > 0 and len(formula_chk2) > 0: file_t = re.sub(" formula | math ", " ", file_t) file_t = re.sub(" tail ", " ", file_t) file_t = re.sub(" lead ", " ", file_t) file_t = " ".join(file_t.split()) # split by any whitespace and rejoin w/ space file_t = file_t.split(" ") # split by space # remove the stop terms in the text file_stop = [] # initialize list for term in file_t: if term not in stopterms: file_stop.append(term) # stem using porter algorithm file_stem = [] # initialize list for term in file_stop: try: term = wn().lemmatize(term) except: pass term = str(term) file_stem.append(term) file_stem = ' '.join(file_stem) return file_stem except Exception as e: return HttpResponse( "Error running the program.") def build_stopterms_journal(user_defined_stopwords): # Build stop terms try: stops = stopwords aux_stops = './static/AuxStops-Journal.txt' aux_stops = open(aux_stops, 'r').read() aux_stops = re.sub("[^a-zA-Z ]", " ", aux_stops) # remove non-alphanumeric aux_stops = (" ".join(aux_stops.split())).split() # remove white space aux_stops = list(filter(None, aux_stops)) stops = stops + aux_stops if len(user_defined_stopwords) > 0: user_defined_stops = (" ".join(user_defined_stopwords.split(','))).split() # Add user defined stop words to the stop words list stops = stops + user_defined_stops # Bulid stopterm dictionary stopterms = {} for stop in stops: if stop in stopterms: stopterms[stop] += 1 else: stopterms[stop] = 1 return stopterms except Exception as e: return HttpResponse( "Error running the program. 
") def stop_and_stem(file_t, stopterms): ''' Input: file_t: a text string, stopterms: a dictionary of stop terms Output: file_stem: a list of stopped and stemmed terms ''' # remove the patent specific terms try: file_t = file_t.lower() # lowercase all file_t = re.sub("[^a-zA-Z0-9 ]", " ", file_t) # remove non-alphanumeric file_t = re.sub("\s[0-9]+$", '', file_t) file_t = re.sub("\s[0-9]+\s", ' ', file_t) file_t = re.sub("^[0-9]+\s", '', file_t) file_t = re.sub("androids*", "antroid", file_t) file_t = re.sub("andes", "antes", file_t) file_t = re.sub("and[0-9a-z]+", "", file_t) file_t = re.sub("antroid", "android", file_t) file_t = re.sub("antes", "andes", file_t) file_t = re.sub("including[0-9a-z]+", "", file_t) file_t = re.sub("wherein[0-9a-z]+", "", file_t) file_t = re.sub("comprising[0-9a-z]+", "", file_t) formula_chk0 = re.findall(" formula | math ", file_t) formula_chk1 = re.findall(" tail ", file_t) formula_chk2 = re.findall(" lead ", file_t) if len(formula_chk0) > 0 and len(formula_chk1) > 0 and len(formula_chk2) > 0: file_t = re.sub(" formula | math ", " ", file_t) file_t = re.sub(" tail ", " ", file_t) file_t = re.sub(" lead ", " ", file_t) file_t = " ".join(file_t.split()) # split by any whitespace and rejoin w/ space file_t = file_t.split(" ") # split by space # remove the stop terms in the text file_stop = [] # initialize list for term in file_t: if term not in stopterms: file_stop.append(term) # stem using porter algorithm file_stem = [] # initialize list for term in file_stop: try: term = wn().lemmatize(term) except: pass term = str(term) file_stem.append(term) file_stem = ' '.join(file_stem) return file_stem except Exception as e: return HttpResponse( "Error running the program.") def remove_stopwords(file_t, stopterms): ''' Input: file_t: a text string, stopterms: a dictionary of stop terms Output: file_stem: a list of stopped and stemmed terms ''' # remove the patent specific terms try: file_t = file_t.lower() # lowercase all file_t = re.sub("[^a-zA-Z0-9 ]", " ", file_t) # remove non-alphanumeric file_t = re.sub("\s[0-9]+$", '', file_t) file_t = re.sub("\s[0-9]+\s", ' ', file_t) file_t = re.sub("^[0-9]+\s", '', file_t) file_t = re.sub("androids*", "antroid", file_t) file_t = re.sub("andes", "antes", file_t) file_t = re.sub("and[0-9a-z]+", "", file_t) file_t = re.sub("antroid", "android", file_t) file_t = re.sub("antes", "andes", file_t) file_t = re.sub("including[0-9a-z]+", "", file_t) file_t = re.sub("wherein[0-9a-z]+", "", file_t) file_t = re.sub("comprising[0-9a-z]+", "", file_t) formula_chk0 = re.findall(" formula | math ", file_t) formula_chk1 = re.findall(" tail ", file_t) formula_chk2 = re.findall(" lead ", file_t) if len(formula_chk0) > 0 and len(formula_chk1) > 0 and len(formula_chk2) > 0: file_t = re.sub(" formula | math ", " ", file_t) file_t = re.sub(" tail ", " ", file_t) file_t = re.sub(" lead ", " ", file_t) file_t = " ".join(file_t.split()) # split by any whitespace and rejoin w/ space file_t = file_t.split(" ") # split by space # remove the stop terms in the text file_stop = [] # initialize list for term in file_t: if term not in stopterms: file_stop.append(term) file_stop = ' '.join(file_stop) return file_stop except Exception as e: return HttpResponse( "Error running the program.") def remove_stopwords_journal(file_t, user_defined_stopwords): ''' Input: file_t: a text string, stopterms: a dictionary of stop terms Output: file_stem: a list of stopped and stemmed terms ''' try: stopterms = build_stopterms_journal(user_defined_stopwords) # remove the patent 
specific terms file_t = file_t.lower() # lowercase all file_t = re.sub("[^a-zA-Z0-9 ]", " ", file_t) # remove non-alphanumeric file_t = re.sub("\s[0-9]+$", '', file_t) file_t = re.sub("\s[0-9]+\s", ' ', file_t) file_t = re.sub("^[0-9]+\s", '', file_t) file_t = re.sub("androids*", "antroid", file_t) file_t = re.sub("andes", "antes", file_t) file_t = re.sub("and[0-9a-z]+", "", file_t) file_t = re.sub("antroid", "android", file_t) file_t = re.sub("antes", "andes", file_t) file_t = re.sub("including[0-9a-z]+", "", file_t) file_t = re.sub("wherein[0-9a-z]+", "", file_t) file_t = re.sub("comprising[0-9a-z]+", "", file_t) formula_chk0 = re.findall(" formula | math ", file_t) formula_chk1 = re.findall(" tail ", file_t) formula_chk2 = re.findall(" lead ", file_t) if len(formula_chk0) > 0 and len(formula_chk1) > 0 and len(formula_chk2) > 0: file_t = re.sub(" formula | math ", " ", file_t) file_t = re.sub(" tail ", " ", file_t) file_t = re.sub(" lead ", " ", file_t) file_t = " ".join(file_t.split()) # split by any whitespace and rejoin w/ space file_t = file_t.split(" ") # split by space # remove the stop terms in the text file_stop = [] # initialize list for term in file_t: if term not in stopterms: file_stop.append(term) return ' '.join(file_stop) except Exception as e: return HttpResponse( "Error running the program. ") def get_topic_list(model, feature_names, n_top_words): try: topic_list = [] n_top_words = int(n_top_words) for topic_idx, topic in enumerate(model.components_): topic_list.append(" | ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])) return topic_list except Exception as e: return HttpResponse( "Error running the program.") ############# runDocumentClassifierSupervised ends######################### ############# saving and retreiving model starts######################### @csrf_exempt def save_both_existing_model(request): modelSaveStartTime = datetime.now() s3 = boto3.client('s3') INDEX_NAME = 'savemodelsupervised' trainingDataPatents = 'training_data_patents.txt' trainingDataJournals = 'training_data_journals.txt' trainedModel_file = "trainedModel.pkl" topicModelingFrame_file = 'topicModelingFrame.pkl' tfidf_vect_file = "tfidf_vect.pkl" user_defined_stopwords_file = "user_defined_stopwords.pkl" try: if request.method == 'POST': username = request.user.username; save_model_Name = request.POST.getlist('input')[0] saveName = json.loads(save_model_Name); if saveName['existingSaveModel']: saveProjectName = saveName['exisitingProjectName'] save_project_description = saveName['exisitingProjectDescription'] saveProjectValidation = saveName['saveProject'] saveModelDesc = saveName['existingModelDesc'] targetPerformanceMeasure = saveName['target_performance_measure'] modelName = saveName['trainedModelName'] elif saveName['newSaveModel']: saveProjectName = saveName['newProjectName'] save_project_description = saveName['newProjectDescription'] saveProjectValidation = saveName['saveProject'] saveModelDesc = saveName['newModelDesc'] targetPerformanceMeasure = saveName['target_performance_measure'] modelName = saveName['trainedModelName'] # index_name_patent = 'patentorjournal' # query = {"query": {"bool": {"must": {"match": {"username.keyword": username}}}}} # res = es_conn.search(index=index_name_patent, body=query) # patentOrJournalTrainingData = res['hits']['hits'][0]['_source']['patentOrJournalTrainingData'] if saveName['learningType'] == 'supervised': INDEX_NAME = 'savemodelsupervised' index_name_gloabals = 'apolloglobals' query = {"query": {"bool": {"must": 
{"match": {"username.keyword": username}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] data = res['hits']['hits'][0]['_source'] user_defined_stopwords = data['user_defined_stopwords'] trainingDataPerformances = data['trainingDataPerformance'] trainingDataPerformancesStandardDeviation = data['trainingDataPerformancesStandardDeviations'] trainingDataStatistics = data['trainingDataStatistics'] trainingDataType = data['trainingDataType'] trainedModelName = saveModelDesc + '_' + modelName model_data = { 'saved_date': datetime.now(), 'trainingDataNumInstances': data['trainingDataNumInstances'], # Training examples 'trainingDataNumClasses': data['trainingDataNumClasses'], # training classes 'trainingDataTables': trainingDataStatistics, # trainig data table 'trainingDataPerformances': trainingDataPerformances, # model evaluation before +/- 'trainingDataPerformancesStandardDeviation': trainingDataPerformancesStandardDeviation, # model evaluation after +/- 'str_parameter_name': data['str_parameter_name'], # Hyper parameter left before = 'trainedModelName': trainedModelName, 'optimal_model_parameter': data['optimal_model_parameter'], # Hyper parameter right after = 'learningType': 'supervised' } elif saveName['learningType'] == 'unsupervised': INDEX_NAME = 'savemodelunsupervised' index_name_gloabals = 'apolloglobalsunsupervised' query = {"query": {"bool": {"must": {"match": {"username.keyword": username}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] data = res['hits']['hits'][0]['_source'] user_defined_stopwords = data['user_defined_stopwords'] clusterTopicsAndCounts = data['clusterTopicsAndCounts'] numberOfClusters = data['numberOfClusters'] number_of_top_words = data['number_of_top_words'] testingDataType = data['training_data_type'] trainedModelName = saveModelDesc + '_K_Means_Clustering_NumClusters=' + str( numberOfClusters) + '_TopWords=' + str( number_of_top_words) model_data = { 'saved_date': datetime.now(), 'clusterTopicsAndCounts': json.dumps(clusterTopicsAndCounts), 'trainedModelName': trainedModelName, 'learningType': 'unsupervised' } save_model_data = { 'username': username, 'saveProjectName': saveProjectName, 'save_project_description': save_project_description, 'model_data': model_data, } # save_response = es_conn.indices.create(index=INDEX_NAME, ignore=400) if saveName['newSaveModel']: if saveProjectValidation: save_response = es_conn.create(index=INDEX_NAME, doc_type=TYPE_NAME_USER, body=save_model_data, id=uuid.uuid4()) elif saveName['existingSaveModel']: # this is for the append of model to the existing projectName if saveProjectValidation: save_response = es_conn.create(index=INDEX_NAME, doc_type=TYPE_NAME_USER, body=save_model_data, id=uuid.uuid4()) # this is for the replace of model to the existing projectName # else: # res = es_conn.update(index=INDEX_NAME, id=hits_id, body={"doc": {"model_data": model_data}}) # res = es_conn.delete_by_query(index=INDEX_NAME,body=query save_response_validation = save_response['_shards']['successful'] if save_response_validation > 0: es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"saved_project_status": 200}}) append_text_to_history_file = "\n" user_defined_stopwords = pickle.dumps(user_defined_stopwords) if saveName['learningType'] == 'supervised': finalTrainingData = request.FILES.getlist('trainFile') training_data = Process_All_Files(finalTrainingData) key_value = 'runDocumentClassifier/' key_value += username + '/' 
tfidf_vectorizer = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'tfidf_vect.pkl') tfidf_vect = tfidf_vectorizer['Body'].read() trainedModel = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') trainedModel = trainedModel['Body'].read() key_value = 'classificationprojects/' key_value += saveProjectName + '/supervised/' key_value += trainedModelName + '/' programRunStartTime = datetime.now() programRunStartTimeLabel = 'Progress: Program run started at ' + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + ' (UTC time). ' append_text_to_history_file += '-' * 100 + '\n' append_text_to_history_file += 'username: ' + username + '\n' append_text_to_history_file += "Program run started at " + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + " (UTC time).\n" append_text_to_history_file += '-' * 100 append_text_to_history_file += '\nProject name: ' + saveProjectName + '\n' append_text_to_history_file += 'Project description: ' + save_project_description + '\n' # if trainingDataType == 'Patent': append_text_to_history_file += 'Supervised learning model ' + trainedModelName + ' was trained on the PATENT training data file \n' elif trainingDataType == 'Journal': append_text_to_history_file += 'Supervised learning model ' + trainedModelName + ' was trained on the JOURNAL training data file \n' # write the number of instances and classes for tracking purposes numInstancesInTrainingData = 0 stringToDisplayTrainingDataStats = '{:<40s}{:>20s}{:>20s}'.format('Class', '# Examples', 'Class %') + '\n' for entry in json.loads(trainingDataStatistics): stringToDisplayTrainingDataStats += '{:<40s}{:>20s}{:>20s}'.format(str(entry[0]), str(entry[1]), str(entry[2])) + '\n' numInstancesInTrainingData += int(entry[1]) append_text_to_history_file += 'Total number of documents in the training data: ' + str( numInstancesInTrainingData) + '\n' append_text_to_history_file += 'Total number of classes in the training data: ' + str( len(trainingDataStatistics)) + '\n' append_text_to_history_file += 'The model parameters were optimized for \'' + targetPerformanceMeasure + '\'.' + '\n' append_text_to_history_file += '5-fold Cross Validation Performance: ' + '\n' perfMeasuresStr = ['Accuracy:', 'AUC:', 'Precision:', 'Recall:', 'F1:'] for i in range(len(trainingDataPerformances)): stringToWrite = '{:<10s}{:>10.2f}{:>4s}{:>10.2f}{:>1s}'.format(perfMeasuresStr[i], trainingDataPerformances[ i] * 100.0, '% +/- ', trainingDataPerformancesStandardDeviation[ i] * 100.0, '%') append_text_to_history_file += stringToWrite + '\n' append_text_to_history_file += programRunStartTimeLabel + '.' 
+ '\n' s3.put_object(Body=tfidf_vect, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + tfidf_vect_file) s3.put_object(Body=user_defined_stopwords, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + user_defined_stopwords_file) s3.put_object(Body=trainedModel, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + trainedModel_file) if trainingDataType == 'Patent': s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + trainingDataPatents) elif trainingDataType == 'Journal': s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + trainingDataJournals) if 'Support_Vector_Machine' in trainedModelName: try: model_sigmoid_calibrations = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_sigmoid_calibration.pkl') model_sigmoid_calibration = model_sigmoid_calibrations['Body'].read() model_sigmoid_calibration = pickle.loads(model_sigmoid_calibration) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing patent training data, so no need to deduplicate based on previous data. model_isotonic_calibrations = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_isotonic_calibration.pkl') model_isotonic_calibration = model_isotonic_calibrations['Body'].read() model_sigmoid_calibration = pickle.loads(model_isotonic_calibration) else: model_isotonic_calibrations = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_isotonic_calibration.pkl') model_isotonic_calibration = model_isotonic_calibrations['Body'].read() model_sigmoid_calibration = pickle.loads(model_isotonic_calibration) pass if model_isotonic_calibration != None: model_isotonic_calibration = pickle.dumps(model_isotonic_calibration) s3.put_object(Body=model_isotonic_calibration, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_isotonic_calibration.pkl') if model_sigmoid_calibration != None: model_sigmoid_calibration = pickle.dumps(model_sigmoid_calibration) s3.put_object(Body=model_sigmoid_calibration, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_sigmoid_calibration.pkl') elif saveName['learningType'] == 'unsupervised': finalTrainingData = request.FILES.getlist('trainFile') training_data = Process_All_Files(finalTrainingData) key_value = 'classificationprojects/' key_value += saveProjectName + '/unsupervised/' key_value += trainedModelName + '/' programRunStartTime = datetime.now() programRunStartTimeLabel = 'Progress: Program run started at ' + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + '. ' append_text_to_history_file += '-' * 100 + '\n' append_text_to_history_file += 'username: ' + username + '\n' append_text_to_history_file += "Program run started at " + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + " (UTC time).\n" append_text_to_history_file += '-' * 100 append_text_to_history_file += '\nProject name: ' + saveProjectName + '\n' append_text_to_history_file += 'Project description: ' + save_project_description + '\n' # if testingDataType == 'Patent': append_text_to_history_file += 'Unsupervised learning model ' + trainedModelName + ' was trained on the PATENT training data file \n' elif testingDataType == 'Journal': append_text_to_history_file += 'Unsupervised learning model ' + trainedModelName + ' was trained on the JOURNAL training data file \n' # write the number of instances and classes for tracking purposes append_text_to_history_file += programRunStartTimeLabel + '.' 
+ '\n' key_value = 'runDocumentClassifierUnsupervised/' key_value += username + '/' tfidf_vectorizer = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'tfidf_vect.pkl') tfidf_vect = tfidf_vectorizer['Body'].read() trainedModel = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') trainedModel = trainedModel['Body'].read() topicModelingFrame = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'topicModelingFrame.pkl') topicModelingFrame = topicModelingFrame['Body'].read() key_value = 'classificationprojects/' key_value += saveProjectName + '/unsupervised/' key_value += trainedModelName + '/' s3.put_object(Body=tfidf_vect, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + tfidf_vect_file) s3.put_object(Body=user_defined_stopwords, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + user_defined_stopwords_file) s3.put_object(Body=trainedModel, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + trainedModel_file) s3.put_object(Body=topicModelingFrame, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + topicModelingFrame_file) if testingDataType == 'Patent': s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + trainingDataPatents) elif testingDataType == 'Journal': s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + trainingDataJournals) modelSaveEndTime = datetime.now() timeDifference = relativedelta(modelSaveEndTime, modelSaveStartTime) modelSavingTimeLabel = "Saving the model took %d hours %d minutes %d seconds." % ( timeDifference.hours, timeDifference.minutes, timeDifference.seconds) append_text_to_history_file += modelSavingTimeLabel + '\n' + '*' * 95 + '\n' try: response2 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') history_file_old_text = response2['Body'].read().decode('utf-8') append_text_to_history_file = history_file_old_text + append_text_to_history_file s3.put_object(Body=append_text_to_history_file, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing history file, so create a new history file and write the history into that file in S3. 
s3.put_object(Body=append_text_to_history_file, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') elif e.response['Error']['Code'] == "NoSuchKey": pass else: pass res = { 'message': 'saved successfully' } return JsonResponse(res) except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def save_both_validation(request): try: if request.method == 'POST': userName = request.user.username; save_model_Name = request.body.decode('utf-8'); saveName = json.loads(save_model_Name); if saveName['existingSaveModel']: saveProjectName = saveName['exisitingProjectName'] saveModelDesc = saveName['existingModelDesc'] trainedModelName = saveName['trainedModelName'] elif saveName['newSaveModel']: saveProjectName = saveName['newProjectName'] saveModelDesc = saveName['newModelDesc'] trainedModelName = saveName['trainedModelName'] if saveName['learningType'] == 'supervised': INDEX_NAME = 'savemodelsupervised' elif saveName['learningType'] == 'unsupervised': INDEX_NAME = 'savemodelunsupervised' if saveName['newSaveModel']: query = {"query": {"bool": {"must": {"match": {"saveProjectName.keyword": saveProjectName}}}}} res = es_conn.search(index=INDEX_NAME, body=query) res_hits = res['hits'] res_hits_hits = res_hits['hits'] hits_length = len(res_hits_hits) if hits_length == 0: saveProject = True else: saveProject = False # res = es_conn.delete_by_query(index=INDEX_NAME,body=query elif saveName['existingSaveModel']: modelName = saveModelDesc + '_' + trainedModelName query = {"query": { "bool": { "must": [{"match": {"model_data.trainedModelName.keyword": modelName}}, {"match": {"saveProjectName.keyword": saveProjectName}}]}}} res = es_conn.search(index=INDEX_NAME, body=query) res_hits = res['hits'] res_hits_hits = res_hits['hits'] hits_length = len(res_hits_hits) model_data_count = 0 # this is for the append of model to the existing projectName if hits_length == 0: saveProject = True # this is for the replace of model to the existing projectName else: saveProject = False # res = es_conn.delete_by_query(index=INDEX_NAME,body=query return HttpResponse(saveProject) except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def retrieve_existing_Project_name(request): try: if request.method == 'POST': save_model_Name = request.body.decode('utf-8'); saveName = json.loads(save_model_Name); if saveName['learningType'] == 'supervised': INDEX_NAME = 'savemodelsupervised' elif saveName['learningType'] == 'unsupervised': INDEX_NAME = 'savemodelunsupervised' query = {"query": {"match_all": {}}} res = es_conn.search(index=INDEX_NAME, body=query) return JsonResponse(res) except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def retreieve_Model_for_seleted(request): try: if request.method == 'POST': save_model_Name = request.body.decode('utf-8'); saveName = json.loads(save_model_Name); if saveName['learningType'] == 'supervised': saveProjectName = saveName['exisitingProjectName'] INDEX_NAME = 'savemodelsupervised' elif saveName['learningType'] == 'unsupervised': saveProjectName = saveName['exisitingProjectName'] INDEX_NAME = 'savemodelunsupervised' query = {"query": {"bool": {"must": {"match": {"saveProjectName.keyword": saveProjectName}}}}} res = es_conn.search(index=INDEX_NAME, body=query) return JsonResponse(res) except Exception as e: return HttpResponse( "Error running the program.") ############# saving and retreiving model ends######################### @csrf_exempt def runUnsupervisedSaving(request): try: if 
request.method == 'POST': userName = request.user.username; update_fields = { 'username': userName, 'progressbar_maximum': 0, # progress bar max_value 'progressbar_value': 0, # progress bar value 'progressbarlabel_text': '', # progress field 'progress_text': '', # progress text 'clusterTopicsAndCounts': '', 'final_progress_value': '', 'current_tab': 0, 'errorString': '', 'numberOfClusters': 0, 'number_of_top_words': 0, "topicModelingFrame": '', "training_data_type": '', "testing_data_type": '' } index_name_gloabals = 'apolloglobalsunsupervised' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} # es_conn.indices.create(index=index_name_gloabals) es_conn.delete_by_query(index=index_name_gloabals, body=query) es_conn.create(index=index_name_gloabals, doc_type='_doc', body=update_fields, id=uuid.uuid4()) return HttpResponse('sucess') except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def userRunModelTrackUSL(request): try: if request.method == 'POST': try: userName = request.user.username; userTrack = request.body.decode('utf-8'); userTrack = json.loads(userTrack); trainingFileName = userTrack['training_file_name'] trainingDataType = userTrack['training_data_type'] str_model_name = userTrack['model'] additional_stopwords = userTrack['additional_stopwords'] number_of_top_words = userTrack['number_of_top_words'] number_of_clusters = userTrack['number_of_clusters'] except Exception as e: print('parsing went wrong', e) time = datetime.now() time = time.strftime("%I:%M%p on %B %d, %Y") update_fields = { 'username': userName, 'trainingFileName': trainingFileName, 'trainingDataType': trainingDataType, 'str_model_name': str_model_name, 'additional_stopwords': additional_stopwords, 'number_of_top_words': number_of_top_words, 'number_of_clusters': number_of_clusters, 'time': time + ' UTC Time' } index_name = 'userrunmodeltrackusl' if es_conn.indices.exists(index_name): es_conn.create(index=index_name, doc_type='_doc', body=update_fields, id=uuid.uuid4()) else: save_response = es_conn.indices.create(index=index_name, ignore=400) es_conn.create(index=index_name, doc_type='_doc', body=update_fields, id=uuid.uuid4()) return HttpResponse('sucess') except Exception as e: return HttpResponse( "Error running the program.") ############## runDocumentClassifierUnsupervised starts################################ @csrf_exempt def runDocumentClassifierUnsupervised(request): current_tab = 2 global response try: # Assuming that the client-side has already selected the options before running the program. # Assuming that the request from the client side will have all the fields necessary for running the program. if request.method == "GET": return response elif request.method == "POST": try: userName = request.user.username; gui_parameters = request.POST.getlist('inputData')[0] gui_parameters = json.loads(gui_parameters); finalTrainingData = request.FILES.getlist('trainFile') training_data = Process_All_Files(finalTrainingData) except Exception as e: print('parsing went wrong', e) index_name_gloabals = 'apolloglobalsunsupervised' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] # Set maximum value for progressbar. Reserve 20 for preprocessing and 30 for clustering. 
Additionally, for each cluster, reserve 5 for topic modeling progressbar_maximum = 50 + 5 * int(gui_parameters['number_of_clusters']) progressbar_value = 0 # Set the text in progressbarlabel programRunStartTime = datetime.now() programRunStartTimeLabel = 'Progress: Program run started at ' + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + ' (UTC time). ' progress_text = '' progressbarlabel_text = programRunStartTimeLabel progress_text = progress_text + '-' * 75 + '\n' + "Program run started at " + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + " (UTC time).\n" + \ '-' * 75 + '\n' + "Starting document classification process..." # output_folder = os.path.dirname(selectedDataFilePath) # The preprocessing is different for patent and journal data numberOfClusters = gui_parameters['number_of_clusters'] number_of_top_words = gui_parameters['number_of_top_words'] es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": 0, 'progressbarlabel_text': progressbarlabel_text, 'progress_text': progress_text, "progressbar_maximum": progressbar_maximum, "numberOfClusters": numberOfClusters, "number_of_top_words": number_of_top_words, "current_tab": current_tab}}) # index_name = 'trainingfiledata' key_value = 'runDocumentClassifierUnsupervised/' key_value += userName + '/' s3 = boto3.client('s3') if gui_parameters['training_data_type'] == 'Patent': # query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} # res = es_conn.search(index=index_name, body=query) # training_data = res['hits']['hits'][0]['_source']['training_data'] # Read the paper data from the file # [type, title, abstract, author, affiliation, year] file_data_open = training_data file_data_open = file_data_open.split('\n') # split by new line file_data_open = list(filter(None, file_data_open)) # delete empty lines # Now, the first line is a header, so remove it file_data_open = file_data_open[1:] progressbar_value = 5 progress_text = progress_text + "\nFound " + str( len(file_data_open)) + " documents! 
\nPreprocessing documents...\n" # Build the stop words stops = stopwords aux_stops = './static/AuxStops.txt' aux_stops = open(aux_stops, 'r').read() aux_stops = re.sub("[^a-zA-Z ]", " ", aux_stops) # remove non-alphanumeric aux_stops = " ".join(aux_stops.split()) # split by any whitespace and rejoin w/ space aux_stops = aux_stops.split(' ') aux_stops = list(filter(None, aux_stops)) # append auxiliary stops stops = stops + aux_stops # Append user-provided stop words to the stop words list user_defined_stopwords = (" ".join(gui_parameters['additional_stopwords'].lower().split(','))).split() es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, 'progress_text': progress_text, "user_defined_stopwords": user_defined_stopwords, "training_data_type": gui_parameters['training_data_type']}}) stops = stops + user_defined_stopwords # Bulid stopterm dictionary stopterms = {} for stop in stops: if stop in stopterms: stopterms[stop] += 1 else: stopterms[stop] = 1 (file_data_proc, file_data_stem, file_data_stop_words_removed) = preprocess_collection(file_data_open, stopterms, True, progress_text) file_data_proc = list(filter(None, file_data_proc)) file_data = list(filter(None, file_data_stem)) file_data_stop_words_removed = list(filter(None, file_data_stop_words_removed)) title_data = [doc.split('\t')[1] for doc in file_data] abstract_data = [doc.split('\t')[2] for doc in file_data] claim_data = [doc.split('\t')[3] for doc in file_data] unlabeled_data = [' '.join(doc) for doc in zip(title_data, abstract_data, claim_data)] title_data_stop_words_removed = [doc.split('\t')[1] for doc in file_data_stop_words_removed] abstract_data_stop_words_removed = [doc.split('\t')[2] for doc in file_data_stop_words_removed] claim_data_stop_words_removed = [doc.split('\t')[3] for doc in file_data_stop_words_removed] unlabeled_data_stop_words_removed = [' '.join(doc) for doc in zip(title_data_stop_words_removed, abstract_data_stop_words_removed, claim_data_stop_words_removed)] progressbar_value += 5 progress_text = progress_text + "Removed duplicates and preprocessed " + str( len(unlabeled_data)) + " documents." + \ "\nStarting unsupervised learning..." # Changed the n-grams to (1,5) in the line below tfidf_vect = TfidfVectorizer(analyzer='word', ngram_range=(1, 5), min_df=2, max_df=0.8, max_features=200000, stop_words='english', use_idf=True) # tf-idf with params unlabeled_data_tfidf = tfidf_vect.fit_transform(unlabeled_data) tfidfVectorizer = tfidf_vect tfidf = pickle.dumps(tfidfVectorizer) s3.put_object(Body=tfidf, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'tfidf_vect.pkl') # Increment progress bar value by 10 for vectorization progressbar_value += 10 # Model and model parameters model = gui_parameters['model'] number_of_clusters = gui_parameters['number_of_clusters'] es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, 'progress_text': progress_text, "model": model}}) if model == 'K-means Clustering': # determine the best model based on evaluating several models automatic_mode = True progress_text = progress_text + "\nPerforming clustering on the data using K-means clustering..." # Added additonal checks to avoid errors and exceptions # 1. Check that the number of documents in OTHERS category is at least greater than 2 if len(unlabeled_data) <= 2: progress_text = progress_text + "\nThere are only " + str(len( unlabeled_data)) + " documents in OTHERS category. 
Cannot perform clustering on less than or equal to 2 documents." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progress_text': progress_text}}) return HttpResponse("Error running the program.") # 2. Check that the number of clusters specified is greater than or equal to the number of documents categorized as OTHERS if len(unlabeled_data) < int(numberOfClusters): progress_text = progress_text + "\nThe number of documents in OTHERS category is less than the number of clusters provided in the GUI. Cannot perform clustering in this case." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progress_text': progress_text}}) return HttpResponse("Error running the program.") output_filename = re.sub('.txt', '_Results_Topic_Modeling.txt', gui_parameters['training_file_name']) es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progress_text': progress_text}}) # step2: topic extraction and clustering if len(unlabeled_data) > 2 and len(unlabeled_data) >= int(numberOfClusters): # Added random_state for fixing results of clustering random_state_kmeans = np.random.RandomState(seed=6758) km = MiniBatchKMeans(n_clusters=int(numberOfClusters), random_state=random_state_kmeans) km.partial_fit(unlabeled_data_tfidf) trainedModel = km trainedModelName = 'K_Means_Clustering_NumClusters=' + str( numberOfClusters) + '_TopWords=' + str( number_of_top_words) clusters = km.labels_.tolist() # start checking code below: test_patent_others = {'content': unlabeled_data, 'file_others': file_data_proc, 'content_stop_words_removed': unlabeled_data_stop_words_removed, 'cluster': clusters} frame = pd.DataFrame(test_patent_others, index=[clusters], columns=['content', 'file_others', 'content_stop_words_removed', 'cluster']) topicModelingFrame = frame clusterTopicsAndCounts = [] clustering_successful = False # Increment progrss bar by 30, for clustering complete progressbar_value += 30 trainedModel = pickle.dumps(trainedModel) s3.put_object(Body=trainedModel, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') topicModelingFrame = pickle.dumps(topicModelingFrame) s3.put_object(Body=topicModelingFrame, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'topicModelingFrame.pkl') es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progressbar_value': progressbar_value, "trainedModelName": trainedModelName}}) # output lda other topics # fout_others = {} # fout_others = open(output_filename) # with open(output_filename, 'w') as fout_others: fout_others = '' for no in range(int(numberOfClusters)): try: # sometimes, there is no document in the group, so handle that case with try and except patent_group = frame.groupby(frame['cluster']).get_group(no) except: # continue, because there is no document in this cluster. 
Move on to the topic modeling for next cluster continue patent_tac = patent_group.ix[:, 0].tolist() patent_org = patent_group.ix[:, 1].tolist() patent_tac_stop_words_removed = patent_group.ix[:, 2].tolist() lda_tf_vect = TfidfVectorizer(max_df=0.8, min_df=1, max_features=200000, ngram_range=(1, 5), use_idf=True, stop_words='english') tf = None try: tf = lda_tf_vect.fit_transform(patent_tac_stop_words_removed) except Exception as e: lda_tf_vect = TfidfVectorizer(max_df=1.0, min_df=1, max_features=200000, ngram_range=(1, 5), use_idf=True, stop_words='english') tf = lda_tf_vect.fit_transform(patent_tac_stop_words_removed) # LDA Model lda = LatentDirichletAllocation(n_components=1, max_iter=20, learning_method='online', learning_offset=50, random_state=0).fit(tf) lda_feature_names = lda_tf_vect.get_feature_names() lda_topics = get_topic_list(lda, lda_feature_names, number_of_top_words) clusterTopicsAndCounts.append([len(patent_tac), lda_topics[0]]) doc_topic = lda.transform(tf) doc_topic_index = doc_topic.argmax(axis=1) for doc, doc_topic_i in zip(patent_org, doc_topic_index): fout_others += '\t'.join([doc.strip('\r').strip('\n'), lda_topics[doc_topic_i].strip('\t').strip('\r').strip( '\n')]) + '\n' clustering_successful = True # For each cluster's topic modeling, increment progressbar by 5 progressbar_value += 5 if clustering_successful == True: progress_text = progress_text + '\nTopic extraction and clustering completed.' # Load the topic modeling results in the treeview # Compute the columns: (i) # instances, (ii) Topics extracted # populateTreeviewWithTopicModelingResults(apollo4.globals.clusterTopicsAndCounts) progress_text = progress_text + '\nPlease click on Download File button to download the file.' elif len(unlabeled_data) > 0 and ( len(unlabeled_data) <= 2 or len(unlabeled_data) >= int(numberOfClusters)): progress_text = progress_text + "\nTopic extraction could not be performed." else: progress_text = progress_text + "\nNo patent paper found for topic extraction." programRunEndTime = datetime.now() timeDifference = relativedelta(programRunEndTime, programRunStartTime) programRunStartTimeLabel = "\nProgram run took %d days %d hours %d minutes %d seconds." % ( timeDifference.days, timeDifference.hours, timeDifference.minutes, timeDifference.seconds) progress_text = progress_text + programRunStartTimeLabel es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progressbar_value': progressbar_value, "progress_text": progress_text, "clusterTopicsAndCounts": clusterTopicsAndCounts}}) response = HttpResponse(content=fout_others, content_type='text/plain') response['Content-Disposition'] = 'attachment; filename=' + output_filename final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"final_progress_value": final_progress_value}}) return HttpResponse('successfully patents executed patent') # FOR JOURNAL DATA elif gui_parameters['training_data_type'] == 'Journal': # query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} # res = es_conn.search(index=index_name, body=query) # training_data = res['hits']['hits'][0]['_source']['training_data'] file_data_open = training_data file_data_open = file_data_open.split('\n') # split by new line file_data_open = list(filter(None, file_data_open)) # delete empty lines # Now, the first line is a header, so remove it file_data_open = file_data_open[1:] progressbar_value = 5 progress_text = progress_text + "Found " + str( len(file_data_open)) + " documents! 
\nPreprocessing documents...\n" # Remove the duplicated documents based on "title" file_data_open = dedup_collection_journal(file_data_open, 1, 2) # Preprocessing for scoupus data file_data_open = preprocess_collection_journal(file_data_open) user_defined_stopwords = gui_parameters['additional_stopwords'].lower() es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, 'progress_text': progress_text, "user_defined_stopwords": user_defined_stopwords, "training_data_type": gui_parameters['training_data_type']}}) unlabeled_data = [' '.join([stop_and_stem_journal(doc.split('\t')[1], user_defined_stopwords) , stop_and_stem_journal(doc.split('\t')[2], user_defined_stopwords) ]) for doc in file_data_open] unlabeled_data_stop_words_removed = [ ' '.join([remove_stopwords_journal(doc.split('\t')[1], user_defined_stopwords) , remove_stopwords_journal(doc.split('\t')[2], user_defined_stopwords) ]) for doc in file_data_open] progressbar_value += 5 progress_text = progress_text + "Removed duplicates and preprocessed " + str( len(unlabeled_data)) + " documents." # Changed the n-grams to (1,5) in the line below tfidf_vect = TfidfVectorizer(analyzer='word', ngram_range=(1, 5), min_df=2, max_df=0.8, max_features=200000, stop_words='english', use_idf=True) # tf-idf with params unlabeled_data_tfidf = tfidf_vect.fit_transform(unlabeled_data) tfidfVectorizer = tfidf_vect tfidf = pickle.dumps(tfidfVectorizer) s3.put_object(Body=tfidf, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'tfidf_vect.pkl') # Increment progress bar value by 10 for vectorization progressbar_value += 10 # Model and model parameters model = gui_parameters['model'] es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, 'progress_text': progress_text, "model": model}}) if model == 'K-means Clustering': # determine the best model based on evaluating several models automatic_mode = True progress_text = progress_text + "\nPerforming clustering on the data using K-means clustering..." # Added additonal checks to avoid errors and exceptions # 1. Check that the number of documents in OTHERS category is at least greater than 2 if len(unlabeled_data) <= 2: progress_text = progress_text + "\nThere are only " + str(len( unlabeled_data)) + " documents in OTHERS category. Cannot perform clustering on less than or equal to 2 documents." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progress_text': progress_text}}) return HttpResponse( "Error running the program. Please contact the IP Group Analytics Team") # 2. Check that the number of clusters specified is greater than or equal to the number of documents categorized as OTHERS if len(unlabeled_data) < int(numberOfClusters): progress_text = progress_text + "\nThe number of documents in OTHERS category is less than the number of clusters provided in the GUI. Cannot perform clustering in this case." 
es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progress_text': progress_text}}) return HttpResponse( "Error running the program.") output_filename = re.sub('.txt', '_Results_Topic_Modeling.txt', gui_parameters['training_file_name']) es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progress_text': progress_text}}) # 3: topic extraction and clustering if len(unlabeled_data) > 2 and len(unlabeled_data) >= int(numberOfClusters): # Added random_state for fixing results of clustering random_state_kmeans = np.random.RandomState(seed=6758) km = MiniBatchKMeans(n_clusters=int(numberOfClusters), random_state=random_state_kmeans) km.partial_fit(unlabeled_data_tfidf) trainedModel = km trainedModelName = 'K_Means_Clustering_NumClusters=' + str( numberOfClusters) + '_TopWords=' + str( number_of_top_words) clusters = km.labels_.tolist() test_patent_others = {'content': unlabeled_data, 'file_others': file_data_open, 'content_stop_words_removed': unlabeled_data_stop_words_removed, 'cluster': clusters} frame = pd.DataFrame(test_patent_others, index=[clusters], columns=['content', 'file_others', 'content_stop_words_removed', 'cluster']) topicModelingFrame = frame clusterTopicsAndCounts = [] # Increment progress bar by 30, for clustering complete progressbar_value += 30 trainedModel = pickle.dumps(trainedModel) s3.put_object(Body=trainedModel, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') topicModelingFrame = pickle.dumps(topicModelingFrame) s3.put_object(Body=topicModelingFrame, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'topicModelingFrame.pkl') es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progressbar_value': progressbar_value, "trainedModelName": trainedModelName}}) # output lda other topics # with open(output_filename, 'w') as fout_others: fout_others = '' for no in range(int(numberOfClusters)): patent_group = frame.groupby(frame['cluster']).get_group(no) patent_tac = patent_group.ix[:, 0].tolist() patent_org = patent_group.ix[:, 1].tolist() clustering_successful = False try: lda_tf_vect = TfidfVectorizer(max_df=0.8, min_df=1, max_features=200000, ngram_range=(1, 5), use_idf=True, stop_words='english') tf = lda_tf_vect.fit_transform(patent_tac) # LDA Model lda = LatentDirichletAllocation(n_components=1, max_iter=20, learning_method='online', learning_offset=50, random_state=0).fit(tf) lda_feature_names = lda_tf_vect.get_feature_names() lda_topics = get_topic_list(lda, lda_feature_names, number_of_top_words) clusterTopicsAndCounts.append([len(patent_tac), lda_topics[0]]) doc_topic = lda.transform(tf) doc_topic_index = doc_topic.argmax(axis=1) for doc, doc_topic_i in zip(patent_org, doc_topic_index): fout_others += '\t'.join( [doc, lda_topics[doc_topic_i].strip('\r').strip('\n')]) + '\n' clustering_successful = True except: progress_text = progress_text + '\nERROR: Unexpected error.' es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progress_text': progress_text}}) return HttpResponse( "Error running the program.") # For each cluster's topic modeling, increment progressbar by 5 progressbar_value += 5 if clustering_successful == True: progress_text = progress_text + '\nTopic extraction and clustering completed.' progress_text = progress_text + "\nPlease download the " + output_filename + " file and check all the results in the file." elif len(unlabeled_data) > 0 and ( len(unlabeled_data) <= 2 or len(unlabeled_data) >= int(numberOfClusters)): progress_text = progress_text + "\nTopic extraction could not be performed." 
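# The unsupervised flow above is MiniBatchKMeans over the TF-IDF matrix, then a one-topic LDA per
# cluster to produce the " | "-joined topic label. A compact sketch on assumed toy data (two
# clusters, three top words). Note the view refits a fresh vectorizer per cluster; a single one is
# reused here for brevity, and get_feature_names() matches the older scikit-learn this module
# appears to target (newer releases use get_feature_names_out()).
def _sketch_cluster_and_label_topics():
    import numpy as np
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.cluster import MiniBatchKMeans
    from sklearn.decomposition import LatentDirichletAllocation

    docs = ["battery cell electrode coating", "lithium battery electrode material",
            "display panel pixel circuit", "oled display driving circuit"]
    vect = TfidfVectorizer(min_df=1)
    X = vect.fit_transform(docs)

    km = MiniBatchKMeans(n_clusters=2, random_state=np.random.RandomState(seed=6758))
    km.partial_fit(X)
    clusters = km.labels_.tolist()

    topics = []
    for no in range(2):
        members = [doc for doc, c in zip(docs, clusters) if c == no]
        if not members:
            continue                      # empty cluster: skip it, as the view does
        tf = vect.transform(members)
        lda = LatentDirichletAllocation(n_components=1, max_iter=20,
                                        learning_method='online', learning_offset=50,
                                        random_state=0).fit(tf)
        names = vect.get_feature_names()  # get_feature_names_out() on scikit-learn >= 1.0
        top = lda.components_[0].argsort()[:-4:-1]   # indices of the 3 strongest terms
        topics.append(" | ".join(names[i] for i in top))
    return clusters, topics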
es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progress_text': progress_text}}) else: progress_text = progress_text + "\nNo journal paper found for topic extraction." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progress_text': progress_text}}) programRunEndTime = datetime.now() timeDifference = relativedelta(programRunEndTime, programRunStartTime) programRunStartTimeLabel = "Program run took %d days %d hours %d minutes %d seconds." % ( timeDifference.days, timeDifference.hours, timeDifference.minutes, timeDifference.seconds) progressbarlabel_text = programRunStartTimeLabel response = HttpResponse(content=fout_others, content_type='text/plain') response['Content-Disposition'] = 'attachment; filename=' + output_filename final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progress_text': progress_text, "progressbar_value": progressbar_value, "clusterTopicsAndCounts": clusterTopicsAndCounts, "progressbarlabel_text": progressbarlabel_text, "final_progress_value": final_progress_value}}) return HttpResponse('successfully executed') except Exception as e: errorString = 'Error while running the program please contact IP Group Analytics team.' final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'errorString': errorString, "final_progress_value": final_progress_value}}) return HttpResponse( "Error running the program.") @csrf_exempt def fetch_update_unsupervised(request): # dumps_clusterTopicsAndCounts = list(apollo4.globals.clusterTopicsAndCounts) try: if request.method == 'POST': userName = request.user.username; index_name_gloabals = 'apolloglobalsunsupervised' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} res = es_conn.search(index=index_name_gloabals, body=query) data = res['hits']['hits'][0]['_source'] return JsonResponse({'data': data}) except Exception as e: return HttpResponse( "Error running the program.") ############## runDocumentClassifierUnsupervised ends ################################ ############################# patent scoring functions starts ########################### def remove_tab(text): try: if not text: return text # return empty string if string is empty text = text.replace('\n', ' ').replace('\t', ' ').strip() # remove line break and tab return text except Exception as e: return HttpResponse( "Error running the program. Please contact the IP Group Analytics Team () to resolve the issue. Please provide the error details below in your email. \nPlease provide all the steps to reproduce this issue. \n" + "-" * 40 + "\n" + str( e) + "\n" + "-" * 40) def preprocess_collection_PatentScoring(file_open, stopterms, inventionDisclosure=False): try: progress_text = '' if inventionDisclosure: # Preprocess the text file containing text of invention disclosure # stop and stem title, abstract, claim file_stem = stop_and_stem(file_open[0], stopterms) file_proc = remove_tab(file_open[0]) output = ([file_proc], [file_stem]) progress_text += '\nStopwords removed and terms stemmed in the keyword search file.' 
else: # preprocess patent data as usual # de-duplication apn_cur = [] # list of current used application numbers file_dedup = [] # the collection of deduped patents counter = 0 # num of deduped patents num_docs = len(file_open) for index, doc in enumerate(file_open, start=1): try: # print "de-duplication document %d of %d" % (index,num_docs) apn = doc.split("\t")[4] if apn not in apn_cur: file_dedup.append(doc) apn_cur.append(apn) counter = counter + 1 except Exception as e: # print "ERROR: document %d could not be processed" % index print(e) # step through the deduped patent collection file_proc = [] file_stem = [] design_count = 0 # counter for design cases utility_count = 0 # counter for utility cases for doc_no, doc in enumerate(file_dedup, start=1): # print "pre-processing document %d of %d" % (doc_no,len(file_open)) fields = doc.split("\t") try: no = str(doc_no) file_t = remove_tab(fields[1]) # title file_a = remove_tab(fields[2]) # abstract file_c = remove_tab(fields[3]) # claims apn = fields[4].lower() apd = fields[5] asgn = fields[6].lower() # upc = fields[7].lower() except Exception as e: print(e) if apn.startswith("us2"): # filter out design cases progress_text += 'Design patent found! App_No: %r' % (apn) design_count = design_count + 1 continue if apn.startswith("us"): utility_count = utility_count + 1 # stop and stem title, abstract, claim file_t_stem = stop_and_stem(file_t, stopterms) file_a_stem = stop_and_stem(file_a, stopterms) file_c_stem = stop_and_stem(file_c, stopterms) # Output the orginal clean version of utility patent file_proc.append([no, file_t, file_a, file_c, apn, apd, asgn]) # Output the stemmed version of utility patent file_stem.append([no, file_t_stem, file_a_stem, file_c_stem, apn, apd, asgn]) output = (file_proc, file_stem) # progress_text += '\nstopwords removed, terms stemmed, documents de-duplicated, design removed' # progress_text += '\n%d unique documents out of %d total' % (counter, num_docs) # progress_text += '\n%d design documents out of %d total' % (design_count, num_docs) # progress_text += '\n%d utility documents out of %d total' % (utility_count, num_docs) return output except Exception as e: return HttpResponse( "Error running the program." + str(e)) # Note that the function below will be used to validate both Samsung Patent data file and Non-Samsung patent data file that the user will upload in GUI. @csrf_exempt def patentScoringGlobals(request): try: if request.method == 'POST': userName = request.user.username; update_fields = { 'username': userName, 'progressbar_maximum': 0, # progress bar max_value 'progress_value': 0, # progress bar value 'progressbarlabel_text': '', # progress field 'progress_text': '', # progress text 'final_progress_value': '', 'current_tab': 0, 'errorString': '', "training_data_type": '', "testing_data_type": '' } index_name_gloabals = 'apolloglobalspatentscoring' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} # es_conn.indices.create(index=index_name_gloabals) es_conn.delete_by_query(index=index_name_gloabals, body=query) es_conn.create(index=index_name_gloabals, doc_type='_doc', body=update_fields, id=uuid.uuid4()) return HttpResponse('sucess') except Exception as e: return HttpResponse( "Error running the program." 
+ str(e)) @csrf_exempt def userRunModelTrackPS(request): try: if request.method == 'POST': try: userName = request.user.username; userTrack = request.body.decode('utf-8'); userTrack = json.loads(userTrack); filename_NonSamsung_Patents = userTrack['filename_NonSamsung_Patents'] training_data_type = userTrack['training_data_type'] testing_data_type = userTrack['testing_data_type'] keywords = userTrack['keywords'] searchType = userTrack['searchType'] except Exception as e: print('parsing went wrong', e) time = datetime.now() time = time.strftime("%I:%M%p on %B %d, %Y") update_fields = { 'username': userName, 'filename_NonSamsung_Patents': filename_NonSamsung_Patents, 'training_data_type': training_data_type, 'testing_data_type': testing_data_type, 'keywords': keywords, 'searchType': searchType, 'time': time + ' UTC Time' } index_name = 'userrunmodeltrackps' if es_conn.indices.exists(index_name): es_conn.create(index=index_name, doc_type='_doc', body=update_fields, id=uuid.uuid4()) else: save_response = es_conn.indices.create(index=index_name, ignore=400) es_conn.create(index=index_name, doc_type='_doc', body=update_fields, id=uuid.uuid4()) return HttpResponse('sucess') except Exception as e: return HttpResponse( "Error running the program." + str(e)) @csrf_exempt def patentScoringData(request): superVisedResponse = {} global response try: if request.method == "POST": response = None get_value = request.body get_value = get_value.decode("utf-8") if "identification number\ttitle\tabstract\tclaims\tapplication number\tapplication date\tcurrent assignee\tupc" in get_value.lower(): checkPatentData = 'Patent' elif "keywords" in get_value.lower(): checkPatentData = 'Keywords' elif 'nasca' in get_value.lower(): checkPatentData = 'NASCA File Error.' else: checkPatentData = 'Patent File Error.' return HttpResponse(checkPatentData) except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def computeSimilarityBetweenSamsungAndNonSamsungPatents(request): global response if response != None: return response else: try: if request.method: username = request.user.username; get_value = request.body get_value = get_value.decode("utf-8") get_value = json.loads(get_value) output_fext = splitext(get_value['filename_NonSamsung_Patents'])[1] output_response_name = splitext(get_value['filename_NonSamsung_Patents'])[0] training_data_type = get_value['training_data_type'], testing_data_type = get_value['testing_data_type'], searchType = get_value['searchType'], keywords = get_value['keywords'], current_tab = 7 index_name_gloabals = 'apolloglobalspatentscoring' query = {"query": {"bool": {"must": {"match": {"username.keyword": username}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] progress_text = "" progress_value = 0 progressbar_maximum = 200 programRunStartTime = datetime.now() programRunStartTimeLabel = 'Progress: Program run started at ' + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + ' (UTC time). ' progressbarlabel_text = programRunStartTimeLabel progress_text += progress_text + '-' * 75 + '\n' + "Program run started at " + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + " (UTC time).\n" + \ '-' * 75 + '\n' + "Starting document classification process..." 
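# Every long-running view in this module reports progress by partially updating a per-user
# document in Elasticsearch ({"doc": {...}}), which the matching fetch_update_* endpoint polls.
# A minimal sketch of that pattern; the index name, field names and es_client argument are
# assumed placeholders (the real code uses the module-level es_conn and indices such as
# 'apolloglobalspatentscoring').
def _sketch_es_progress_update(es_client, username, message, value):
    query = {"query": {"bool": {"must": {"match": {"username.keyword": username}}}}}
    res = es_client.search(index='example_progress_index', body=query)
    doc_id = res['hits']['hits'][0]['_id']
    # Partial update: only the listed fields change on the stored document.
    es_client.update(index='example_progress_index', id=doc_id,
                     body={"doc": {"progress_text": message,
                                   "progress_value": value}})
    # A polling endpoint then reads the same document back for the front end.
    res = es_client.search(index='example_progress_index', body=query)
    return res['hits']['hits'][0]['_source']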
# filename_NonSamsung_Patents es_conn.update(index=index_name_gloabals, id=id, body={"doc": {'progressbarlabel_text': progressbarlabel_text, 'progress_text': progress_text, "progressbar_maximum": progressbar_maximum, "current_tab": current_tab, "training_data_type": training_data_type, "testing_data_type": testing_data_type}}) # Build the stop words stops = stopwords aux_stops = './static/AuxStops.txt' aux_stops = open(aux_stops, 'r').read() aux_stops = re.sub("[^a-zA-Z ]", " ", aux_stops) # remove non-alphanumeric aux_stops = " ".join(aux_stops.split()) # split by any whitespace and rejoin w/ space aux_stops = aux_stops.split(' ') aux_stops = list(filter(None, aux_stops)) # append auxiliary stops stops = stops + aux_stops # Bulid stopterm dictionary stopterms = {} for stop in stops: if stop in stopterms: stopterms[stop] += 1 else: stopterms[stop] = 1 query = {"query": {"bool": {"must": {"match": {"username.keyword": username}}}}} res = es_conn.search(index='testingfiledata', body=query) testing_data = res['hits']['hits'][0]['_source']['testing_data'] file_NonSamsungPatents_open = testing_data file_NonSamsungPatents_open = file_NonSamsungPatents_open.split('\n') # split by new line file_NonSamsungPatents_open = list(filter(None, file_NonSamsungPatents_open)) # delete empty lines file_NonSamsungPatents_open = file_NonSamsungPatents_open[1:] progress_value += 5 progress_text += '\nPreprocessing related patents file...' es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_value": progress_value, 'progress_text': progress_text}}) (file_NonSamsungPatents_proc, file_NonSamsungPatents_stem) = preprocess_collection_PatentScoring( file_NonSamsungPatents_open, stopterms, inventionDisclosure=False) file_NonSamsungPatents_proc = list( filter(None, file_NonSamsungPatents_proc)) # patent list after dedup and remove the design patents file_NonSamsungPatents_stem = list(filter(None, file_NonSamsungPatents_stem)) # patent list after stem # Check whether input file contains search keywords, instead of patent list keywordSearch = False if searchType[0].lower() == 'keywords': keywordSearch = True file_SamsungPatents_open = [" ".join(keywords[0].lower().split(','))] else: res = es_conn.search(index='trainingfiledata', body=query) training_data = res['hits']['hits'][0]['_source']['training_data'] file_SamsungPatents_open = training_data file_SamsungPatents_open = file_SamsungPatents_open.split('\n') # split by new line file_SamsungPatents_open = list(filter(None, file_SamsungPatents_open)) # delete empty lines file_SamsungPatents_open = file_SamsungPatents_open[1:] progress_value += 5 if keywordSearch: progress_text += '\nPreprocessing search keywords file...' else: progress_text += '\nPreprocessing input patents file...' 
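# The scoring that follows is latent semantic analysis: one TF-IDF matrix over both document
# sets, TruncatedSVD down to at most 100 components, then cosine similarity between the query
# rows and the reference rows. A compact sketch on assumed toy text; the tiny component count is
# only so the toy corpus fits.
def _sketch_lsa_similarity():
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.decomposition import TruncatedSVD
    from sklearn.metrics.pairwise import cosine_similarity

    query_docs = ["battery electrode coating"]                   # e.g. input patents or keywords
    reference_docs = ["lithium battery electrode material",      # e.g. related patents
                      "display pixel driving circuit"]

    docs_all = query_docs + reference_docs
    matrix_tfidf = TfidfVectorizer(norm='l1').fit_transform(docs_all)

    n_components = min(2, matrix_tfidf.shape[1] - 1)             # the view caps this at 100
    reduced = TruncatedSVD(n_components=n_components).fit_transform(matrix_tfidf)

    query_part = reduced[:len(query_docs), :]
    reference_part = reduced[len(query_docs):, :]
    return cosine_similarity(query_part, reference_part)         # shape (1, len(reference_docs))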
es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_value": progress_value, 'progress_text': progress_text}}) (file_SamsungPatents_proc, file_SamsungPatents_stem) = preprocess_collection_PatentScoring( file_SamsungPatents_open, stopterms, inventionDisclosure=keywordSearch) file_SamsungPatents_proc = list( filter(None, file_SamsungPatents_proc)) # patent list after dedup and remove the design patents file_SamsungPatents_stem = list(filter(None, file_SamsungPatents_stem)) # patent list after stem progress_value += 15 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_value": progress_value, 'progress_text': progress_text}}) # combine title, abstract, claim if keywordSearch: tac_SamsungPatents = [file_SamsungPatents_stem] progressbar_maximum = 100 + len(file_NonSamsungPatents_proc) else: tac_SamsungPatents = [' '.join([doc[1], doc[2], doc[3]]) for doc in file_SamsungPatents_stem] progressbar_maximum = 100 + len(file_NonSamsungPatents_proc) * len(tac_SamsungPatents) tac_NonSamsungPatents = [' '.join([doc[1], doc[2], doc[3]]) for doc in file_NonSamsungPatents_stem] num_SamsungPatents = len(tac_SamsungPatents) num_NonSamsungPatents = len(tac_NonSamsungPatents) # debug print if keywordSearch: docsALL = tac_SamsungPatents[0] + tac_NonSamsungPatents else: docsALL = tac_SamsungPatents + tac_NonSamsungPatents doc_terms = [] for doc in docsALL: doc_terms = doc_terms + doc.split(' ') # LSA # calculate the tf-idf for the doc-term matrix cvTFIDF = TfidfVectorizer(norm='l1') matrix_TFIDF = cvTFIDF.fit_transform(docsALL) (num_docs, num_features) = np.shape(matrix_TFIDF) progress_value += 25 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_value": progress_value}}) if num_features > 100: num_features = 100 # only keep the top 100 features svd = TruncatedSVD(n_components=num_features) # dimension reduction to low rank matrix matrix_TFIDF_SVD = svd.fit_transform(matrix_TFIDF) matrix_SamsungPatents = matrix_TFIDF_SVD[0:num_SamsungPatents, :] matrix_NonSamsungPatents = matrix_TFIDF_SVD[num_SamsungPatents:, :] cos_d_all = cosine_similarity(matrix_SamsungPatents, matrix_NonSamsungPatents) # zipObj = ZipFile(splitext(get_value['filename_NonSamsung_Patents'])[0] + '.zip', 'w') zipFileName = splitext(get_value['filename_NonSamsung_Patents'])[0] + '.zip' zipIO = io.BytesIO() zipObj = ZipFile(zipIO, 'w') progress_value += 25 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_value": progress_value}}) if keywordSearch: # output the ranked top 5 patent list output_file = 'TopFiveKeywordSearchResults.txt' cos_d = cos_d_all[0, :] file_NonSamsungPatents_sort = (np.array(file_NonSamsungPatents_proc)[np.argsort(cos_d)]).tolist()[ ::-1] # to list of list cos_d_sort = (np.sort(cos_d)).tolist()[::-1] # to list tempString = '' numberOfPatentsToDisplay = 5 patentCounter = 0 for doc, score in zip(file_NonSamsungPatents_sort, cos_d_sort): progress_value += 1 if patentCounter < numberOfPatentsToDisplay: patentCounter += 1 samsungPatent_apd2 = datetime.strptime(doc[5], '%m/%d/%Y') tempString += '\t'.join(doc + ["{:.2%}".format(score)]) + '\n' else: break zipObj.writestr(output_file, tempString) else: # output the ranked reference patent list of ith samsung patent for samsungPatent_idx, samsungPatent_doc in enumerate(file_SamsungPatents_proc): samsungPatent_apn = samsungPatent_doc[4].replace('/', '-') samsungPatent_apd = datetime.strptime(samsungPatent_doc[5], '%m/%d/%Y') output_file = samsungPatent_apn + '_rank' + output_fext cos_d = 
cos_d_all[samsungPatent_idx, :] file_NonSamsungPatents_sort = (np.array(file_NonSamsungPatents_proc)[ np.argsort(cos_d)]).tolist()[ ::-1] # to list of list cos_d_sort = (np.sort(cos_d)).tolist()[::-1] # to list tempString = '' for doc, score in zip(file_NonSamsungPatents_sort, cos_d_sort): samsungPatent_apd2 = datetime.strptime(doc[5], '%m/%d/%Y') if (samsungPatent_apd <= samsungPatent_apd2 and 'samsung' not in doc[6].lower()): tempString += '\t'.join(doc + ["{:.2%}".format(score)]) + '\n' progress_value += 1 zipObj.writestr(output_file, tempString) zipObj.close() progress_text += '\nPatent scoring completed sucessfully.' progress_value += 25 final_progress_value = 200 response = HttpResponse(zipIO.getvalue(), content_type='application/x-zip-compressed') response['Content-Disposition'] = 'attachment; filename=' + zipFileName es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text, "progress_value": progress_value, "final_progress_value": final_progress_value}}) return HttpResponse('success') except Exception as e: return HttpResponse( "Error running the program." + str(e)) @csrf_exempt def fetch_update_patentscoring(request): try: if request.method == 'POST': userName = request.user.username; index_name_gloabals = 'apolloglobalspatentscoring' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} res = es_conn.search(index=index_name_gloabals, body=query) data = res['hits']['hits'][0]['_source'] return JsonResponse({'data': data}) except Exception as e: return HttpResponse( "Error running the program." + str(e)) ############################# patent scoring functions ends########################### @csrf_exempt def userRunModelTrackEM(request): try: if request.method == 'POST': try: userName = request.user.username; userTrack = request.body.decode('utf-8'); userTrack = json.loads(userTrack); saveProjectName = userTrack['saveProjectName'] trainedModelName = userTrack['trainedModelName'] testing_file_name = userTrack['testing_file_name'] testing_data_type = userTrack['testing_data_type'] except Exception as e: print('parsing went wrong', e) time = datetime.now() time = time.strftime("%I:%M%p on %B %d, %Y") update_fields = { 'username': userName, 'saveProjectName': saveProjectName, 'trainedModelName': trainedModelName, 'testing_file_name': testing_file_name, 'testing_data_type': testing_data_type, 'time': time + ' UTC Time' } index_name = 'userrunmodeltrackem' if es_conn.indices.exists(index_name): es_conn.create(index=index_name, doc_type='_doc', body=update_fields, id=uuid.uuid4()) else: save_response = es_conn.indices.create(index=index_name, ignore=400) es_conn.create(index=index_name, doc_type='_doc', body=update_fields, id=uuid.uuid4()) return HttpResponse('sucess') except Exception as e: return HttpResponse( "Error running the program." 
+ str(e)) @csrf_exempt def makePredictionsForSupervisedLearning(request): global responseTrain try: if request.method == "GET": return responseTrain elif request.method == "POST": userName = request.user.username; gui_parameters = request.POST.getlist('inputData')[0] get_value = json.loads(gui_parameters); finaltestingData = request.FILES.getlist('testFile') # training_data = request.FILES.getlist('file').read().decode("ISO-8859-1") testing_data = Process_All_Files(finaltestingData) saveProjectName = get_value['saveProjectName'] trainedModelName = get_value['trainedModelName'] testingFileName = get_value['testing_file_name'] testingDataType = get_value['testing_data_type'] progressbar_maximum = 120 progressbar_value = 0 # 1. supervised learning # Set the text in progressbarlabel programRunStartTime = datetime.now() programRunStartTimeLabel = 'Progress: Program run started at ' + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + '. ' progressbarlabel_text = programRunStartTimeLabel progress_text = '' progress_text = progress_text + '-' * 75 + '\n' + \ "Program run started at " + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + " (UTC time)." + '\n' + '-' * 75 + '\n' index_name_gloabals = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_maximum": progressbar_maximum, "current_tab": 3, "progressbar_value": progressbar_value, "testingFileName": testingFileName, "trainedModelName": trainedModelName, "testingDataType": testingDataType, "progress_text": progress_text , "progressbarlabel_text": progressbarlabel_text}}) # historyFilename = '' # if os.name == 'nt': # historyFilename = 'C:/Users/manali.s/Desktop/Classification_Projects/' # else: # historyFilename = '/data/Classification_Projects/' # historyFilename += comboboxAvailableProjects.get() + '/history.txt' key_value = 'classificationprojects/' key_value += saveProjectName + '/supervised/' key_value += trainedModelName + '/' s3 = boto3.client('s3') response1 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'tfidf_vect.pkl') response2 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') # for line in contents.splitlines(): progressbar_value += 5 # Load the tfidf vectorizer pickle file that was previously saved in S3 tfidfVectorizer = response1['Body'].read() tfidf_vect = pickle.loads(tfidfVectorizer) # 1. Load the model model_file = response2['Body'].read() model = pickle.loads(model_file) progressbar_value += 5 # Prediction Phase # The code for patent and journal testing data is different because it required different preprocessing es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) if testingDataType == 'Patent': # Read the test patent data file_test_open = testing_data file_test_open = file_test_open.split('\n') # split by new line file_test_open = list(filter(None, file_test_open)) # delete empty lines # Now, the first line is header, so remove the first line file_test_open = file_test_open[1:] progress_text = progress_text + "Preprocessing unlabeled data..." 
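# Prediction reuses the artifacts written at training time: unpickle the TF-IDF vectorizer and
# the classifier from S3, transform (never re-fit) the new text, and call predict. A minimal
# sketch; the bucket, key prefix and sample documents are hypothetical placeholders.
def _sketch_predict_with_saved_model():
    import pickle
    import boto3

    s3 = boto3.client('s3')
    bucket = 'example-bucket'                                        # hypothetical
    prefix = 'classificationprojects/demo/supervised/some_model/'    # hypothetical

    tfidf_vect = pickle.loads(
        s3.get_object(Bucket=bucket, Key=prefix + 'tfidf_vect.pkl')['Body'].read())
    model = pickle.loads(
        s3.get_object(Bucket=bucket, Key=prefix + 'trainedModel.pkl')['Body'].read())

    new_docs = ["battery electrode coating method", "display pixel driving circuit"]
    features = tfidf_vect.transform(new_docs)    # transform only; fitting happened at training time
    return model.predict(features.todense())     # dense input, matching the training-time call above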
progressbar_value += 5 # Build the stop words stops = stopwords aux_stops = './static/AuxStops.txt' aux_stops = open(aux_stops, 'r').read() aux_stops = re.sub("[^a-zA-Z ]", " ", aux_stops) # remove non-alphanumeric aux_stops = " ".join(aux_stops.split()) # split by any whitespace and rejoin w/ space aux_stops = aux_stops.split(' ') aux_stops = list(filter(None, aux_stops)) # Bulid stopterm dictionary stopterms = {} for stop in stops: if stop in stopterms: stopterms[stop] += 1 else: stopterms[stop] = 1 (file_test_proc, file_test_stem, temp) = preprocess_collection(file_test_open, stopterms, False, progress_text) file_test_proc = list(filter(None, file_test_proc)) file_test = list(filter(None, file_test_stem)) title_test = [doc.split('\t')[1] for doc in file_test] abstract_test = [doc.split('\t')[2] for doc in file_test] claim_test = [doc.split('\t')[3] for doc in file_test] test_data = [' '.join(doc) for doc in zip(title_test, abstract_test, claim_test)] progressbar_value += 15 progress_text = progress_text + "\nMaking predictions on unlabeled data..." # convert text data to tfidf test_data_tfidf = tfidf_vect.transform(test_data) model_predicted = model.predict(test_data_tfidf.todense()) model_predicted = model_predicted.astype('U128') es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text, "model_predicted": json.dumps(model_predicted.tolist())}}) elif testingDataType == 'Journal': file_test_open = testing_data file_test_open = file_test_open.split('\n') # split by new line file_test_open = list(filter(None, file_test_open)) # delete empty lines # Now, the first line is header, so remove the first line file_test_open = file_test_open[1:] progress_text = progress_text + "Preprocessing unlabeled data..." progressbar_value += 10 # Remove the duplicated document based on "title" file_test_open = dedup_collection_journal(file_test_open, 1, 2) # preprocessing for scoupus data file_test_proc = preprocess_collection_journal(file_test_open) user_defined_stopwords = [] test_data = [' '.join([stop_and_stem_journal(doc.split('\t')[1], user_defined_stopwords) , stop_and_stem_journal(doc.split('\t')[2], user_defined_stopwords) ]) for doc in file_test_proc] progressbar_value += 20 progress_text = progress_text + "\nMaking predictions on unlabeled data..." # convert text data to tfidf test_data_tfidf = tfidf_vect.transform(test_data) model_predicted = model.predict(test_data_tfidf.todense()) model_predicted = model_predicted.astype('U128') # end journal test data preprocessing es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text, "model_predicted": json.dumps(model_predicted.tolist())}}) model_prob = None # The 'predict_proba 'function cannot be used for SGD with hinge loss, hence we need calibrate the probability estimates for SGD with hinge loss if 'Support_Vector_Machine' in trainedModelName: model_isotonic_calibration = None model_sigmoid_calibration = None # Note that for Support Vector machine model we save the calibration model. So, we need to load the calibration model from S3 in this case. # Load the model to calibrate probabilities that will be used by the excel sheet # The calibration model that is savied could be either 'model_sigmoid_calibration' or 'model_isotonic_calibration' # We need to check which one of these models was saved for SVM. 
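# --- Illustrative sketch: why a separate calibration model is needed for hinge-loss SVMs.
# --- SGDClassifier(loss='hinge') has no predict_proba, so a CalibratedClassifierCV wrapper
# --- (sigmoid or isotonic) is fitted and pickled alongside the model; at prediction time
# --- its predict_proba supplies the class probabilities written to the Raw_Data sheet.
# --- This is a minimal standalone example on synthetic data, not the saved production model;
# --- cv='prefit' matches the usage elsewhere in this module.
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier

X_demo, y_demo = make_classification(n_samples=200, n_features=20, random_state=0)
svm_demo = SGDClassifier(loss='hinge', random_state=0).fit(X_demo, y_demo)
# Sigmoid calibration is typically used for smaller datasets and isotonic for larger ones,
# which matches the two pickle files (model_sigmoid_calibration / model_isotonic_calibration).
calibrated_demo = CalibratedClassifierCV(svm_demo, cv='prefit', method='sigmoid').fit(X_demo, y_demo)
probabilities_demo = calibrated_demo.predict_proba(X_demo)  # shape: (n_samples, n_classes)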
try: s3.Object(AWS_STORAGE_BUCKET_NAME, key_value + 'model_sigmoid_calibration.pkl').load() except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # The object does not exist. model_sigmoid_calibration = None elif e.response['Error']['Code'] == "NoSuchKey": pass else: # Something else has gone wrong. final_progress_value = 200 errorString = 'Error running retreiving data from s3. Please contact the IP Group Analytics Team' es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"final_progress_value": final_progress_value, "errorString": errorString}}) return JsonResponse({'finalResponse': 'Error'}) else: # The object does exist. response3 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_sigmoid_calibration.pkl') sigmoid_calibration_file = response3['Body'].read() model_sigmoid_calibration = pickle.loads(sigmoid_calibration_file) try: s3.Object(AWS_STORAGE_BUCKET_NAME, key_value + 'model_isotonic_calibration.pkl').load() except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # The object does not exist. model_isotonic_calibration = None elif e.response['Error']['Code'] == "NoSuchKey": pass else: # Something else has gone wrong. final_progress_value = 200 errorString = 'Error running retreiving data from s3. Please contact the IP Group Analytics Team' es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"final_progress_value": final_progress_value, "errorString": errorString}}) return JsonResponse({'finalResponse': 'Error'}) else: # The object does exist. response4 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_isotonic_calibration.pkl') isotonic_calibration_file = response4['Body'].read() model_isotonic_calibration = pickle.loads(isotonic_calibration_file) if model_isotonic_calibration != None: model_prob = model_isotonic_calibration.predict_proba(test_data_tfidf.todense()) elif model_sigmoid_calibration != None: model_prob = model_sigmoid_calibration.predict_proba(test_data_tfidf.todense()) else: final_progress_value = 200 errorString = 'Error running retreiving data from s3. Please contact the IP Group Analytics Team' es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"final_progress_value": final_progress_value, "errorString": errorString}}) return JsonResponse({'finalResponse': 'Error'}) else: if 'Deep Learning' in trainedModelName: model_prob = model.predict_proba(testing_data) else: model_prob = model.predict_proba(test_data_tfidf.todense()) progressbar_value += 20 # classify the patent with rel. 
threshold < th to "others" class model_prob_all = copy.copy(model_prob) # model_prob[model_prob < 0.0] = 0.0 model_prob_new = np.sum(model_prob, axis=1) model_predicted[model_prob_new == 0] = 'others' trainingDataNumInstances = len(model_predicted) trainingDataNumClasses = len(set(model_predicted)) es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "trainingDataNumInstances": trainingDataNumInstances, "trainingDataNumClasses": trainingDataNumClasses}}) # Update the treeview with the distribution of instances in the training data testingstats = [] for label in set(model_predicted): distribution = str( np.round(model_predicted.tolist().count(label) * 100.0 / len(model_predicted) * 1.0, 2)) + '%' testingstats.append([label, model_predicted.tolist().count(label), distribution]) trainingDataStatistics = testingstats labels = model.classes_ labels = sorted(list(set(labels))) labels.append(u'others') progressbar_value += 20 # Save the results to an excel worksheet progress_text = progress_text + "\nSaving results..." if testingDataType == 'Patent': workbook = load_workbook('./static/template_patent.xlsx') elif testingDataType == 'Journal': workbook = load_workbook('./static/template_journal.xlsx') resultsSheet = workbook["Results"] rawDataSheet = workbook["Raw_Data"] # Write results for predicted probabilities and class names to the sheet containing raw data # In excel, the indices start from 1, instead of 0 if testingDataType == 'Patent': # Now, the first line is header, so remove the first line for row_index in np.arange(len(file_test_proc)): doc = file_test_proc[row_index].split('\t') for column_index in np.arange(8): resultsSheet.cell(row=row_index + 2, column=column_index + 1).value = doc[column_index].strip( '\"') resultsSheet.cell(row=row_index + 2, column=9).value = model_predicted[row_index].strip('\"') elif testingDataType == 'Journal': for row_index in np.arange(len(file_test_open)): doc = file_test_open[row_index].split('\t') for column_index in np.arange(6): resultsSheet.cell(row=row_index + 2, column=column_index + 1).value = doc[column_index].strip( '\"') resultsSheet.cell(row=row_index + 2, column=7).value = model_predicted[row_index].strip('\"') # In the Raw_Data sheet, write the class names starting from column B column_header_index = 2 for cls in model.classes_: rawDataSheet.cell(row=1, column=column_header_index).value = cls.strip('\r') column_header_index += 1 # Wirte all the probabilities for each class assgined by the model in the Raw_Data sheet for row_index in np.arange(len(model_prob_all)): for column_index in np.arange(len(model_prob_all[row_index])): # The first column in template excel file is formula for 'OTHERS', # hence start writing the probability values from second column in the excel sheet rawDataSheet.cell(row=row_index + 2, column=column_index + 2).value = model_prob_all[ row_index][column_index] # workbook.save(re.sub('.txt', '_Threshold_Analysis.xlsx',gui_parameters['testing_file_name'])) # thresholdAnalysisResultFileName = re.sub('.txt', '_Threshold_Analysis.xlsx', testingFileName) # response1 = HttpResponse(content_type='application/ms-excel') responseTrain = HttpResponse(content=save_virtual_workbook(workbook), content_type='application/vnd.ms-excel') responseTrain['Content-Disposition'] = 'attachment; filename=' + thresholdAnalysisResultFileName progressbar_value += 25 programRunEndTime = datetime.now() timeDifference = relativedelta(programRunEndTime, programRunStartTime) programRunStartTimeLabel = 
"Program run took %d days %d hours %d minutes %d seconds." % ( timeDifference.days, timeDifference.hours, timeDifference.minutes, timeDifference.seconds) progressbarlabel_text = programRunStartTimeLabel es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progressbarlabel_text": progressbarlabel_text, "trainingDataStatistics": trainingDataStatistics, "progress_text": progress_text}}) progressbar_value += 5 final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text, "final_progress_value": final_progress_value}}) return HttpResponse('successfully executed') except Exception as e: final_progress_value = 200 errorString = errorString es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"errorString": errorString, "final_progress_value": final_progress_value}}) return JsonResponse({'finalResponse': 'Error'}) @csrf_exempt def userRunModelTrackIL(request): try: if request.method == 'POST': try: userName = request.user.username; userTrack = request.body.decode('utf-8'); userTrack = json.loads(userTrack); saveProjectName = userTrack['saveProjectName'] trainedModelName = userTrack['trainedModelName'] training_file_name = userTrack['training_file_name'] training_data_type = userTrack['training_data_type'] except Exception as e: print('parsing went wrong', e) time = datetime.now() time = time.strftime("%I:%M%p on %B %d, %Y") update_fields = { 'username': userName, 'saveProjectName': saveProjectName, 'trainedModelName': trainedModelName, 'training_file_name': training_file_name, 'training_data_type': training_data_type, 'time': time + ' UTC Time' } index_name = 'userrunmodeltrackil' if es_conn.indices.exists(index_name): es_conn.create(index=index_name, doc_type='_doc', body=update_fields, id=uuid.uuid4()) else: save_response = es_conn.indices.create(index=index_name, ignore=400) es_conn.create(index=index_name, doc_type='_doc', body=update_fields, id=uuid.uuid4()) return HttpResponse('sucess') except Exception as e: return HttpResponse( "Error running the program." + str(e)) @csrf_exempt def makePredictionsForUnsupervisedLearning(request): global response try: if request.method == 'GET': return response elif request.method == "POST": userName = request.user.username; gui_parameters = request.POST.getlist('inputData')[0] get_value = json.loads(gui_parameters); finaltestingData = request.FILES.getlist('testFile') # training_data = request.FILES.getlist('file').read().decode("ISO-8859-1") testing_data = Process_All_Files(finaltestingData) saveProjectName = get_value['saveProjectName'] trainedModelName = get_value['trainedModelName'] testingFileName = get_value['testing_file_name'] testingDataType = get_value['testing_data_type'] current_tab = 4 programRunStartTime = datetime.now() programRunStartTimeLabel = 'Progress: Program run started at ' + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + ' (UTC time). ' progressbarlabel_text = programRunStartTimeLabel progress_text = '' progress_text = progress_text + '-' * 75 + '\n' + "Program run started at " + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + " (UTC time)." 
+ \ '\n' + '-' * 75 + '\n' + "Starting unsupervised learning process...\n" # trainedModelName = 'K-means clustering' numberOfClusters = int(re.search('NumClusters=(.*)_TopWords', trainedModelName).group(1)) numberOfClusters = numberOfClusters number_of_top_words = int(trainedModelName.split("TopWords=")[1].replace('/', '')) progressbar_maximum = 60 + 10 * numberOfClusters progressbar_value = 10 index_name_gloabals = 'apolloglobalsunsupervised' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_maximum": progressbar_maximum, "current_tab": current_tab, "progressbar_value": progressbar_value, "testingFileName": testingFileName, "trainedModelName": trainedModelName, "testingDataType": testingDataType, "progress_text": progress_text , "progressbarlabel_text": progressbarlabel_text, "numberOfClusters": numberOfClusters, "number_of_top_words": number_of_top_words}}) # historyFilename = '' # if os.name == 'nt': # historyFilename = 'C:/Users/manali.s/Desktop/Classification_Projects/' # else: # historyFilename = '/data/Classification_Projects/' # historyFilename += comboboxAvailableProjects.get() + '/history.txt' # Set directory for the output folder # output_folder = os.path.dirname(testing_file_name) # Read the paper data from the file # [type, title, abstract, author, affiliation, year] # The code for patent and journal testing data is different because it required different preprocessing user_defined_stopwords = [] # Load tfidf vectorizer key_value = 'classificationprojects/' key_value += saveProjectName + '/unsupervised/' key_value += trainedModelName + '/' s3 = boto3.client('s3') response1 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'tfidf_vect.pkl') # for line in contents.splitlines(): # Load the tfidf vectorizer pickle file that was previously saved in S3 tfidfVectorizer = response1['Body'].read() tfidf_vect = pickle.loads(tfidfVectorizer) progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) if testingDataType == 'Patent': # Read the test patent data file_test_open = testing_data file_test_open = file_test_open.split('\n') # split by new line file_test_open = list(filter(None, file_test_open)) # delete empty lines # Now, the first line is header, so remove the first line file_test_open = file_test_open[1:] progress_text = progress_text + "Preprocessing unlabeled data...\n" es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text}}) # Build the stop words stops = stopwords aux_stops = './static/AuxStops.txt' aux_stops = open(aux_stops, 'r').read() aux_stops = re.sub("[^a-zA-Z ]", " ", aux_stops) # remove non-alphanumeric aux_stops = " ".join(aux_stops.split()) # split by any whitespace and rejoin w/ space aux_stops = aux_stops.split(' ') aux_stops = list(filter(None, aux_stops)) # append auxiliary stops stops = stops + aux_stops # Bulid stopterm dictionary stopterms = {} for stop in stops: if stop in stopterms: stopterms[stop] += 1 else: stopterms[stop] = 1 (file_test_proc, file_test_stem, file_test_stop_words_removed) = preprocess_collection(file_test_open, stopterms, False, progress_text) file_test_proc = list(filter(None, file_test_proc)) file_test = list(filter(None, file_test_stem)) title_test = [doc.split('\t')[1] for doc in file_test] abstract_test = 
[doc.split('\t')[2] for doc in file_test] claim_test = [doc.split('\t')[3] for doc in file_test] test_data = [' '.join(doc) for doc in zip(title_test, abstract_test, claim_test)] title_test_stop_words_removed = [doc.split('\t')[1] for doc in file_test_stop_words_removed] abstract_test_stop_words_removed = [doc.split('\t')[2] for doc in file_test_stop_words_removed] claim_test_stop_words_removed = [doc.split('\t')[3] for doc in file_test_stop_words_removed] unlabeled_test_stop_words_removed = [' '.join(doc) for doc in zip(title_test_stop_words_removed, abstract_test_stop_words_removed, claim_test_stop_words_removed)] # convert text data to tfidf test_data_tfidf = tfidf_vect.transform(test_data) progress_text = progress_text + "\nRemoved duplicates and preprocessed " + str( len(test_data)) + " documents." progressbar_value += 20 # Load the model from pickle file response2 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') # 1. Load the model model_file = response2['Body'].read() model = pickle.loads(model_file) # determine the best model based on evaluating several models automatic_mode = True progress_text = progress_text + "\nPerforming clustering on the data..." output_filename = re.sub('.txt', '_Results_Topic_Modeling.txt', testingFileName) es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text, "progressbar_value": progressbar_value}}) # Predict clusters of documents clusters = model.predict(test_data_tfidf) test_patent_others = {'content': test_data, 'file_others': file_test_proc, 'content_stop_words_removed': unlabeled_test_stop_words_removed, 'cluster': clusters} # 1. Load the previous data frame to run topic modeling again on each set of document clusters response3 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'topicModelingFrame.pkl') topicModelingFrame = response3['Body'].read() oldFrame = pickle.loads(topicModelingFrame) # The new documents are clustered using the old model, however, the topics have to be re-computed for each cluster based on new documents. frame = pd.DataFrame(test_patent_others, index=[clusters], columns=['content', 'file_others', 'content_stop_words_removed', 'cluster']) # Append old frame to new frame and recompute the topics for clusters. # Alternatively, check if IP Managers would like to see the new documents belonging to the same clusters; but this might not be the expected behavior. frame = frame.append(oldFrame) clustering_successful = False # output lda other topics # Need to extract number of top words from path to save the model progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) # with open(output_filename, 'w+') as fout_others: for no in range(numberOfClusters): try: # sometimes, there is no document in the group, so handle that case with try and except patent_group = frame.groupby(frame['cluster']).get_group(no) except: # continue, because there is no document in this cluster. 
Move on to the topic modeling for next cluster continue patent_tac = patent_group.ix[:, 0].tolist() patent_org = patent_group.ix[:, 1].tolist() lda_tf_vect = TfidfVectorizer(max_df=0.8, min_df=1, max_features=200000, ngram_range=(1, 5), use_idf=True, stop_words='english') tf = None try: tf = lda_tf_vect.fit_transform(patent_tac) except Exception as e: lda_tf_vect = TfidfVectorizer(max_df=1.0, min_df=1, max_features=200000, ngram_range=(1, 5), use_idf=True, stop_words='english') tf = lda_tf_vect.fit_transform(patent_tac) # LDA Model lda = LatentDirichletAllocation(n_components=1, max_iter=20, learning_method='online', learning_offset=50, random_state=0).fit(tf) lda_feature_names = lda_tf_vect.get_feature_names() lda_topics = get_topic_list(lda, lda_feature_names, number_of_top_words) clusterTopicsAndCounts = [] clusterTopicsAndCounts.append([len(patent_tac), lda_topics[0]]) doc_topic = lda.transform(tf) doc_topic_index = doc_topic.argmax(axis=1) fout_others = '' for doc, doc_topic_i in zip(patent_org, doc_topic_index): fout_others += '\t'.join( [doc.strip('\r').strip('\n'), lda_topics[doc_topic_i].strip('\r').strip('\n')]) + '\n' progressbar_value += 10; progress_text = progress_text + '\nTopic extraction and clustering completed.' programRunEndTime = datetime.now(); timeDifference = relativedelta(programRunEndTime, programRunStartTime); programRunStartTimeLabel = "Program run took %d days %d hours %d minutes %d seconds." % ( timeDifference.days, timeDifference.hours, timeDifference.minutes, timeDifference.seconds); progressbarlabel_text = programRunStartTimeLabel; progress_text = progress_text + programRunStartTimeLabel + '\n' + '*' * 95 + '\n'; progressbar_value += 10; final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"progress_text": progress_text, "progressbar_value": progressbar_value, "progressbarlabel_text": progressbarlabel_text, "final_progress_value": final_progress_value}}) response = HttpResponse(content=fout_others, content_type='text/plain'); response['Content-Disposition'] = 'attachment; filename=' + output_filename return HttpResponse('successfully executed') elif testingDataType == 'Journal': file_test_open = testing_data file_test_open = file_test_open.split('\n') # split by new line file_test_open = list(filter(None, file_test_open)) # delete empty lines # Now, the first line is header, so remove the first line file_test_open = file_test_open[1:] progress_text = progress_text + "\nPreprocessing unlabeled data..." es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"progress_text": progress_text}}) # Remove the duplicated document based on "title" file_test_open = dedup_collection_journal(file_test_open, 1, 2) # preprocessing for scoupus data file_test_proc = preprocess_collection_journal(file_test_open) test_data = [] unlabeled_test_stop_words_removed = [] for doc in file_test_open: stop_and_stem_document_title, stop_document_title = stop_and_stem_journal_2(doc.split('\t')[1], user_defined_stopwords) stop_and_stem_document_abstract, stop_document_abstract = stop_and_stem_journal_2( doc.split('\t')[2], user_defined_stopwords) test_data.append(' '.join([stop_and_stem_document_title, stop_and_stem_document_abstract])) unlabeled_test_stop_words_removed.append(' '.join([stop_document_title, stop_document_abstract])) # convert text data to tfidf test_data_tfidf = tfidf_vect.transform(test_data) progress_text = progress_text + "\nRemoved duplicates and preprocessed " + str( len(test_data)) + " documents." 
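# --- Illustrative sketch: the per-cluster topic extraction performed in this view.
# --- For each cluster predicted by the saved clustering model, a TfidfVectorizer plus a
# --- single-topic LatentDirichletAllocation model is fitted on that cluster's documents
# --- and the top-N terms label the cluster. get_top_terms is a hypothetical stand-in for
# --- the project's get_topic_list helper; parameters here are simplified for clarity.
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer

def get_top_terms(lda_model, feature_names, n_top_words):
    topic_weights = lda_model.components_[0]            # single-topic model
    top_indices = topic_weights.argsort()[::-1][:n_top_words]
    return ', '.join(feature_names[i] for i in top_indices)

def describe_cluster(cluster_docs, n_top_words=10):
    vect = TfidfVectorizer(stop_words='english')
    tf = vect.fit_transform(cluster_docs)
    lda = LatentDirichletAllocation(n_components=1, max_iter=20,
                                    learning_method='online', random_state=0).fit(tf)
    # get_feature_names() as used in this module; newer scikit-learn renames it
    # to get_feature_names_out().
    return get_top_terms(lda, vect.get_feature_names(), n_top_words)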
progressbar_value += 20 es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"progress_text": progress_text, "progressbar_value": progressbar_value}}) # Load the model from pickle file response2 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') # 1. Load the model model_file = response2['Body'].read() model = pickle.loads(model_file) # determine the best model based on evaluating several models automatic_mode = True progress_text = progress_text + "\nPerforming clustering on the data..." es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"progress_text": progress_text}}) output_filename = re.sub('.txt', '_Results_Topic_Modeling.txt', testingFileName) # Predict clusters of documents clusters = model.predict(test_data_tfidf) test_patent_others = {'content': test_data, 'file_others': file_test_proc, 'content_stop_words_removed': unlabeled_test_stop_words_removed, 'cluster': clusters} # 1. Load the previous data frame to run topic modeling again on each set of document clusters response3 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'topicModelingFrame.pkl') topicModelingFrame = response3['Body'].read() oldFrame = pickle.loads(topicModelingFrame) # The new documents are clustered using the old model, however, the topics have to be re-computed for each cluster based on new documents. frame = pd.DataFrame(test_patent_others, index=[clusters], columns=['content', 'file_others', 'content_stop_words_removed', 'cluster']) # Append old frame to new frame and recompute the topics for clusters. # Alternatively, check if IP Managers would like to see the new documents belonging to the same clusters; but this might not be the expected behavior. frame = frame.append(oldFrame) clustering_successful = False # output lda other topics # Need to extract number of top words from path to save the model progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"progressbar_value": progressbar_value}}) # with open(output_filename, 'w+') as fout_others: for no in range(numberOfClusters): try: # sometimes, there is no document in the group, so handle that case with try and except patent_group = frame.groupby(frame['cluster']).get_group(no) except: # continue, because there is no document in this cluster. 
Move on to the topic modeling for next cluster continue patent_tac = patent_group.ix[:, 0].tolist() patent_org = patent_group.ix[:, 1].tolist() lda_tf_vect = TfidfVectorizer(max_df=0.8, min_df=1, max_features=200000, ngram_range=(1, 5), use_idf=True, stop_words='english') tf = None try: tf = lda_tf_vect.fit_transform(patent_tac) except Exception as e: lda_tf_vect = TfidfVectorizer(max_df=1.0, min_df=1, max_features=200000, ngram_range=(1, 5), use_idf=True, stop_words='english') tf = lda_tf_vect.fit_transform(patent_tac) # LDA Model lda = LatentDirichletAllocation(n_components=1, max_iter=20, learning_method='online', learning_offset=50, random_state=0).fit(tf) lda_feature_names = lda_tf_vect.get_feature_names() lda_topics = get_topic_list(lda, lda_feature_names, number_of_top_words) clusterTopicsAndCounts = [] clusterTopicsAndCounts.append([len(patent_tac), lda_topics[0]]) doc_topic = lda.transform(tf) doc_topic_index = doc_topic.argmax(axis=1) fout_others = '' for doc, doc_topic_i in zip(patent_org, doc_topic_index): fout_others += '\t'.join( [doc.strip('\r').strip('\n'), lda_topics[doc_topic_i].strip('\r').strip('\n')]) + '\n' progressbar_value += 10 progress_text = progress_text + '\nTopic extraction and clustering completed.' es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"progress_text": progress_text, "progressbar_value": progressbar_value}}) # Load the topic modeling results in the treeview # Compute the columns: (i) # instances, (ii) Topics extracted # populateOutputTreeviewWithTopicModelingResults(clusterTopicsAndCounts) programRunEndTime = datetime.now() timeDifference = relativedelta(programRunEndTime, programRunStartTime) programRunStartTimeLabel = "Program run took %d days %d hours %d minutes %d seconds." % ( timeDifference.days, timeDifference.hours, timeDifference.minutes, timeDifference.seconds) progressbarlabel_text = programRunStartTimeLabel progress_text = progress_text + programRunStartTimeLabel + '\n' + '*' * 95 + '\n' progressbar_value += 10 final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"progress_text": progress_text, "progressbar_value": progressbar_value, "progressbarlabel_text": progressbarlabel_text, "final_progress_value": final_progress_value}}) response = HttpResponse(content=fout_others, content_type='text/plain') response['Content-Disposition'] = 'attachment; filename=' + output_filename return HttpResponse('successfully executed') except Exception as e: final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={ "doc": {"final_progress_value": final_progress_value}}) return JsonResponse({'finalResponse': 'Error'}) def stop_and_stem_journal_2(file_t, user_defined_stopwords): ''' Input: file_t: a text string, stopterms: a dictionary of stop terms Output: file_stem: a list of stopped and stemmed terms ''' try: stopterms = build_stopterms_journal(user_defined_stopwords) # remove the patent specific terms file_t = file_t.lower() # lowercase all file_t = re.sub("[^a-zA-Z0-9 ]", " ", file_t) # remove non-alphanumeric file_t = re.sub("\s[0-9]+$", '', file_t) file_t = re.sub("\s[0-9]+\s", ' ', file_t) file_t = re.sub("^[0-9]+\s", '', file_t) file_t = re.sub("androids*", "antroid", file_t) file_t = re.sub("andes", "antes", file_t) file_t = re.sub("and[0-9a-z]+", "", file_t) file_t = re.sub("antroid", "android", file_t) file_t = re.sub("antes", "andes", file_t) file_t = re.sub("including[0-9a-z]+", "", file_t) file_t = re.sub("wherein[0-9a-z]+", "", file_t) file_t = 
re.sub("comprising[0-9a-z]+", "", file_t) formula_chk0 = re.findall(" formula | math ", file_t) formula_chk1 = re.findall(" tail ", file_t) formula_chk2 = re.findall(" lead ", file_t) if len(formula_chk0) > 0 and len(formula_chk1) > 0 and len(formula_chk2) > 0: file_t = re.sub(" formula | math ", " ", file_t) file_t = re.sub(" tail ", " ", file_t) file_t = re.sub(" lead ", " ", file_t) file_t = " ".join(file_t.split()) # split by any whitespace and rejoin w/ space file_t = file_t.split(" ") # split by space # remove the stop terms in the text file_stop = [] # initialize list for term in file_t: if term not in stopterms: file_stop.append(term) # stem using porter algorithm file_stem = [] # initialize list for term in file_stop: try: term = wn().lemmatize(term) except: pass term = str(term) file_stem.append(term) file_stem = ' '.join(file_stem) file_stop = ' '.join(file_stop) return file_stem, file_stop except Exception as e: return HttpResponse( "Error running the program." + str(e)) @csrf_exempt def incrementalsupervisedlearning(request): # Assuming that the training data uploaded by the user is available in the variable progress_text = "" progressbarlabel_text = "" global training_data try: # Assuming that the client-side has already selected the options before running the program. # Assuming that the request from the client side will have all the fields necessary for running the program. if request.method == "POST": gui_parameters = request.POST.getlist('inputData')[0] gui_parameters = json.loads(gui_parameters); finalTrainingData = request.FILES.getlist('trainFile') # training_data = request.FILES.getlist('file').read().decode("ISO-8859-1") training_data = Process_All_Files(finalTrainingData) trainingFileName = gui_parameters['training_file_name'] trainingDataType = gui_parameters['training_data_type'] selectedProjectName = gui_parameters['saveProjectName'] selectedModelName = gui_parameters['trainedModelName'] current_tab = 5 username = request.user.username; progressbar_maximum = 200 progressbar_value = 0 # Set the text in progressbarlabel programRunStartTime = datetime.now() programRunStartTimeLabel = 'Progress: Program run started at ' + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + ' (UTC time). ' progressbarlabel_text = programRunStartTimeLabel progress_text = progress_text + '-' * 75 + '\n' + "Program run started at " + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + " (UTC time).\n" + '-' * 75 + '\n' + "Starting incremental learning process..." 
index_name_gloabals = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": username}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_maximum": progressbar_maximum, "current_tab": current_tab, "progressbar_value": progressbar_value, "trainingFileName": trainingFileName, "trainedModelName": selectedModelName, "trainingDataType": trainingDataType, "progress_text": progress_text , "progressbarlabel_text": progressbarlabel_text}}) historyFilename = 'history.txt' append_text_to_history_file = "" # The code for loading and pre-processing the data is different for patent and journal data if trainingDataType == 'Patent': file_sample_open = training_data file_sample_open = file_sample_open.split('\n') # split by new line file_sample_open = list(filter(None, file_sample_open)) # delete empty lines # Now, the first line is header, so remove the first line file_sample_open = file_sample_open[1:] progress_text = progress_text + "\nFound " + str( len(file_sample_open)) + " patents! \nPreprocessing documents...\n" # Set value of progressbar to 10 once the training dataset is loaded progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text}}) # Build the stop words stops = stopwords aux_stops = './static/AuxStops.txt' aux_stops = open(aux_stops, 'r').read() aux_stops = re.sub("[^a-zA-Z ]", " ", aux_stops) # remove non-alphanumeric aux_stops = " ".join(aux_stops.split()) # split by any whitespace and rejoin w/ space aux_stops = aux_stops.split(' ') aux_stops = list(filter(None, aux_stops)) # append auxiliary stops stops = stops + aux_stops # append user-provided stop words. This is not available for incremental learning framework, so keep it empty. user_defined_stopwords = [] stops = stops + user_defined_stopwords # Bulid stopterm dictionary stopterms = {} for stop in stops: if stop in stopterms: stopterms[stop] += 1 else: stopterms[stop] = 1 # Preprocess the sample file # Also, we need to check the original data file for any duplicates in the new data # Load the latest training data for this model and use the application number column to deduplicate the documents from previous patents data s3 = boto3.client('s3') key_value = 'classificationprojects/' key_value += selectedProjectName + '/supervised/' key_value += selectedModelName + '/' try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'training_data_patents.txt') # Need to change to training_data_patent after impementation of save_both_exisitng_model oldPatentsFile = response['Body'].read() # The training data already exists, and we need to append the new data to this data as well. 
file_old_training_data_open = oldPatentsFile.decode('utf-8') training_data_IL = training_data + '\n' + file_old_training_data_open.split('\n', 1)[1] file_old_training_data_open = file_old_training_data_open.split('\n') # split by new line file_old_training_data_open = list(filter(None, file_old_training_data_open)) # delete empty lines # Now, the first line is header, so remove the first line file_old_training_data_open = file_old_training_data_open[1:] existing_application_numbers = [doc.split('\t')[4].upper() for doc in file_old_training_data_open] except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing patent training data, so no need to deduplicate based on previous data. existing_application_numbers = [] elif e.response['Error']['Code'] == "NoSuchKey": existing_application_numbers = [] pass else: existing_application_numbers = [] pass # Remove the duplicated documents based on "Application number" (file_sample_proc, file_sample_stem, file_test_stop_words_removed) = preprocess_collection_incremental_learning(username, file_sample_open, stopterms, existing_application_numbers, True, progress_text) file_sample = list(filter(None, file_sample_stem)) if len(file_sample) < 1: progressbar_value = 180 # Simply display the error message below and exit. final_progress_value = 200 errorString = 'The additional training data file does not contain any new Patents for training the model. \nCannot perform incremental learning in this case.' es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"final_progress_value": final_progress_value, "errorString": errorString}}) return JsonResponse({'finalResponse': "Error"}) title_samples = [doc.split('\t')[1].strip('\r').strip('\n') for doc in file_sample] abstract_samples = [doc.split('\t')[2].strip('\r').strip('\n') for doc in file_sample] claim_samples = [doc.split('\t')[3].strip('\r').strip('\n') for doc in file_sample] label_samples = [doc.split('\t')[8].lower().strip('\r').strip('\n') for doc in file_sample] labels = sorted(list(set(label_samples))) train_data = [' '.join(doc) for doc in zip(title_samples, abstract_samples, claim_samples)] train_target = label_samples # End patent training data elif trainingDataType == 'Journal': file_sample_open = training_data file_sample_open = file_sample_open.split('\n') # split by new line file_sample_open = list(filter(None, file_sample_open)) # delete empty lines # Now, the first line is header, so remove the first line file_sample_open = file_sample_open[1:] progress_text = progress_text + "\nFound " + str( len(file_sample_open)) + " documents! \nPreprocessing documents...\n" # Set value of progressbar to 10 once the training dataset is loaded progressbar_value = 10 # Load the latest training data for this model and use the title and abstract columns to deduplicate the documents from previous journals data es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text}}) # load_data_from_S3() s3 = boto3.client('s3') key_value = 'classificationprojects/' key_value += selectedProjectName + '/supervised/' key_value += selectedModelName + '/' try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'training_data_journals.txt') oldJournalsFile = response['Body'].read() # The training data already exists, and we need to append the new data to this data as well. 
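# --- Illustrative sketch: the "append or create" handling of the cumulative training data
# --- file in S3 used in this function. If the object is missing (404 / NoSuchKey), the new
# --- upload becomes the file; otherwise the new rows are appended (dropping the new file's
# --- header line, as done above) and the object is written back. Bucket and key are
# --- placeholders; this is a simplified stand-in, not the exact production logic.
import boto3
from botocore.exceptions import ClientError

def append_training_data(bucket, key, new_rows_text):
    s3_client = boto3.client('s3')
    try:
        existing = s3_client.get_object(Bucket=bucket, Key=key)['Body'].read().decode('utf-8')
        merged = existing + '\n' + new_rows_text.split('\n', 1)[1]
    except ClientError as error:
        if error.response['Error']['Code'] in ('404', 'NoSuchKey'):
            merged = new_rows_text  # no previous training data: create the file
        else:
            raise
    s3_client.put_object(Body=merged, Bucket=bucket, Key=key)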
# file_old_training_data_open = codecs.open(oldJournalsFile).read() # open file file_old_training_data_open = oldJournalsFile.decode('utf-8') training_data_IL = training_data + '\n' + file_old_training_data_open.split('\n', 1)[1] file_old_training_data_open = file_old_training_data_open.split('\n') # split by new line file_old_training_data_open = list(filter(None, file_old_training_data_open)) # delete empty lines # Now, the first line is header, so remove the first line file_old_training_data_open = file_old_training_data_open[1:] existing_title_and_abstracts = ['\t'.join([doc.split('\t')[1], doc.split('\t')[2]]) for doc in file_old_training_data_open] except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing patent training data, so no need to deduplicate based on previous data. existing_title_and_abstracts = [] elif e.response['Error']['Code'] == "NoSuchKey": existing_title_and_abstracts = [] pass else: existing_title_and_abstracts = [] pass # Remove the duplicated documents based on "title" file_sample_open_journal = dedup_collection_journal_incremental_learning(file_sample_open, existing_title_and_abstracts, 1, 2) if len(file_sample_open_journal) < 1: progressbar_value = 180 final_progress_value = 200 errorString = 'The additional training data file does not contain any new journals for training the model. \nCannot perform incremental learning in this case.' es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"final_progress_value": final_progress_value, "progressbar_value": progressbar_value, "errorString": errorString}}) return JsonResponse({'finalResponse': "Error"}) # Preprocessing for scoupus data file_sample_open_journal = preprocess_collection_journal(file_sample_open_journal) # Take the stopwords from the GUI and add them to the stopwords list. This is not available for incremental learning framework. user_defined_stopwords = [] file_sample_data = [' '.join([stop_and_stem_journal(doc.split('\t')[1], user_defined_stopwords) , stop_and_stem_journal(doc.split('\t')[2], user_defined_stopwords) ]) for doc in file_sample_open_journal] # Training Phase label_samples = [doc.split('\t')[-1].lower().strip('\r').strip('\n') for doc in file_sample_open_journal] labels = sorted(list(set(label_samples))) train_data = file_sample_data train_target = label_samples # end journal training data preprocessing progress_text = progress_text + "Removed duplicates and preprocessed " + str( len(train_data)) + " documents." # Increment the value of progressbar by 20 once the training dataset is loaded progressbar_value += 20 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text}}) # Check if the training data contains any new class. 
If yes, then the model needs to be trained from scratch # Warn user to make sure that the new class wasn't added mistakenly # Get the classes from the trainingDataStatistics object loaded from S3 buket INDEX_NAME = 'savemodelsupervised' query = {"query": {"bool": {"must": [{"match": {"saveProjectName.keyword": selectedProjectName}}, {"match": { "model_data.trainedModelName.keyword": selectedModelName}}]}}} res = es_conn.search(index=INDEX_NAME, body=query) trainingDataStatisticsFromS3 = res['hits']['hits'][0]['_source']['model_data']['trainingDataTables'] # response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + 'trainingDataStatistics.pkl') # # trainingDataStatisticsFromS3 = response['Body'].read() trainingDataStatistics = json.loads(trainingDataStatisticsFromS3) currentModelClasses = [data[0] for data in trainingDataStatistics] newclassString = '' if not set(train_target).issubset(set(currentModelClasses)): # Get a list of new classes newClassesList, oldClasses = set(train_target), set(currentModelClasses) newClasses = [] for cls in newClassesList: if cls not in oldClasses: newclassString = newclassString + '\'' + cls.strip('\r').strip( '\n') + '\' ' newClasses.append(cls) progressbar_value_IL = progressbar_value progressbar_value = 0 train_data_IL = train_data train_target_IL = train_target es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value_IL, "train_data": train_data_IL, "train_target": train_target_IL}}) if 'accuracy' in selectedModelName: targetPerformanceMeasure = 'accuracy' elif 'auc' in selectedModelName: targetPerformanceMeasure = 'auc' elif 'macro_f1' in selectedModelName: targetPerformanceMeasure = 'macro_f1' elif 'macro_precision' in selectedModelName: targetPerformanceMeasure = 'macro_precision' elif 'macro_recall' in selectedModelName: targetPerformanceMeasure = 'macro_recall' targetPerformanceMeasure_IL = targetPerformanceMeasure es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"targetPerformanceMeasure_IL": targetPerformanceMeasure_IL}}) # Show the message below. If user presses "Yes", then call another function to retrain the model from scratch. Otherwise, do nothing further. return JsonResponse( {'finalResponse': 'retrainModelFromScratch ', 'newclassString': newclassString}) # If no new classes are found, continue with model training as usual. # For multinomial naive Bayes, incremental learning produces bad results. 
For now, the workaround is to retrain the model from scratch if 'Multinomial_Naive_Bayes' in selectedModelName: if 'accuracy' in selectedModelName: targetPerformanceMeasure = 'accuracy' elif 'auc' in selectedModelName: targetPerformanceMeasure = 'auc' elif 'macro_f1' in selectedModelName: targetPerformanceMeasure = 'macro_f1' elif 'macro_precision' in selectedModelName: targetPerformanceMeasure = 'macro_precision' elif 'macro_recall' in selectedModelName: targetPerformanceMeasure = 'macro_recall' # F1, recall, precision successful = retrainModelFromScratch(train_data, train_target, selectedProjectName, selectedModelName, targetPerformanceMeasure, programRunStartTime, False, username) if successful == True: # Append the patent data to patent training file, and journal data to journal training file if trainingDataType == 'Patent': saveTrainingFileKey = key_value + 'training_data_patents.txt' elif trainingDataType == 'Journal': saveTrainingFileKey = key_value + 'training_data_journals.txt' try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=saveTrainingFileKey) oldFile = response['Body'].read() # The training data already exists, and we need to append the new data to this data as well. # file_old_training_data_open = codecs.open(oldFile).read() # open file file_old_training_data_open = oldFile.decode('utf-8') file_old_training_data_open += training_data # write the file back to S3 s3.put_object(Body=file_old_training_data_open, Bucket=AWS_STORAGE_BUCKET_NAME, Key=saveTrainingFileKey) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing patent (or journal) training data, so simply put the file into S3 s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, Key=saveTrainingFileKey) elif e.response['Error']['Code'] == "NoSuchKey": pass else: pass final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"final_progress_value": final_progress_value}}) return JsonResponse({'finalResponse': "Incremental learning finished successfully."}) else: progressbar_value = 100 final_progress_value = 200 errorString = 'retrainModelFromScratch failed. Please contact the IP Group Analytics Team.' 
+ e es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"final_progress_value": final_progress_value, "progressbar_value": progressbar_value, "errorString": errorString}}) return JsonResponse({'finalResponse': 'Error'}) # At this point, proceed with incremental learning for other models oldStatsForSelectedModels = trainingDataStatistics # Update the training data statistics # Add the number of instances for respective classes and recompute the class distribution newStatsForSelectedModel = [] oldStatsForSelectedModelLabels = [i[0] for i in oldStatsForSelectedModels] oldStatsForSelectedModelCounts = [int(i[1]) for i in oldStatsForSelectedModels] oldStatsTotalInstances = np.sum(oldStatsForSelectedModelCounts) newStatsForSelectedModel = [] for i in range(len(oldStatsForSelectedModelLabels)): label = oldStatsForSelectedModelLabels[i] distribution = str(np.round((oldStatsForSelectedModelCounts[i] + train_target.count(label)) * 100.0 / ( oldStatsTotalInstances + len(train_target)) * 1.0, 2)) + '%' newStatsForSelectedModel.append( [label, oldStatsForSelectedModelCounts[i] + train_target.count(label), distribution]) trainingDataStatistics = newStatsForSelectedModel # simply copy the file to the trainingData folder with the timestamp information # simply copy the file to the trainingData folder with the timestamp information # training_data = pickle.dumps(training_data) # s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + '/trainingData/' + os.path.basename( # trainingFileName) + '_' + datetime.now().strftime('%Y-%m-%d %H-%M-%S') + # os.path.splitext(trainingFileName)[1]) if trainingDataType == 'Patent': saveTrainingFileKey = key_value + 'training_data_patents.txt' elif trainingDataType == 'Journal': saveTrainingFileKey = key_value + 'training_data_journals.txt' # Append the patent data to patent training file, and journal data to journal training file # Load the data file from S3, append the data to the file, and then write the updated file back to S3. # First, check whether there is existing patent (or journal) training data file that already exists in S3. If exists, then append the existing file, else, create and save the new file. try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=saveTrainingFileKey) oldFile = response['Body'].read() # The training data already exists, and we need to append the new data to this data as well. # file_old_training_data_open = codecs.open(oldFile).read() # open file file_old_training_data_open = oldFile.decode('utf-8') # Remove the first header line file_old_training_data_open += training_data.split('\n', 1)[1] # write the file back to S3 s3.put_object(Body=file_old_training_data_open, Bucket=AWS_STORAGE_BUCKET_NAME, Key=saveTrainingFileKey) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing patent (or journal) training data, so simply put the file into S3 s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, Key=saveTrainingFileKey) elif e.response['Error']['Code'] == "NoSuchKey": pass else: pass append_text_to_history_file += '-' * 100 + '\n' + 'username: ' + username + '\n' append_text_to_history_file += "Program run started at " + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + " (UTC time).\n" + '-' * 100 + '\n' # Update the history based on whether the updated mdoel is supervised or unsupervised model. 
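# --- Illustrative sketch: how the per-class statistics table is refreshed after the
# --- incremental update. Old counts per label are combined with the counts in the new
# --- batch and the percentage distribution is recomputed over the new total. Variable
# --- names here are local to the sketch, not the values stored in Elasticsearch.
import numpy as np

def merge_class_statistics(old_stats, new_targets):
    """old_stats: list of [label, count, 'xx.x%'] rows; new_targets: list of new labels."""
    old_total = sum(int(row[1]) for row in old_stats)
    new_total = old_total + len(new_targets)
    merged = []
    for label, count, _ in old_stats:
        updated_count = int(count) + new_targets.count(label)
        share = str(np.round(updated_count * 100.0 / new_total, 2)) + '%'
        merged.append([label, updated_count, share])
    return merged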
if trainingDataType == 'Patent': append_text_to_history_file += 'Supervised learning model ' + selectedModelName + ' was incrementally updated using the PATENT training data file: ' + trainingFileName + '.' + '\n' append_text_to_history_file += str(len(train_target)) + ' training examples were added to the model.\n' append_text_to_history_file += 'Total number of examples with which the model is fully trained is: ' + str( oldStatsTotalInstances + len(train_target)) + '\n' elif trainingDataType == 'Journal': append_text_to_history_file += 'Supervised learning model ' + selectedModelName + ' was incrementally updated using the JOURNAL training data file: ' + trainingFileName + '.' + '\n' append_text_to_history_file += str(len(train_target)) + ' training examples were added to the model.\n' append_text_to_history_file += 'Total number of examples with which the model is fully trained is: ' + str( oldStatsTotalInstances + len(train_target)) + '\n' # Write the new stats for the model to pickle file to save dump_trainingDataStatistics = pickle.dumps(trainingDataStatistics) s3.put_object(Body=dump_trainingDataStatistics, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainingDataStats.pkl') progress_text = progress_text + "\nTraining Data Statistics have been updated." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text}}) # Load the tfidf vectorizer that was previously saved response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'tfidf_vect.pkl') tfidfVectorizerFile = response['Body'].read() tfidf_vect = pickle.loads(tfidfVectorizerFile) ##tf-idf with params train_tfidf = tfidf_vect.transform(train_data) # Update model with this additional data # 1. Load the model response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') modelFile = response['Body'].read() model = pickle.loads(modelFile) # Need to update train_target for training, because the new data might not have all the classes on which the model was initially trained. train_target_original = model.classes_ currentModelClasses = [data[0] for data in trainingDataStatistics] # Increment the value of progressbar by 10 once the training stats have ben updated progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) # Update the model using partial_fit. # The partial_fit method will be different for LR and SVM models if ('Support_Vector_Machine' in selectedModelName) or ('Logistic_Regression' in selectedModelName): def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] train_tfidf_dense = train_tfidf.toarray() n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_data)) for n in np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [train_target[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=train_target_original) else: if 'Deep_Learning' in selectedModelName: if training_data_IL == None: model.fit(training_data) else: model.fit(training_data_IL) else: model.partial_fit(train_tfidf.todense(), train_target, classes=train_target_original) progress_text = progress_text + "\nThe model has been incrementally updated with additional training data." 
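# --- Illustrative sketch: the shuffled mini-batch partial_fit loop used above for the
# --- SGD-based models (Logistic Regression / Support Vector Machine). Because SGD output
# --- depends on the interaction between alpha and the number of passes, the new examples
# --- are shuffled and fed in small batches over several epochs, and the full class list is
# --- passed so the model keeps its original label set even if a batch misses some classes.
# --- Function and argument names are local to the sketch.
import numpy as np

def incremental_update(model, X_dense, y, all_classes, n_epochs=25, batch_size=5, seed=5647):
    rng = np.random.RandomState(seed)
    indices = np.arange(len(y))
    y_array = np.asarray(y)
    for _ in range(n_epochs):
        rng.shuffle(indices)
        for start in range(0, len(indices), batch_size):
            batch = indices[start:start + batch_size]
            model.partial_fit(X_dense[batch], y_array[batch], classes=all_classes)
    return model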
es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text}}) # If old training data exists, load the data and preprocess the data # Once loaded, append the data to train_data and train_target to train the model from scratch all_train_data = [] progress_text = progress_text + "\nUpdating the five-fold cross validation performances..." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text}}) try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'training_data_patents.txt') oldFile = response['Body'].read() # file_sample_open = codecs.open(oldFile) # open file file_sample_open = oldFile.decode('utf-8') file_sample_open = file_sample_open.split('\n') # split by new line file_sample_open = list(filter(None, file_sample_open)) # delete empty lines # Now, the first line is header, so remove the first line file_sample_open = file_sample_open[1:] progress_text = progress_text + "\nFound existing " + str( len(file_sample_open)) + " patents in prior training data files! \nPreprocessing documents...\n" es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text}}) # Build the stop words stops = stopwords aux_stops = './static/AuxStops.txt' aux_stops = open(aux_stops, 'r').read() aux_stops = re.sub("[^a-zA-Z ]", " ", aux_stops) # remove non-alphanumeric aux_stops = " ".join(aux_stops.split()) # split by any whitespace and rejoin w/ space aux_stops = aux_stops.split(' ') aux_stops = list(filter(None, aux_stops)) # append auxiliary stops stops = stops + aux_stops # append user-provided stop words try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'user_defined_stopwords.pkl') user_defined_stopwords_file = response['Body'].read() user_defined_stopwords = pickle.loads(user_defined_stopwords_file) user_defined_stopwords = list(user_defined_stopwords) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing patent (or journal) training data, so do nothing user_defined_stopwords = [] elif e.response['Error']['Code'] == "NoSuchKey": user_defined_stopwords = [] pass else: pass stops = stops + user_defined_stopwords # Bulid stopterm dictionary stopterms = {} for stop in stops: if stop in stopterms: stopterms[stop] += 1 else: stopterms[stop] = 1 # Preprocess the sample file existing_application_numbers = [] (file_sample_proc, file_sample_stem, temp) = preprocess_collection_incremental_learning( username, file_sample_open, stopterms, existing_application_numbers, True, progress_text) file_sample = list(filter(None, file_sample_stem)) title_samples = [doc.split('\t')[1] for doc in file_sample] abstract_samples = [doc.split('\t')[2] for doc in file_sample] claim_samples = [doc.split('\t')[3] for doc in file_sample] label_samples = [doc.split('\t')[8].lower() for doc in file_sample] labels = sorted(list(set(label_samples))) train_data_exisiting_patents = [' '.join(doc) for doc in zip(title_samples, abstract_samples, claim_samples)] train_target_existing_patents = label_samples # Append the existing data to new data all_train_data = train_data + train_data_exisiting_patents all_train_target = train_target + train_target_existing_patents # End patent training data except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing patent training data, so do nothing pass elif e.response['Error']['Code'] == "NoSuchKey": pass else: pass try: response = 
s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'training_data_journals.txt') oldFile = response['Body'].read() # file_sample_open = codecs.open(oldFile) # open file file_sample_open = oldFile.decode('utf-8') file_sample_open = file_sample_open.split('\n') # split by new line file_sample_open = list(filter(None, file_sample_open)) # delete empty lines # Now, the first line is header, so remove the first line file_sample_open = file_sample_open[1:] progress_text = progress_text + "\nFound " + str( len(file_sample_open)) + " documents from prior training data files! \nPreprocessing documents...\n" es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text}}) # Remove the duplicated documents based on "title" file_sample_open = dedup_collection_journal(file_sample_open, 1, 2) # Preprocessing for scoupus data file_sample_open = preprocess_collection_journal(file_sample_open) # Take the stopwords from the GUI and add them to the stopwords list # append user-provided stop words try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'user_defined_stopwords.pkl') user_defined_stopwords_file = response['Body'].read() user_defined_stopwords = pickle.loads(user_defined_stopwords_file) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing patent (or journal) training data, so do nothing user_defined_stopwords = [] elif e.response['Error']['Code'] == "NoSuchKey": pass else: pass if isinstance(user_defined_stopwords, str): if user_defined_stopwords.strip('\r').strip('\n') == '': user_defined_stopwords = [] file_sample_data = [' '.join([stop_and_stem_journal(doc.split('\t')[1], user_defined_stopwords) , stop_and_stem_journal(doc.split('\t')[2], user_defined_stopwords) ]) for doc in file_sample_open] # Training Phase label_samples = [doc.split('\t')[-1].lower() for doc in file_sample_open] labels = sorted(list(set(label_samples))) train_data_existing_journals = file_sample_data train_target_existing_journals = label_samples if len(all_train_data) == 0: all_train_data = train_data + train_data_existing_journals all_train_target = train_target + train_target_existing_journals else: all_train_data = all_train_data + train_data_existing_journals all_train_target = all_train_target + train_target_existing_journals except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing journal training data, so do nothing pass elif e.response['Error']['Code'] == "NoSuchKey": pass else: pass # end journal training data preprocessing all_train_tfidf = tfidf_vect.transform(all_train_data) if 'Support_Vector_Machine' in selectedModelName: # Need to update the calibration model that will be used to output probabilities in the excel sheet model_isotonic_calibration = None model_sigmoid_calibration = None # calibrate probabilities that will be used by the excel sheet if len(all_train_target) > 500: model_isotonic_calibration = CalibratedClassifierCV(model, cv="prefit", method='isotonic') model_isotonic_calibration.fit(all_train_tfidf.todense(), all_train_target) else: model_sigmoid_calibration = CalibratedClassifierCV(model, cv="prefit", method='sigmoid') model_sigmoid_calibration.fit(all_train_tfidf.todense(), all_train_target) # Save the calibration models for SVM if model_isotonic_calibration != None: # Save the isotonic calibration model isotonic_model = pickle.dumps(model_isotonic_calibration) s3.put_object(Body=isotonic_model, 
                              Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_isotonic_calibration.pkl')

                # Remove the (now stale) sigmoid calibration model; it was trained when there were
                # fewer than 500 training examples, and the isotonic model replaces it.
                try:
                    response2 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME,
                                              Key=key_value + 'model_sigmoid_calibration.pkl')
                    response2 = s3.delete_object(Bucket=AWS_STORAGE_BUCKET_NAME,
                                                 Key=key_value + 'model_sigmoid_calibration.pkl')
                except botocore.exceptions.ClientError as e:
                    if e.response['Error']['Code'] == "404":
                        pass
                    elif e.response['Error']['Code'] == "NoSuchKey":
                        pass
                    else:
                        pass

            if model_sigmoid_calibration is not None:
                # Save the sigmoid calibration model
                sigmoid_model = pickle.dumps(model_sigmoid_calibration)
                s3.put_object(Body=sigmoid_model, Bucket=AWS_STORAGE_BUCKET_NAME,
                              Key=key_value + 'model_sigmoid_calibration.pkl')

        # Save the model, model's CV performance, and CV performance STDEV
        model_dumps = pickle.dumps(model)
        s3.put_object(Body=model_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model.pkl')

        # Increment the value of progressbar by 20 once the model is incrementally updated with additional data
        progressbar_value += 20
        es_conn.update(index=index_name_gloabals, id=id,
                       body={"doc": {"progressbar_value": progressbar_value}})

        # Update model's CV performance and save to file
        # Load the fiveFoldModels.pkl file with the five models
        # We cannot save the five-fold training models for CV, because the multiprocessing implementation
        # does not allow putting the fitted models on multiprocessing Queues.
        # The logic has therefore been changed to recompute the five-fold cross validation performances.
        # response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME,
        #                          Key=key_value + 'fiveFoldModels.pkl')
        # fiveFoldModelsFile = response['Body'].read()
        # fiveFoldModels = pickle.loads(fiveFoldModelsFile)

        # Also need to get the test data, which would be the original data.
        # All the data is already saved into respective pickle files. Load the training and testing data
        # for the five folds from the file, add the new training and test datasets to the respective
        # datasets, and re-evaluate the model's 5-fold cross validation performance.
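# --- Illustrative sketch (not part of the original pipeline) ---------------------------------
# A minimal, self-contained example of the prefit calibration pattern used above: a linear SVM
# trained with SGD has no predict_proba, so a CalibratedClassifierCV with cv="prefit" is fitted
# on top of it, isotonic when there is plenty of data and sigmoid otherwise, mirroring the
# 500-example rule above. The synthetic dataset and the threshold are placeholders.
# Note: cv="prefit" is valid for the scikit-learn versions this code appears to target; newer
# releases may prefer wrapping the estimator differently.
def _sketch_prefit_svm_calibration():
    from sklearn.calibration import CalibratedClassifierCV
    from sklearn.datasets import make_classification
    from sklearn.linear_model import SGDClassifier

    X, y = make_classification(n_samples=300, n_features=20, n_classes=3,
                               n_informative=10, random_state=0)
    base_svm = SGDClassifier(loss='hinge', penalty='l2', alpha=1e-4, random_state=0)
    base_svm.fit(X, y)  # the already-trained ("prefit") model

    method = 'isotonic' if len(y) > 500 else 'sigmoid'
    calibrated = CalibratedClassifierCV(base_svm, cv="prefit", method=method)
    calibrated.fit(X, y)  # ideally a held-out calibration set rather than the training set

    return calibrated.predict_proba(X[:3])  # calibrated class probabilities
# ----------------------------------------------------------------------------------------------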
# response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + 'fiveFoldTestingDatasetTfidf.pkl') # fiveFoldTestingDatasetTfidfFile = response['Body'].read() # fiveFoldTestingDatasetTfidf = pickle.loads(fiveFoldTestingDatasetTfidfFile) # # response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + 'fiveFoldTestingLabels.pkl') # fiveFoldTestingLabelsFile = response['Body'].read() # fiveFoldTestingLabels = pickle.loads(fiveFoldTestingLabelsFile) # # response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + 'fiveFoldTrainingDatasetTfidf.pkl') # fiveFoldTrainingDatasetTfidfFile = response['Body'].read() # fiveFoldTrainingDatasetTfidf = pickle.loads(fiveFoldTrainingDatasetTfidfFile) # # response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + 'fiveFoldTrainingLabels.pkl') # fiveFoldTrainingLabelsFile = response['Body'].read() # fiveFoldTrainingLabels = pickle.loads(fiveFoldTrainingLabelsFile) performances_all_measures_all_folds = [] standard_deviations_all_measures_all_folds = [] # performCV = False # if len(train_data) > 4: # num_splits = 5 # performCV = True # elif len(train_data) > 1: # num_splits = len(train_data) # performCV = True # else: # # No need to do CV; simply add the one example to one of the folds randomly # performCV = False # # Do the 5 fold cross validation # Divide the new training data into five folds -- need to save this information somewhere? Or maybe just use a seed for random number generator to repeat this step, whenever necessary in future skf = KFold(n_splits=5, random_state=7654, shuffle=True) fold_number = 0 # Get optimal model parameter value from the model name optimal_model_parameter = selectedModelName.split('Alpha=')[1] optimal_model_parameter = optimal_model_parameter.split('_')[0] optimal_model_parameter = float(optimal_model_parameter) for train_indices, test_indices in skf.split(all_train_tfidf): # Note: It is okay to use tf-idf transformed data for doing CV, because tf-idf is unsupervised X_train, X_test = all_train_tfidf[train_indices], all_train_tfidf[test_indices] y_train, y_test = np.array(all_train_target)[train_indices], np.array(all_train_target)[test_indices] model_fold = None if 'Multinomial_Naive_Bayes' in selectedModelName and 'One_vs_Rest' not in selectedModelName: # Get optimal alpha for the model # The best model is already computed and best parameter is already determined mnb_alpha = optimal_model_parameter model_fold = MultinomialNB(alpha=mnb_alpha).partial_fit(X_train.todense(), y_train, classes=np.unique(y_train)) elif 'Logistic_Regression' in selectedModelName and 'One_vs_Rest' not in selectedModelName: # Get optimal alpha for the model lrl2_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # output of the model is dependent on the interaction between alpha and the number of epochs (n_iter) # When alpha is very small, n_iter must be large to compensate for the slower learning rate # When using SGD, the partial_fit method has to be applied on different batches of the training data, and we need to epoch multiple times model_fold = SGDClassifier(loss='log', penalty='l2', alpha=lrl2_alpha, class_weight=None, random_state=random_state) train_tfidf_dense = X_train.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_tfidf_dense)) for n in np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in 
shuffledRange] shuffled_train_target = [y_train[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model_fold.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=np.unique(y_train)) elif 'Support_Vector_Machine' in selectedModelName and 'One_vs_Rest' not in selectedModelName: # Get optimal alpha for the model, performance of 5-fold CV, and standard deviation of performance svm_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # When using SGD, the partial_fit method has to be applied on different batches of the training data, and we need to epoch multiple times model_fold = SGDClassifier(loss='hinge', penalty='l2', alpha=svm_alpha, class_weight=None, random_state=random_state) train_tfidf_dense = X_train.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_tfidf_dense)) for n in np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [y_train[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model_fold.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=np.unique(y_train)) if 'One_vs_Rest_Multionomial_Naive_Bayes' in selectedModelName: # Get optimal alpha for the model mnb_alpha = optimal_model_parameter model_fold = OneVsRestClassifier(MultinomialNB(alpha=mnb_alpha)).partial_fit(X_train.todense(), y_train, classes=np.unique( y_train)) elif 'One_vs_Rest_Logistic_Regression' in selectedModelName: # Get optimal alpha for the model lrl2_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # When using SGD, the partial_fit method has to be applied on different batches of the training data, and we need to epoch multiple times model_fold = OneVsRestClassifier( SGDClassifier(loss='log', penalty='l2', alpha=lrl2_alpha, class_weight=None, random_state=random_state)) train_tfidf_dense = X_train.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_tfidf_dense)) for n in np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [y_train[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model_fold.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=np.unique(y_train)) elif 'One_vs_Rest_Support_Vector_Machine' in selectedModelName: # Get optimal alpha for the model, performance of 5-fold CV, and standard deviation of performance svm_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # When using SGD, the partial_fit method has to be applied on different batches of the training data, and we need to epoch multiple times model_fold = OneVsRestClassifier( SGDClassifier(loss='hinge', penalty='l2', alpha=svm_alpha, class_weight=None, random_state=random_state)) train_tfidf_dense = X_train.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_tfidf_dense)) for n in 
np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [y_train[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model_fold.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=np.unique(y_train)) # evaluate the model on the test data and record performances (accu, auc, micro_precision, macro_precision, micro_recall, macro_recall, micro_f1, macro_f1, pred_y) = evaluate_model_MS(model_fold, X_train, y_train, list(set(y_train))) performances_all_measures_one_fold = [] performances_all_measures_one_fold.append(accu) performances_all_measures_one_fold.append(auc) performances_all_measures_one_fold.append(macro_precision) performances_all_measures_one_fold.append(macro_recall) performances_all_measures_one_fold.append(macro_f1) performances_all_measures_all_folds.append(performances_all_measures_one_fold) fold_number += 1 progressbar_value += 20 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) all_measures_performances = np.average(performances_all_measures_all_folds, axis=0) standard_deviations_all_measures_all_folds = np.std(performances_all_measures_all_folds, axis=0) progress_text = progress_text + "\nThe five-fold cross validation performances have been successfully updated." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text}}) # Manually set to 160 progressbar_value = 160 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) # # else: # # Not perform CV, and add one training example to any one fold randomly # random_fold_number = random.randint(1, 4) # # fold_number = 0 # # performances_all_measures_all_folds = [] # standard_deviations_all_measures_all_folds = [] # # for fold_number in np.arange(0, 5): # if fold_number == random_fold_number: # # X_train = train_tfidf[0] # y_train = np.array(train_target)[0] # # # Need to convert the data to tfidf # old_X_train = tfidf_vect.transform(fiveFoldTrainingDatasetTfidf[random_fold_number]) # old_y_train = fiveFoldTrainingLabels[random_fold_number] # old_X_test = tfidf_vect.transform(fiveFoldTestingDatasetTfidf[random_fold_number]) # old_y_test = fiveFoldTestingLabels[random_fold_number] # # # Update each of the models in the five folds one by one in this for loop, and evaluate its performance on the test data # model_fold = fiveFoldModels[random_fold_number] # # if 'Support_Vector_Machine' in selectedModelName or 'Logistic_Regression' in selectedModelName: # # def batches(l, n): # for i in np.arange(0, len(l), n): # yield l[i:i + n] # # train_tfidf_dense = np.concatenate((old_X_train.toarray(), X_train.toarray()), axis=0) # train_target_concatenated = np.concatenate((old_y_train, y_train), axis=0) # # n_iter = 25 # np.random.seed(5647) # shuffledRange = np.arange(len(train_tfidf_dense)) # for n in np.arange(n_iter): # np.random.shuffle(shuffledRange) # shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] # shuffled_train_target = [train_target_concatenated[i] for i in shuffledRange] # # # Training the model in 10 batches # for batch in batches(np.arange(len(shuffled_train_target)), 5): # model_fold.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], # shuffled_train_target[batch[0]:batch[-1] + 1], # classes=np.unique(train_target_concatenated)) # # else: 
# model_fold.partial_fit(np.concatenate((old_X_train.todense(), X_train.todense()), axis=0), # np.concatenate((old_y_train, y_train), axis=0)) # # # evaluate the model on the test data and record performances # (accu, auc, micro_precision, macro_precision, micro_recall, macro_recall, micro_f1, macro_f1, # pred_y) = evaluate_model_MS(model_fold, old_X_test.todense(), old_y_test, # list(set(np.concatenate((old_y_train, y_train), axis=0)))) # # else: # # simply evauate train and test without adding any more examples to train or test parts # model_fold = fiveFoldModels[random_fold_number] # # (accu, auc, micro_precision, macro_precision, micro_recall, macro_recall, micro_f1, macro_f1, # pred_y) = evaluate_model_MS(model_fold, old_X_test.todense(), old_y_test, # list(set(np.concatenate((old_y_train, y_train), axis=0)))) # # performances_all_measures_one_fold = [] # performances_all_measures_one_fold.append(accu) # performances_all_measures_one_fold.append(auc) # performances_all_measures_one_fold.append(macro_precision) # performances_all_measures_one_fold.append(macro_recall) # performances_all_measures_one_fold.append(macro_f1) # performances_all_measures_all_folds.append(performances_all_measures_one_fold) # # progressbar_value += 20 # es_conn.update(index=index_name_gloabals, id=id, # body={"doc": {"progressbar_value": progressbar_value}}) all_measures_performances = np.average(performances_all_measures_all_folds, axis=0) all_measures_standardDeviations = np.std(performances_all_measures_all_folds, axis=0) performances_dumps = pickle.dumps(all_measures_performances) s3.put_object(Body=performances_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'performances.pkl') performancesStdev_dumps = pickle.dumps(all_measures_standardDeviations) s3.put_object(Body=performancesStdev_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'performancesStdev.pkl') progress_text = progress_text + "Model has been incrementally updated with additional data and is saved sucessfully...\n" progressbar_value += 40 programRunEndTime = datetime.now() timeDifference = relativedelta(programRunEndTime, programRunStartTime) programRunStartTimeLabel = "Program run took %d days %d hours %d minutes %d seconds." % ( timeDifference.days, timeDifference.hours, timeDifference.minutes, timeDifference.seconds) progressbarlabel_text = programRunStartTimeLabel es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text, "progressbarlabel_text": progressbarlabel_text}}) modelSavingTimeLabel = "Incrementally training the model took %d days %d hours %d minutes %d seconds." % ( timeDifference.days, timeDifference.hours, timeDifference.minutes, timeDifference.seconds) append_text_to_history_file += '5-fold Cross Validation Performance after model update: ' + '\n' perfMeasuresStr = ['Accuracy:', 'AUC:', 'Precision:', 'Recall:', 'F1:'] for i in range(len(all_measures_performances)): stringToWrite = '{:<10s}{:>10.2f}{:>4s}{:>10.2f}{:>1s}'.format(perfMeasuresStr[i], all_measures_performances[i] * 100.0, '% +/- ', all_measures_standardDeviations[ i] * 100.0, '%') append_text_to_history_file += stringToWrite + '\n' historyFile = '' historyFile.write(modelSavingTimeLabel + '.' 
+ '\n' + '*' * 95 + '\n') append_text_to_history_file += '5-fold Cross Validation Performance after model update: ' + '\n' try: response2 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') history_file_old_text = response2['Body'].read().decode('utf-8') append_text_to_history_file = history_file_old_text + append_text_to_history_file s3.put_object(Body=append_text_to_history_file, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing history file, so create a new history file and write the history into that file in S3. s3.put_object(Body=append_text_to_history_file, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') elif e.response['Error']['Code'] == "NoSuchKey": pass else: pass progressbar_value += 40 final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "final_progress_value": final_progress_value}}) return JsonResponse({'finalResponse': "Incremental learning finished successfully. The model has been incrementally updated with the new data."}) except Exception as e: return HttpResponse(e) def preprocess_collection_incremental_learning(username, file_open, stopterms, existing_application_numbers, printInfo, progress_text): # de-duplication try: master = [] # list for used application numbers if len(existing_application_numbers) > 0: master = existing_application_numbers repeat = [] # list for duplicate application numbers file_temp = [] # updated collection file counter = 0 num_docs = len(file_open) for index, doc in enumerate(file_open, start=1): try: apn = doc.split("\t") apn = apn[4].upper() if apn not in master: file_temp.append(doc) master.append(apn) counter = counter + 1 elif apn in master: repeat.append(apn) except Exception as e: final_progress_value = 200 progress_text = progress_text + "*" * 50 + "\n" + "ERROR: The document number %d in the file could not be processed" % index + "\n" + "-" * 50 errorString = "The document number %d in the file could not be processed" % index index_name_gloabals = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": username}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"final_progress_value": final_progress_value, "errorString": errorString, "progress_text": progress_text}}) continue # step through collection docs (ie lines) file_proc = [] file_stem = [] file_stop_words_removed = [] design_count = 0 # counter for design cases utility_count = 0 # counter for utility cases for file_index, file_line in enumerate(file_temp, start=1): file_line = file_line.split("\t") # split by tab # take correct col number docs only try: no = str(file_index) file_t = file_line[1] # title file_a = file_line[2] # abstract file_c = file_line[3] # claims apn = file_line[4].lower() apd = file_line[5] asgn = file_line[6].lower() if len(file_line) > 7: upc = file_line[7].lower() if len(file_line) > 8: label = file_line[8].lower() # solve the issue if label has tab except Exception as e: # apollo4.globals.final_progress_value = 200 # apollo4.globals.errorString = "The document number %d in the file could not be processed4430" % file_index progress_text = progress_text + "*" * 50 + "\n" + "ERROR: The document number %d in the file could not be processed" % file_index + "\n" + "-" * 50 pass if 
apn.startswith("us2"): # filter out design cases progress_text = progress_text + "*" * 50 + "\n" + "Design patent found! App_No: %r\tUPC: %r" % ( apn, upc) + '\n' + "-" * 50 design_count = design_count + 1 elif apn.startswith("us"): # filter out non-apn lines (ie not patent data) utility_count = utility_count + 1 # stop and stem title, abstract, claim file_t_stem = stop_and_stem(file_t, stopterms) file_a_stem = stop_and_stem(file_a, stopterms) file_c_stem = stop_and_stem(file_c, stopterms) # remove stopwords from the title, abstract, claim file_t_stop = remove_stopwords(file_t, stopterms) file_a_stop = remove_stopwords(file_a, stopterms) file_c_stop = remove_stopwords(file_c, stopterms) # Output the orginal clean version of utility patent file_new_line = '\t'.join(file_line) file_proc.append(file_new_line) # Output the preprocessed version of utility patent if len(file_line) > 7: proc_doc = [no, file_t_stem, file_a_stem, file_c_stem, apd, apn, asgn, upc] proc_doc_stop = [no, file_t_stop, file_a_stop, file_c_stop, apd, apn, asgn, upc] else: proc_doc = [no, file_t_stem, file_a_stem, file_c_stem, apd, apn, asgn] proc_doc_stop = [no, file_t_stop, file_a_stop, file_c_stop, apd, apn, asgn] if len(file_line) > 8: # solve the issue if label has tab proc_doc.append(label) proc_doc_stop.append(label) proc_doc = '\t'.join(proc_doc) proc_doc_stop = '\t'.join(proc_doc_stop) file_stem.append(proc_doc) file_stop_words_removed.append(proc_doc_stop) # if printInfo: # progress_text = progress_text + "stopwords removed, terms stemmed, documents de-duplicated, design removed\n" + \ # "%d unique documents out of %d total" % (counter, num_docs) + '\n' + \ # "%d design documents out of %d total" % (design_count, num_docs) + '\n' + \ # "%d utility documents out of %d total" % (utility_count, num_docs) + '\n' output = (file_proc, file_stem, file_stop_words_removed) return output except Exception as e: return HttpResponse( "Error running the program." + str(e)) def dedup_collection_journal_incremental_learning(file_open, existing_title_and_abstracts, uid, abstract_id): new_file_list = [] new_item_list = [] try: if len(existing_title_and_abstracts) > 0: new_item_list = existing_title_and_abstracts for doc in file_open: item = '\t'.join(doc.split('\t')[uid: abstract_id + 1]) if item not in new_item_list: new_item_list.append(item) new_file_list.append(doc) return new_file_list except Exception as e: return HttpResponse( "Error running the program." 
            + str(e))


def evaluate_model_MS(model, X_test, y_test, y_label_set):
    # initialize all performance measures to -1 (i.e., not defined)
    accu = -1
    auc = -1
    micro_precision = -1
    macro_precision = -1
    micro_recall = -1
    macro_recall = -1
    micro_f1 = -1
    macro_f1 = -1
    try:
        # Note: y_test needs to be a binary array of [n_samples, n_classes] where each value
        # indicates presence/absence of the class label in the respective column
        labelBinarizer = preprocessing.LabelBinarizer()
        labelBinarizer.fit(y_label_set)
        binarizedLabels = labelBinarizer.transform(y_test)

        if isinstance(model, MultinomialNB) or isinstance(model, KNeighborsClassifier) or isinstance(model, DeepLearningModel):
            # KNN Classifier's predict_proba function does not work with sparse matrices
            y_probas = model.predict_proba(X_test)
            try:
                auc = metrics.roc_auc_score(binarizedLabels, y_probas)
            except Exception:
                # If AUC cannot be computed, set AUC value to -1, to represent not defined
                auc = -1
        elif isinstance(model, OneVsRestClassifier) and isinstance(model.estimator, MultinomialNB):
            y_probas = model.predict_proba(X_test)
            try:
                auc = metrics.roc_auc_score(binarizedLabels, y_probas)
            except Exception:
                # If AUC cannot be computed, set AUC value to -1, to represent not defined
                auc = -1
        else:
            # For LR and SVM:
            y_decision = model.decision_function(X_test)
            try:
                auc = metrics.roc_auc_score(binarizedLabels, y_decision)
            except Exception:
                # If AUC cannot be computed, set AUC value to -1, to represent not defined
                auc = -1

        pred_y = model.predict(X_test)
        accu = metrics.accuracy_score(y_test, pred_y)
        micro_precision = metrics.precision_score(y_test, pred_y, average='micro')
        macro_precision = metrics.precision_score(y_test, pred_y, average='macro')
        micro_recall = metrics.recall_score(y_test, pred_y, average='micro')
        macro_recall = metrics.recall_score(y_test, pred_y, average='macro')
        micro_f1 = metrics.f1_score(y_test, pred_y, average='micro')
        macro_f1 = metrics.f1_score(y_test, pred_y, average='macro')

        return (accu, auc, micro_precision, macro_precision, micro_recall, macro_recall,
                micro_f1, macro_f1, pred_y)

    except Exception as e:
        return HttpResponse("Error running the program." + str(e))


@csrf_exempt
def run_IL_trainFromScratchFromGUI(request):
    # ...
    try:
        # Assuming that the client-side has already selected the options before running the program.
        # Assuming that the request from the client side will have all the fields necessary for running the program.
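# --- Illustrative sketch (not part of the original code) -------------------------------------
# Standalone example of the metric computation performed by evaluate_model_MS above: binarize
# the true labels so roc_auc_score can consume a multiclass probability matrix, then compute
# accuracy and macro-averaged precision, recall and F1. The data and model here are synthetic.
def _sketch_multiclass_evaluation():
    import numpy as np
    from sklearn import metrics, preprocessing
    from sklearn.datasets import make_classification
    from sklearn.naive_bayes import MultinomialNB

    X, y = make_classification(n_samples=200, n_features=30, n_classes=3,
                               n_informative=10, random_state=1)
    X = np.abs(X)  # MultinomialNB expects non-negative features
    clf = MultinomialNB(alpha=0.001).fit(X, y)

    binarized = preprocessing.LabelBinarizer().fit(sorted(set(y))).transform(y)
    auc = metrics.roc_auc_score(binarized, clf.predict_proba(X))

    pred = clf.predict(X)
    return {
        'accuracy': metrics.accuracy_score(y, pred),
        'auc': auc,
        'macro_precision': metrics.precision_score(y, pred, average='macro'),
        'macro_recall': metrics.recall_score(y, pred, average='macro'),
        'macro_f1': metrics.f1_score(y, pred, average='macro'),
    }
# ----------------------------------------------------------------------------------------------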
if request.method == "POST": gui_parameters_data = request.body.decode('utf-8'); username = request.user.username; gui_parameters = json.loads(gui_parameters_data); selectedProjectName = gui_parameters['saveProjectName'] selectedModelName = gui_parameters['trainedModelName'] programRunStartTime = datetime.now() modelValidation = retrainModelFromScratch(None, None, selectedProjectName, selectedModelName, None, programRunStartTime, True, username) final_progress_value = 200 index_name_gloabals = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": username}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"final_progress_value": final_progress_value}}) if modelValidation == True: return JsonResponse({'finalResponse': 'incremental learning sucessfully executed'}) else: errorString = modelValidation.content es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"errorString": errorString}}) return JsonResponse({'finalResponse': 'Error'}) except Exception as e: final_progress_value = 200 errorString = e es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"errorString": errorString, "final_progress_value": final_progress_value}}) return HttpResponse(e) def retrainModelFromScratch(train_data, train_target, selectedProjectName, selectedModelName, targetPerformanceMeasure, programRunStartTime, runFromGUI, username): try: index_name_gloabals = 'apolloglobals' query = {"query": {"bool": {"must": {"match": {"username.keyword": username}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] trainingDataType = res['hits']['hits'][0]['_source']['trainingDataType'] trainingFileName = res['hits']['hits'][0]['_source']['trainingFileName'] progress_text = res['hits']['hits'][0]['_source']['progress_text'] if runFromGUI: train_data_IL = res['hits']['hits'][0]['_source']['train_data'] train_target_IL = res['hits']['hits'][0]['_source']['train_target'] progressbar_value_IL = res['hits']['hits'][0]['_source']['progress_value'] targetPerformanceMeasure_IL = res['hits']['hits'][0]['_source']['targetPerformanceMeasure_IL'] train_data = train_data_IL train_target = train_target_IL progressbar_value = progressbar_value_IL targetPerformanceMeasure = targetPerformanceMeasure_IL # This function is only applicable to SUPERVISED learning mode, which may contain new classes. This does not apply to UNSUPERVISED learning case. # Retrain the selected model by combining the old patent data + old journal data + new data # output_folder = os.path.dirname(selectedTrainingFilePath) progressbar_value = 0 # progress_text = '' progressbar_maximum = 80 + progressbar_value progress_text = progress_text + "Training the model from scratch using the additional data..." 
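# --- Illustrative sketch (not part of the original flow) -------------------------------------
# The progress-reporting pattern used throughout this module: find the per-user document in the
# 'apolloglobals' index via the username keyword sub-field, then apply partial updates to it.
# The Elasticsearch connection string is a placeholder; index and field names mirror the code.
def _sketch_progress_update(username, progress_text, progressbar_value):
    from elasticsearch import Elasticsearch

    es = Elasticsearch("http://localhost:9200")  # placeholder connection
    index_name = 'apolloglobals'

    query = {"query": {"bool": {"must": {"match": {"username.keyword": username}}}}}
    res = es.search(index=index_name, body=query)
    doc_id = res['hits']['hits'][0]['_id']  # assumes exactly one globals doc per user

    es.update(index=index_name, id=doc_id,
              body={"doc": {"progress_text": progress_text,
                            "progressbar_value": progressbar_value}})
    return doc_id
# ----------------------------------------------------------------------------------------------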
es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_maximum": progressbar_maximum, "progress_text": progress_text}}) # If old training data exists, load the data and preprocess the data # Once loaded, append the data to train_data and train_target to train the model from scratch s3 = boto3.client('s3') key_value = 'classificationprojects/' key_value += selectedProjectName + '/supervised/' key_value += selectedModelName + '/' try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'training_data_patents.txt') oldPatentsFile = response['Body'].read() file_sample_open = oldPatentsFile.decode('utf-8') file_sample_open = file_sample_open.split('\n') # split by new line file_sample_open = list(filter(None, file_sample_open)) # delete empty lines # Now, the first line is header, so remove the first line file_sample_open = file_sample_open[1:] existing_application_numbers = [doc.split('\t')[4].upper() for doc in file_sample_open] progress_text = progress_text + "\nFound existing " + str( len(file_sample_open)) + " patents! \nPreprocessing documents...\n" # Set value of progressbar to 5 once the training dataset is loaded progressbar_value += 5 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text}}) # Build the stop words stops = stopwords aux_stops = './static/AuxStops.txt' aux_stops = open(aux_stops, 'r').read() aux_stops = re.sub("[^a-zA-Z ]", " ", aux_stops) # remove non-alphanumeric aux_stops = " ".join(aux_stops.split()) # split by any whitespace and rejoin w/ space aux_stops = aux_stops.split(' ') aux_stops = list(filter(None, aux_stops)) # append auxiliary stops stops = stops + aux_stops # append user-provided stop words try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'user_defined_stopwords.pkl') user_defined_stopwords = pickle.loads(response['Body'].read()) # user_defined_stopwords = pickle.load(user_defined_stopwords_file) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing patent (or journal) training data, so do nothing user_defined_stopwords = [] elif e.response['Error']['Code'] == "NoSuchKey": pass else: pass if len(user_defined_stopwords) > 0: stops = stops + user_defined_stopwords else: stops = stops # Bulid stopterm dictionary stopterms = {} for stop in stops: if stop in stopterms: stopterms[stop] += 1 else: stopterms[stop] = 1 # Preprocess the sample file (file_sample_proc, file_sample_stem, temp) = preprocess_collection(file_sample_open, stopterms, True, progress_text) file_sample = list(filter(None, file_sample_stem)) title_samples = [doc.split('\t')[1].strip('\r').strip('\n') for doc in file_sample] abstract_samples = [doc.split('\t')[2].strip('\r').strip('\n') for doc in file_sample] claim_samples = [doc.split('\t')[3].strip('\r').strip('\n') for doc in file_sample] label_samples = [doc.split('\t')[8].lower().strip('\r').strip('\n') for doc in file_sample] labels = sorted(list(set(label_samples))) train_data_exisiting_patents = [' '.join(doc) for doc in zip(title_samples, abstract_samples, claim_samples)] train_target_existing_patents = label_samples # Append the existing data to new data train_data = train_data + train_data_exisiting_patents train_target = train_target + train_target_existing_patents except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing patent training data, so no need to deduplicate based on 
previous data. existing_application_numbers = [] elif e.response['Error']['Code'] == "NoSuchKey": existing_application_numbers = [] pass else: existing_application_numbers = [] pass # End patent training data try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'training_data_journals.txt') oldJournalsFile = response['Body'].read() file_sample_open = oldJournalsFile.decode('utf-8') file_sample_open = file_sample_open.split('\n') file_sample_open = list(filter(None, file_sample_open)) # delete empty lines # Now, the first line is header, so remove the first line file_sample_open = file_sample_open[1:] progress_text = progress_text + "\nFound " + str( len(file_sample_open)) + " journals! \nPreprocessing documents...\n" # Set value of progressbar to 5 once the training dataset is loaded progressbar_value += 5 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text}}) # Remove the duplicated documents based on "title" file_sample_open = dedup_collection_journal(file_sample_open, 1, 2) # Preprocessing for scoupus data file_sample_open = preprocess_collection_journal(file_sample_open) # Take the stopwords from the GUI and add them to the stopwords list # append user-provided stop words try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'user_defined_stopwords.pkl') user_defined_stopwords_file = response['Body'].read() user_defined_stopwords = pickle.loads(user_defined_stopwords_file) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing patent (or journal) training data, so do nothing user_defined_stopwords = [] elif e.response['Error']['Code'] == "NoSuchKey": pass else: pass if isinstance(user_defined_stopwords, str): if user_defined_stopwords.strip('\r').strip('\n') == '': user_defined_stopwords = [] file_sample_data = [' '.join([stop_and_stem_journal(doc.split('\t')[1], user_defined_stopwords) , stop_and_stem_journal(doc.split('\t')[2], user_defined_stopwords) ]) for doc in file_sample_open] # Training Phase label_samples = [doc.split('\t')[-1].lower().strip('\r').strip('\n') for doc in file_sample_open] labels = sorted(list(set(label_samples))) train_data_existing_journals = file_sample_data train_target_existing_journals = label_samples train_data = train_data + train_data_existing_journals train_target = train_target + train_target_existing_journals except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing journal training data, so no need to deduplicate based on previous data. pass elif e.response['Error']['Code'] == "NoSuchKey": pass else: pass # end journal training data preprocessing # Display information once all data is pre-processed progress_text = progress_text + "Finished preprocessing the existing data" + "\nThe training data contains a total of " + str( len(train_data)) + " unique documents." 
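# --- Illustrative sketch (not part of the original flow) -------------------------------------
# The training-data statistics computed next: per-class counts and percentage distribution, and
# the "at least 5 examples per class" rule needed for 5-fold cross validation. The example
# labels are placeholders.
def _sketch_class_distribution(train_target):
    from collections import Counter

    counts = Counter(train_target)
    total = len(train_target)
    stats = [[label, n, '{}%'.format(round(n * 100.0 / total, 2))]
             for label, n in sorted(counts.items())]
    enough_for_cv = min(counts.values()) >= 5  # every class needs >= 5 docs for 5-fold CV
    return stats, enough_for_cv

# Example: _sketch_class_distribution(['a', 'a', 'b', 'a', 'b'])
# -> ([['a', 3, '60.0%'], ['b', 2, '40.0%']], False)
# ----------------------------------------------------------------------------------------------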
# Set value of progressbar to 10 once the training dataset is preprocessed progressbar_value += 5 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text}}) numInstancesTrainingData = len(train_data) trainingDataStats = [] leastDocumentsForAClass = 5 # initialize to 5 for label in set(train_target): distribution = str(np.round(train_target.count(label) * 100.0 / len(train_target) * 1.0, 2)) + '%' trainingDataStats.append([label, train_target.count(label), distribution]) if train_target.count(label) < leastDocumentsForAClass: leastDocumentsForAClass = train_target.count(label) # Make sure that there are at least 5 documents for each class: this is required to perform 5-fold cross validation if leastDocumentsForAClass < 5: progress_text = progress_text + "*" * 50 + "\nThe program requires at least 5 training examples for each class. Please provide at least 5 training examples for each class and re-run the program." + "*" * 50 progressbar_value = 0 # Return this message and exit. No need to run anything further in this function. final_progress_value = 200 errorString = "The program requires at least 5 training examples for each class. Please provide at least 5 training examples for each class and re-run the program." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text, "final_progress_value": final_progress_value, "errorString": errorString}}) return JsonResponse({'finalResponse': 'Error'}) # Else, just continue with incremental learning progress_text = progress_text + "\nStarting model training..." cv = CountVectorizer() tfidf = TfidfTransformer() # Changed the n-grams to (1,5) in the line below, and max_df from 0.5 to 0.8, based on side-experiments tfidf_vect = TfidfVectorizer(analyzer='word', ngram_range=(1, 5), min_df=2, max_df=0.8, max_features=200000, stop_words='english', use_idf=True) # tf-idf with params train_tfidf = tfidf_vect.fit_transform(train_data) # Set value of progressbar to 15 once the training dataset is vectorized progress_text = progress_text + "\nOptimizing model parameters..." 
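# --- Illustrative sketch (not part of the original flow) -------------------------------------
# The TF-IDF + SGD pattern used by the LR/SVM branches below: fit the TfidfVectorizer once on
# the combined training text, then train an SGDClassifier with partial_fit over several epochs
# of shuffled mini-batches (the same shuffle-then-batch loop that appears repeatedly below).
# The epoch count, batch size and vectorizer settings here are simplified placeholders.
def _sketch_tfidf_sgd_partial_fit(train_data, train_target, n_epochs=25, batch_size=5):
    import numpy as np
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import SGDClassifier

    tfidf_vect = TfidfVectorizer(analyzer='word', ngram_range=(1, 5), min_df=1,
                                 stop_words='english', use_idf=True)
    X = tfidf_vect.fit_transform(train_data).toarray()
    y = np.array(train_target)
    classes = np.unique(y)

    model = SGDClassifier(loss='hinge', penalty='l2', alpha=1.0,
                          random_state=np.random.RandomState(seed=87654))
    rng = np.random.RandomState(5647)
    order = np.arange(len(y))
    for _ in range(n_epochs):
        rng.shuffle(order)
        for start in range(0, len(order), batch_size):
            batch = order[start:start + batch_size]
            model.partial_fit(X[batch], y[batch], classes=classes)

    # Reuse the same fitted vectorizer (transform only, never re-fit) at prediction time.
    return model, tfidf_vect
# ----------------------------------------------------------------------------------------------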
progressbar_value += 5 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text}}) tfidfVectorizer = tfidf_vect # Model and model parameters model = None svm_alpha = 1.0 # default value mnb_alpha = 0.001 # default value lrl2_alpha = 1.0 # default value svm_kernel = 'linear' # default value class_weight = None # default value automatic_mode = False optimal_model_parameter = -1 str_model_name = 'None' str_parameter_name = 'None' if 'One_vs_Rest_Multinomial_Naive_Bayes' in selectedModelName: str_model_name = 'ovrmnb' elif 'One_vs_Rest_Logistic_Regression' in selectedModelName: str_model_name = 'ovrlrl2' elif 'One_vs_Rest_Support_Vector_Machine' in selectedModelName: str_model_name = 'ovrsvm' elif 'Multinomial_Naive_Bayes' in selectedModelName: str_model_name = 'mnb' elif 'Logistic_Regression' in selectedModelName: str_model_name = 'lrl2' elif 'Support_Vector_Machine' in selectedModelName: str_model_name = 'svm' elif 'Deep_Learning_BERT' in selectedModelName: str_model_name = 'bert' elif 'Deep_Learning_RoBERTa' in selectedModelName: str_model_name = 'roberta' elif 'Deep_Learning_XLNet' in selectedModelName: str_model_name = 'xlnet' str_parameter_name = 'Alpha = ' if str_model_name == 'mnb': # Get optimal alpha for the model mnb_alpha = -1 if automatic_mode == False: mnb_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForMNB_alpha( train_tfidf.todense(), train_data, train_target, targetPerformanceMeasure) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations fiveFoldTrainingDatasetTfidf = None fiveFoldTestingDatasetTfidf = None fiveFoldTrainingLabels = None fiveFoldTestingLabels = None fiveFoldModels = None else: # The best model is already computed and best parameter is already determined mnb_alpha = optimal_model_parameter model = MultinomialNB(alpha=mnb_alpha).partial_fit(train_tfidf.todense(), train_target, classes=np.unique(train_target)) trainedModel = model trainedModelName = 'Multinomial_Naive_Bayes_Alpha=' + str(mnb_alpha) elif str_model_name == 'lrl2': # Get optimal alpha for the model lrl2_alpha = -1 if automatic_mode == False: lrl2_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForLR_alpha( train_tfidf.todense(), train_data, train_target, targetPerformanceMeasure) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations fiveFoldTrainingDatasetTfidf = None fiveFoldTestingDatasetTfidf = None fiveFoldTrainingLabels = None fiveFoldTestingLabels = None fiveFoldModels = None else: lrl2_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # output of the model is dependent on the interaction between alpha and the number of epochs (n_iter) # When alpha is very small, n_iter must be large to compensate for the slower learning rate # When using SGD, the partial_fit method has to be applied on different batches of the training data, and we need to epoch multiple times model = SGDClassifier(loss='log', penalty='l2', alpha=lrl2_alpha, class_weight=class_weight, random_state=random_state) train_tfidf_dense = train_tfidf.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_data)) for n in 
np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [train_target[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=np.unique(train_target)) trainedModel = model trainedModelName = 'Logistic_Regression_Alpha=' + str(lrl2_alpha) elif str_model_name == 'svm': # Get optimal alpha for the model, performance of 5-fold CV, and standard deviation of performance if automatic_mode == False: svm_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForSVM_alpha( train_tfidf.toarray(), train_data, train_target, targetPerformanceMeasure) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations fiveFoldTrainingDatasetTfidf = None fiveFoldTestingDatasetTfidf = None fiveFoldTrainingLabels = None fiveFoldTestingLabels = None fiveFoldModels = None else: svm_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # When using SGD, the partial_fit method has to be applied on different batches of the training data, and we need to epoch multiple times model = SGDClassifier(loss='hinge', penalty='l2', alpha=svm_alpha, class_weight=class_weight, random_state=random_state) train_tfidf_dense = train_tfidf.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_data)) for n in np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [train_target[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=np.unique(train_target)) trainedModel = model trainedModelName = 'Support_Vector_Machine_Alpha=' + str(svm_alpha) # Need to update the clibration model that will be used to output probabilities in the excel sheet model_isotonic_calibration = None model_sigmoid_calibration = None # calibrate probabilities that will be used by the excel sheet if len(train_target) > 500: model_isotonic_calibration = CalibratedClassifierCV(model, cv="prefit", method='isotonic') model_isotonic_calibration.fit(train_tfidf.todense(), train_target) else: model_sigmoid_calibration = CalibratedClassifierCV(model, cv="prefit", method='sigmoid') model_sigmoid_calibration.fit(train_tfidf.todense(), train_target) if str_model_name == 'ovrmnb': # Get optimal alpha for the model mnb_alpha = -1 if automatic_mode == False: mnb_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForOVRMNB_alpha( train_tfidf.todense(), train_data, train_target, targetPerformanceMeasure) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations fiveFoldTrainingDatasetTfidf = None fiveFoldTestingDatasetTfidf = None fiveFoldTrainingLabels = None fiveFoldTestingLabels = None fiveFoldModels = None else: # The best model is already computed and best parameter is already determined mnb_alpha = 
optimal_model_parameter model = OneVsRestClassifier(MultinomialNB(alpha=mnb_alpha)).partial_fit(train_tfidf.todense(), train_target, classes=np.unique(train_target)) trainedModel = model trainedModelName = 'One_vs_Rest_Multinomial_Naive_Bayes_Alpha=' + str(mnb_alpha) elif str_model_name == 'ovrlrl2': # Get optimal alpha for the model lrl2_alpha = -1 if automatic_mode == False: lrl2_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForOVRLR_alpha( train_tfidf.todense(), train_data, train_target, targetPerformanceMeasure) trainingDataPerformances = all_measures_performances fiveFoldTrainingDatasetTfidf = None fiveFoldTestingDatasetTfidf = None fiveFoldTrainingLabels = None fiveFoldTestingLabels = None fiveFoldModels = None else: lrl2_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # When using SGD, the partial_fit method has to be applied on different batches of the training data, and we need to epoch multiple times model = OneVsRestClassifier( SGDClassifier(loss='log', penalty='l2', alpha=lrl2_alpha, class_weight=class_weight, random_state=random_state)) train_tfidf_dense = train_tfidf.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_data)) for n in np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [train_target[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], classes=np.unique(train_target)) trainedModel = model trainedModelName = 'One_vs_Rest_Logistic_Regression_Alpha=' + str(lrl2_alpha) elif str_model_name == 'ovrsvm': # Get optimal alpha for the model, performance of 5-fold CV, and standard deviation of performance if automatic_mode == False: svm_alpha, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParameterForOVRSVM_alpha( train_tfidf.todense(), train_data, train_target, targetPerformanceMeasure) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations fiveFoldTrainingDatasetTfidf = None fiveFoldTestingDatasetTfidf = None fiveFoldTrainingLabels = None fiveFoldTestingLabels = None fiveFoldModels = None else: svm_alpha = optimal_model_parameter random_state = np.random.RandomState(seed=87654) # When using SGD, the partial_fit method has to be applied on different batches of the training data, and we need to epoch multiple times model = OneVsRestClassifier( SGDClassifier(loss='hinge', penalty='l2', alpha=svm_alpha, class_weight=class_weight, random_state=random_state)) train_tfidf_dense = train_tfidf.toarray() def batches(l, n): for i in np.arange(0, len(l), n): yield l[i:i + n] n_iter = 25 np.random.seed(5647) shuffledRange = np.arange(len(train_data)) for n in np.arange(n_iter): np.random.shuffle(shuffledRange) shuffled_train_tfidf = [train_tfidf_dense[i] for i in shuffledRange] shuffled_train_target = [train_target[i] for i in shuffledRange] # Training the model in 10 batches for batch in batches(np.arange(len(shuffled_train_target)), 5): model.partial_fit(shuffled_train_tfidf[batch[0]:batch[-1] + 1], shuffled_train_target[batch[0]:batch[-1] + 1], 
classes=np.unique(train_target)) # Need to update the clibration model that will be used to output probabilities in the excel sheet model_isotonic_calibration = None model_sigmoid_calibration = None # calibrate probabilities that will be used by the excel sheet if len(train_target) > 500: model_isotonic_calibration = CalibratedClassifierCV(model, cv="prefit", method='isotonic') model_isotonic_calibration.fit(train_tfidf.todense(), train_target) else: model_sigmoid_calibration = CalibratedClassifierCV(model, cv="prefit", method='sigmoid') model_sigmoid_calibration.fit(train_tfidf.todense(), train_target) trainedModel = model trainedModelName = 'One_vs_Rest_Support_Vector_Machine_Alpha=' + str(svm_alpha) elif str_model_name == 'bert' or str_model_name == 'roberta' or str_model_name == 'xlnet': # Get optimal alpha for the model, performance of 5-fold CV, and standard deviation of performance if str_model_name == 'bert': dl_model_type = 'bert' dl_model_name = 'bert-base-cased' elif str_model_name == 'roberta': dl_model_type = 'roberta' dl_model_name = 'roberta-base' elif str_model_name == 'xlnet': dl_model_type = 'xlnet' dl_model_name = 'xlnet-base-cased' if automatic_mode == False: torch.cuda.empty_cache() # Need to get userName from the GUI to use here DEEP_LEARNING_OUTPUT_DIR = './DeepLearningOutputs/' + username + '/' optimal_parameters, max_performance_measure, standard_deviation, all_measures_performances, all_measures_standardDeviations = getOptimalParametersForDeepLearning( dl_model_type, dl_model_name, train_data, targetPerformanceMeasure, DEEP_LEARNING_OUTPUT_DIR) trainingDataPerformances = all_measures_performances trainingDataPerformancesStandardDeviation = all_measures_standardDeviations fiveFoldTrainingDatasetTfidf = None fiveFoldTestingDatasetTfidf = None fiveFoldTrainingLabels = None fiveFoldTestingLabels = None fiveFoldModels = None else: optimal_parameters = optimal_model_parameter random_state = np.random.RandomState(seed=87654) batchSize = optimal_parameters[0] maxSequenceLength = optimal_parameters[1] total_labels = len(Counter(train_target)) # Set the output directory where temporary model results will be stored torch.cuda.empty_cache() # Need to get userName from the GUI to use here DEEP_LEARNING_OUTPUT_DIR = './DeepLearningOutputs/' + username + '/' trainedModel = DeepLearningModel(dl_model_type, dl_model_name, batchSize, maxSequenceLength, num_epochs=30, random_state=4987, output_dir=DEEP_LEARNING_OUTPUT_DIR) # Train the model trainedModel.fit(train_data) if str_model_name == 'bert': trainedModelName = 'Deep Learning_BERT_BatchSize=' + str( optimal_parameters[0]) + '_MaxSequenceLength=' + str(optimal_parameters[1]) elif str_model_name == 'roberta': trainedModelName = 'Deep Learning_RoBERTa_BatchSize=' + str( optimal_parameters[0]) + '_MaxSequenceLength=' + str(optimal_parameters[1]) elif str_model_name == 'xlnet': trainedModelName = 'Deep Learning_XLNet_BatchSize=' + str( optimal_parameters[0]) + '_MaxSequenceLength=' + str(optimal_parameters[1]) progressbar_value += 10 historyFilename = 'history.txt' # Need to change this to elastic search Praveen remember trainingDataStatistics = trainingDataStats trainingDataStats_dumps = json.dumps(trainingDataStats) es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "trainingDataStats_dumps": trainingDataStats_dumps}}) INDEX_NAME = 'savemodelsupervised' query = {"query": {"bool": {"must": [{"match": {"saveProjectName.keyword": selectedProjectName}}, {"match": 
{"model_data.trainedModelName.keyword": selectedModelName}}]}}} res = es_conn.search(index=INDEX_NAME, body=query) saveid = res['hits']['hits'][0]['_id'] es_conn.update(index=INDEX_NAME, id=saveid, body={"doc": {"model_data": {"trainingDataTables": trainingDataStats_dumps}}}) progressbar_value += 1 tfidfVectorizer_dumps = pickle.dumps(tfidfVectorizer) s3.put_object(Body=tfidfVectorizer_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'tfidf_vect.pkl') progressbar_value += 1 # Save the model, model's CV performance, and CV performance STDEV model_dumps = pickle.dumps(trainedModel) s3.put_object(Body=model_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') progressbar_value += 1 performances_dumps = list(trainingDataPerformances) es_conn.update(index=INDEX_NAME, id=saveid, body={"doc": {"model_data": {"trainingDataPerformances": performances_dumps}}}) progressbar_value += 1 performancesStdev_dumps = list(trainingDataPerformancesStandardDeviation) # s3.put_object(Body=performancesStdev_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + 'performancesStdev.pkl') es_conn.update(index=INDEX_NAME, id=saveid, body={"doc": { "model_data": {"trainingDataPerformancesStandardDeviation": performancesStdev_dumps}}}) progressbar_value += 1 # fiveFoldTrainingDatasetTfidf_dumps = pickle.dumps(fiveFoldTrainingDatasetTfidf) # s3.put_object(Body=fiveFoldTrainingDatasetTfidf_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + 'fiveFoldTrainingDatasetTfidf.pkl') # progressbar_value += 1 # # fiveFoldTestingDatasetTfidf_dumps = pickle.dumps(fiveFoldTestingDatasetTfidf) # s3.put_object(Body=fiveFoldTestingDatasetTfidf_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + 'fiveFoldTestingDatasetTfidf.pkl') # progressbar_value += 1 # # fiveFoldTrainingLabels_dumps = pickle.dumps(fiveFoldTrainingLabels) # s3.put_object(Body=fiveFoldTrainingLabels_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + 'fiveFoldTrainingLabels.pkl') # progressbar_value += 1 # # fiveFoldTestingLabels_dumps = pickle.dumps(fiveFoldTestingLabels) # s3.put_object(Body=fiveFoldTestingLabels_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + 'fiveFoldTestingLabels.pkl') # progressbar_value += 1 # # fiveFoldModels_dumps = pickle.dumps(fiveFoldModels) # s3.put_object(Body=fiveFoldModels_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + 'fiveFoldModels.pkl') # progressbar_value += 1 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) # Save the isotonic or sigmoid calibration model for SVM classifier if str_model_name == 'svm' or model == 'ovrsvm': if model_isotonic_calibration != None: # Save the isotonic calibration model model_isotonic_calibration_dumps = pickle.dumps(model_isotonic_calibration) s3.put_object(Body=model_isotonic_calibration_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_isotonic_calibration.pkl') # Remove the sigmoid calibration model, because this model was trained on less than 500 training data examples try: response2 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_sigmoid_calibration.pkl') response2 = s3.delete_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_sigmoid_calibration.pkl') except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": pass elif e.response['Error']['Code'] == "NoSuchKey": pass else: pass if model_sigmoid_calibration != None: # Save the sigmoid calibration model model_sigmoid_calibration_dumps = 
pickle.dumps(model_sigmoid_calibration) s3.put_object(Body=model_sigmoid_calibration_dumps, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'model_sigmoid_calibration.pkl') # pathToSaveTheProjectTrainingData = selectedModelName + 'trainingData/' # Save the training and testing data sets for future use # copyfile(selectedTrainingFilePath, pathToSaveTheProjectTrainingData + os.path.basename(selectedTrainingFilePath)) # Need to save in s3 trainingDataPatents = 'training_data_patents.txt' + '_' + str(datetime.now()) trainingDataJournals = 'training_data_journals.txt' + '_' + str(datetime.now()) # Append the patent data to patent training file, and journal data to journal training file if trainingDataType == 'Patent': s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + trainingDataPatents) elif trainingDataType == 'Journal': s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + trainingDataJournals) progressbar_value += 1 append_text_to_history_file = "" append_text_to_history_file += '-' * 100 + '\n' + 'username: ' + username + '\n' # append_text_to_history_file += "Program run started at " + apollo4.globals.programRunStartTime.strftime( # "%I:%M%p on %B %d, %Y") + "\n" # historyFile = open('history.txt', 'a') append_text_to_history_file += '-' * 100 + '\n' if trainingDataType == 'Patent': append_text_to_history_file += 'Supervised learning model ' + trainedModelName + ' was trained on the PATENT training data file: ' + trainingFileName + '.' + '\n' elif trainingDataType == 'Journal': append_text_to_history_file += 'Supervised learning model ' + trainedModelName + ' was trained on the JOURNAL training data file: ' + trainingFileName + '.' + '\n' # write the number of instances and classes for tracking purposes numInstancesInTrainingData = 0 stringToDisplayTrainingDataStats = '{:<40s}{:>20s}{:>20s}'.format('Class', '# Examples', 'Class %') + '\n' for entry in trainingDataStats: stringToDisplayTrainingDataStats += '{:<40s}{:>20s}{:>20s}'.format(str(entry[0]), str(entry[1]), str(entry[2])) + '\n' numInstancesInTrainingData += int(entry[1]) append_text_to_history_file += 'Total number of documents in the training data: ' + str( numInstancesInTrainingData) + '\n' append_text_to_history_file += 'Total number of classes in the training data: ' + str( len(trainingDataStats)) + '\n' append_text_to_history_file += 'The model parameters were optimized for \'' + targetPerformanceMeasure + '\'.' + '\n' append_text_to_history_file += '5-fold Cross Validation Performance: ' + '\n' append_text_to_history_file += '5-fold Cross Validation Performance: ' + '\n' perfMeasuresStr = ['Accuracy:', 'AUC:', 'Precision:', 'Recall:', 'F1:'] for i in range(len(trainingDataPerformances)): stringToWrite = '{:<10s}{:>10.2f}{:>4s}{:>10.2f}{:>1s}'.format(perfMeasuresStr[i], trainingDataPerformances[i] * 100.0, '% +/- ', trainingDataPerformancesStandardDeviation[ i] * 100.0, '%') append_text_to_history_file += stringToWrite + '\n' # append_text_to_history_file += progressbarlabel_text + '.' 
+ '\n' try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') history_file_old_text = response['Body'].read().decode('utf-8') append_text_to_history_file = history_file_old_text + append_text_to_history_file s3.put_object(Body=append_text_to_history_file, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing history file, so create a new history file and write the history into that file in S3. s3.put_object(Body=append_text_to_history_file, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') else: s3.put_object(Body=append_text_to_history_file, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') progressbar_value += 1 programRunEndTime = datetime.now() timeDifference = relativedelta(programRunEndTime, programRunStartTime) programRunStartTimeLabel = "Program run took %d days %d hours %d minutes %d seconds." % ( timeDifference.days, timeDifference.hours, timeDifference.minutes, timeDifference.seconds) progressbarlabel_text = programRunStartTimeLabel progress_text = progress_text + "\nIncremental learning finished successfully." progressbar_value += 5 final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text, "progressbarlabel_text": progressbarlabel_text, "final_progress_value": final_progress_value}}) return True except Exception as e: return HttpResponse( "Error running the program." + str(e)) @csrf_exempt def incrementalUnsupervisedLearning(request): global response try: # Assuming that the client-side has already selected the options before running the program. # Assuming that the request from the client side will have all the fields necessary for running the program. if request.method == 'GET': return response elif request.method == "POST": gui_parameters = request.POST.getlist('inputData')[0] gui_parameters = json.loads(gui_parameters); finalTrainingData = request.FILES.getlist('trainFile') # training_data = request.FILES.getlist('file').read().decode("ISO-8859-1") training_data = Process_All_Files(finalTrainingData) trainingFileName = gui_parameters['training_file_name'] trainingDataType = gui_parameters['training_data_type'] selectedModelName = gui_parameters['trainedModelName'] selectedProjectName = gui_parameters['saveProjectName'] username = request.user.username; current_tab = 6 progressbar_maximum = 200 progressbar_value = 0 progress_text = '' progress_text = progress_text + "Starting incremental learning process...\n" # Set the text in progressbarlabel programRunStartTime = datetime.now() programRunStartTimeLabel = 'Progress: Program run started at ' + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + ' (UTC time). ' progressbarlabel_text = programRunStartTimeLabel progress_text = progress_text + '-' * 75 + '\n' + "Program run started at " + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + " (UTC time).\n" + \ '-' * 75 + '\n' + "Starting incremental learning process..." 
# The path to the model depends on the index of selected project and index of selected model numberOfClusters = int(re.search('NumClusters=(.*)_TopWords', selectedModelName).group(1)) number_of_top_words = int(selectedModelName.split("TopWords=")[1].replace('/', '')) progressbar_maximum = 100 + 10 * numberOfClusters progressbar_value = 10 index_name_gloabals = 'apolloglobalsunsupervised' query = {"query": {"bool": {"must": {"match": {"username.keyword": username}}}}} res = es_conn.search(index=index_name_gloabals, body=query) id = res['hits']['hits'][0]['_id'] es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_maximum": progressbar_maximum, "current_tab": current_tab, "progressbar_value": progressbar_value, "testingFileName": trainingFileName, "trainedModelName": selectedModelName, "testingDataType": trainingDataType, "progress_text": progress_text , "progressbarlabel_text": progressbarlabel_text, "numberOfClusters": numberOfClusters, "number_of_top_words": number_of_top_words}}) historyFilename = 'history.txt' s3 = boto3.client('s3') key_value = 'classificationprojects/' key_value += selectedProjectName + '/unsupervised/' key_value += selectedModelName + '/' # The code for loading and pre-processing the data is different for patent and journal data if trainingDataType == 'Patent': file_sample_open = training_data file_sample_open = file_sample_open.split('\n') # split by new line file_sample_open = list(filter(None, file_sample_open)) # delete empty lines # Now, the first line is header, so remove the first line file_sample_open = file_sample_open[1:] progress_text = progress_text + "\nFound " + str( len(file_sample_open)) + " documents! \nPreprocessing documents...\n" # Set value of progressbar to 10 once the training dataset is loaded progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text}}) # Build the stop words stops = stopwords aux_stops = './static/AuxStops.txt' aux_stops = open(aux_stops, 'r').read() aux_stops = re.sub("[^a-zA-Z ]", " ", aux_stops) # remove non-alphanumeric aux_stops = " ".join(aux_stops.split()) # split by any whitespace and rejoin w/ space aux_stops = aux_stops.split(' ') aux_stops = list(filter(None, aux_stops)) # append auxiliary stops stops = stops + aux_stops user_defined_stopwords = [] stops = stops + user_defined_stopwords # Bulid stopterm dictionary stopterms = {} for stop in stops: if stop in stopterms: stopterms[stop] += 1 else: stopterms[stop] = 1 # Preprocess the sample file # Also, we need to check the original data file for any duplicates in the new data # Load the latest training data for this model and use the application number column to deduplicate the documents from previous patents data try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'training_data_patents.txt') oldPatentsFile = response['Body'].read() # file_old_training_data_open = codecs.open(oldPatentsFile).read() file_old_training_data_open = oldPatentsFile.decode('utf-8') file_old_training_data_open = file_old_training_data_open.split('\n') # split by new line file_old_training_data_open = list(filter(None, file_old_training_data_open)) # delete empty lines # Now, the first line is header, so remove the first line file_old_training_data_open = file_old_training_data_open[1:] existing_application_numbers = [doc.split('\t')[4].upper() for doc in file_old_training_data_open] except botocore.exceptions.ClientError as e: if 
e.response['Error']['Code'] == "404": # There is no existing patent training data, so no need to deduplicate based on previous data. existing_application_numbers = [] elif e.response['Error']['Code'] == "NoSuchKey": existing_application_numbers = [] pass else: existing_application_numbers = [] pass progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) (file_test_proc, file_sample_stem, unlabeled_data_stop_words_removed) = preprocess_collection_incremental_learning(username, file_sample_open, stopterms, existing_application_numbers, True, progress_text) file_sample = list(filter(None, file_sample_stem)) progressbar_value += 20 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) if len(file_sample) < 1: progressbar_value = 100 + numberOfClusters # Simply display the error message and exit the function. final_progress_value = 200 errorString = 'The additional data file does not contain any new Patents for training the unsupervised learning model. \nCannot perform incremental learning in this case.5436' es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "final_progress_value": final_progress_value, "errorString": errorString}}) return JsonResponse({'finalResponse': 'Error'}) title_samples = [doc.split('\t')[1] for doc in file_sample] abstract_samples = [doc.split('\t')[2] for doc in file_sample] claim_samples = [doc.split('\t')[3] for doc in file_sample] unlabeled_data = [' '.join(doc) for doc in zip(title_samples, abstract_samples, claim_samples)] # End patent training data elif trainingDataType == 'Journal': file_sample_open = training_data file_sample_open = file_sample_open.split('\n') # split by new line file_sample_open = list(filter(None, file_sample_open)) # delete empty lines # Now, the first line is header, so remove the first line file_sample_open = file_sample_open[1:] progress_text = progress_text + "\nFound " + str( len(file_sample_open)) + " documents in the additional data file! \nPreprocessing documents...\n" # Set value of progressbar to 10 once the training dataset is loaded progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text}}) # Load the latest training data for this model and use the title and abstract columns to deduplicate the documents from previous journals data try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'training_data_journals.txt') oldJournalsFile = response['Body'].read() # The training data already exists, and we need to append the new data to this data as well. file_old_training_data_open = oldJournalsFile.decode('utf-8') file_old_training_data_open = file_old_training_data_open.split('\n') # split by new line file_old_training_data_open = list(filter(None, file_old_training_data_open)) # delete empty lines # Now, the first line is header, so remove the first line file_old_training_data_open = file_old_training_data_open[1:] existing_title_and_abstracts = ['\t'.join([doc.split('\t')[1], doc.split('\t')[2]]) for doc in file_old_training_data_open] except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing journal training data, so no need to deduplicate based on previous data. 
existing_title_and_abstracts = [] elif e.response['Error']['Code'] == "NoSuchKey": existing_title_and_abstracts = [] pass else: existing_title_and_abstracts = [] pass progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) # Remove the duplicated documents based on "title" file_data_open = dedup_collection_journal_incremental_learning(file_sample_open, existing_title_and_abstracts, 1, 2) progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) if len(file_data_open) < 1: progressbar_value = 100 + numberOfClusters # Simply display the error message and exit the function. final_progress_value = 200 errorString = 'The additional data file does not contain any new journals for training the unsupervised learning model. \nCannot perform incremental learning in this case.5503' es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "final_progress_value": final_progress_value, "errorString": errorString}}) return JsonResponse({'finalResponse': 'Error'}) # Preprocessing for scoupus data file_test_proc = preprocess_collection_journal(file_data_open) progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) user_defined_stopwords = [] unlabeled_data = [] unlabeled_data_stop_words_removed = [] for doc in file_data_open: stop_and_stem_document_title, stop_document_title = stop_and_stem_journal_2(doc.split('\t')[1], user_defined_stopwords) stop_and_stem_document_abstract, stop_document_abstract = stop_and_stem_journal_2( doc.split('\t')[2], user_defined_stopwords) unlabeled_data.append(' '.join([stop_and_stem_document_title, stop_and_stem_document_abstract])) unlabeled_data_stop_words_removed.append(' '.join([stop_document_title, stop_document_abstract])) progressbar_value += 10 ## Set directory for the output folder # output_folder = os.path.dirname(selectedTrainingFilePath) progress_text = progress_text + "Starting unsupervised learning process..." + "\nFound " + str( len(unlabeled_data)) + " new documents in the additional data file!\n" es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "progress_text": progress_text}}) # The code for patent and journal testing data is different because it required different preprocessing user_defined_stopwords = [] # Load the tfidf vectorizer that was previously saved response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'tfidf_vect.pkl') tfidf_vect_file = response['Body'].read() tfidf_vect = pickle.loads(tfidf_vect_file) unlabled_data_tfidf = tfidf_vect.transform(unlabeled_data) # Model and model parameters model = None number_of_clusters = numberOfClusters progress_text = progress_text + "\nPerforming clustering and topic extraction on the data..." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text}}) output_filename = re.sub('.txt', '_Results_Topic_Modeling.txt', trainingFileName) # 1. 
Load the model response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') model_file = response['Body'].read() model = pickle.loads(model_file) model.partial_fit(unlabled_data_tfidf) modelDumps = pickle.dumps(model) # simply copy the file to the trainingData folder with the timestamp information # training_data = pickle.dumps(training_data) # s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, # Key=key_value + '/trainingData/' + os.path.basename( # trainingFileName) + '_' + datetime.now().strftime('%Y-%m-%d %H-%M-%S') + # os.path.splitext(trainingFileName)[1]) # Append the patent data to patent training file, and journal data to journal training file if trainingDataType == 'Patent': s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'training_data_patents.txt') elif trainingDataType == 'Journal': s3.put_object(Body=training_data, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'training_data_journals.txt') # Write the newly updated model back to S3; assuming that S3 overwrites the old file with the new one, and no settings have been changed for S3. s3.put_object(Body=modelDumps, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'trainedModel.pkl') progressbar_value += 20 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) clusters = model.labels_.tolist() test_patent_others = {'content': unlabeled_data, 'file_others': file_test_proc, 'content_stop_words_removed': unlabeled_data_stop_words_removed, 'cluster': clusters} # 1. Load the previous data frame to run topic modeling again on each set of document clusters response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'topicModelingFrame.pkl') old_frame_file = response['Body'].read() oldFrame = pickle.loads(old_frame_file) frame = pd.DataFrame(test_patent_others, index=[clusters], columns=['content', 'file_others', 'content_stop_words_removed', 'cluster']) frame = frame.append(oldFrame) clusterTopicsAndCounts = [] clustering_successful = False # output lda other topics fout_others = '' for no in range(numberOfClusters): try: # sometimes, there is no document in the group, so handle that case with try and except patent_group = frame.groupby(frame['cluster']).get_group(no) except: # continue, because there is no document in this cluster. 
Move on to the topic modeling for next cluster continue patent_tac = patent_group.ix[:, 0].tolist() patent_org = patent_group.ix[:, 1].tolist() lda_tf_vect = TfidfVectorizer(max_df=0.8, min_df=1, max_features=200000, ngram_range=(1, 5), use_idf=True, stop_words='english') tf = None try: tf = lda_tf_vect.fit_transform(patent_tac) except Exception as e: lda_tf_vect = TfidfVectorizer(max_df=1.0, min_df=1, max_features=200000, ngram_range=(1, 5), use_idf=True, stop_words='english') tf = lda_tf_vect.fit_transform(patent_tac) # LDA Model lda = LatentDirichletAllocation(n_components=1, max_iter=20, learning_method='online', learning_offset=50, random_state=0).fit(tf) lda_feature_names = lda_tf_vect.get_feature_names() lda_topics = get_topic_list(lda, lda_feature_names, number_of_top_words) clusterTopicsAndCounts.append([len(patent_tac), lda_topics[0]]) doc_topic = lda.transform(tf) doc_topic_index = doc_topic.argmax(axis=1) for doc, doc_topic_i in zip(patent_org, doc_topic_index): fout_others += '\t'.join( [doc.strip('\r').strip('\n'), lda_topics[doc_topic_i].strip('\r').strip('\n')]) + '\n' clustering_successful = True progressbar_value += 10 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value}}) if clustering_successful == True: progress_text = progress_text + '\nTopic extraction and clustering completed.' es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text}}) # Load the topic modeling results in the treeview # Compute the columns: (i) # instances, (ii) Topics extracted clusterTopicsAndCounts = clusterTopicsAndCounts dumps_clusterTopicsAndCounts = json.dumps(clusterTopicsAndCounts) progress_text = progress_text + "\nPlease download the " + output_filename + " file and check all the results in the file." es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progress_text": progress_text}}) # historyFile = open(historyFilename, append_write) append_text_to_history_file = "\n" append_text_to_history_file += '-' * 100 + '\n' append_text_to_history_file += 'username: ' + username append_text_to_history_file += "Program run started at " + programRunStartTime.strftime( "%I:%M%p on %B %d, %Y") + " (UTC time).\n" + '-' * 100 + '\n' ## Update the history based on whether the updated model is supervised or unsupervised model. if trainingDataType == 'Patent': append_text_to_history_file += 'Unsupervised learning model ' + selectedModelName + ' was incrementally updated using the PATENT training data file: ' + trainingFileName + '.' + '\n' elif trainingDataType == 'Journal': append_text_to_history_file += 'Unsupervised learning model ' + selectedModelName + ' was incrementally updated using the JOURNAL training data file: ' + trainingFileName + '.' + '\n' INDEX_NAME = 'savemodelunsupervised' query = {"query": {"bool": {"must": [{"match": {"saveProjectName.keyword": selectedProjectName}}, {"match": { "model_data.trainedModelName.keyword": selectedModelName}}]}}} res = es_conn.search(index=INDEX_NAME, body=query) saveid = res['hits']['hits'][0]['_id'] es_conn.update(index=INDEX_NAME, id=saveid, body={"doc": {"model_data": {"clusterTopicsAndCounts": dumps_clusterTopicsAndCounts}}}) programRunEndTime = datetime.now() timeDifference = relativedelta(programRunEndTime, programRunStartTime) programRunStartTimeLabel = "Program run took %d days %d hours %d minutes %d seconds." 
% ( timeDifference.days, timeDifference.hours, timeDifference.minutes, timeDifference.seconds) progressbarlabel_text = programRunStartTimeLabel modelSavingTimeLabel = "Incrementally updating the model took %d days %d hours %d minutes %d seconds." % ( timeDifference.days, timeDifference.hours, timeDifference.minutes, timeDifference.seconds) es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbarlabel_text": progressbarlabel_text}}) append_text_to_history_file += modelSavingTimeLabel + '.' + '\n' + '*' * 95 + '\n' # Need to add history file in all tabs and uncomment below code try: response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') history_file_old_text = response['Body'].read().decode('utf-8') append_text_to_history_file = history_file_old_text + append_text_to_history_file s3.put_object(Body=append_text_to_history_file, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing history file, so create a new history file and write the history into that file in S3. s3.put_object(Body=append_text_to_history_file, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') elif e.response['Error']['Code'] == "NoSuchKey": s3.put_object(Body=append_text_to_history_file, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') pass else: s3.put_object(Body=append_text_to_history_file, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + 'history.txt') pass progressbar_value += 20 final_progress_value = 200 es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"progressbar_value": progressbar_value, "final_progress_value": final_progress_value}}) response = HttpResponse(content=fout_others, content_type='text/plain') response['Content-Disposition'] = 'attachment; filename=' + output_filename return HttpResponse('success'); except Exception as e: final_progress_value = 200 errorString = errorString es_conn.update(index=index_name_gloabals, id=id, body={"doc": {"errorString": errorString, "final_progress_value": final_progress_value}}) return JsonResponse({'finalResponse': 'Error'}) @csrf_exempt def getUserName(request): try: if request.method == "POST": userName = request.user.username; return HttpResponse(userName) except Exception as e: return HttpResponse( "Error running the program." + str(e)) @csrf_exempt def saveAndContinue(request): HOST_URLS = ["https://search-dataannotation-unp43zc5slowdwllbqukty73wy.us-east-1.es.amazonaws.com"] # In production or deploying to aws uncomment this line and comment local server settings. 
es_conn = Elasticsearch(HOST_URLS, timeout=30) AWS_STORAGE_BUCKET_NAME = 'dataannotate' s3 = boto3.client('s3') try: if request.method == "POST": userName = request.user.username; save_doc_Name = request.body.decode('utf-8'); saveandcontinue = json.loads(save_doc_Name); categoryName = saveandcontinue['categoryName'] timeTaken = saveandcontinue['timeTaken'] keywords = saveandcontinue['keywords'] index = saveandcontinue['index'] headers = saveandcontinue['headers'] key_value = 'catogorizedfiles/' key_value += userName + '/' index_name = 'annotatefile' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} if es_conn.indices.exists(index_name): res = es_conn.search(index=index_name, body=query) else: save_response = es_conn.indices.create(index=index_name, ignore=400) res = es_conn.search(index=index_name, body=query) fileName = res['hits']['hits'][0]['_source']['filename'] training_data = res['hits']['hits'][0]['_source']['training_data'] file_sample_open = training_data[1:] file_sample = list(filter(None, file_sample_open)) identificationNumber = [doc.split('\t')[1].strip('\r').strip('\n') for doc in file_sample] identificationNumber = identificationNumber[0] file_sample_current_document = file_sample[0] task = [doc.split('\t')[0].strip('\r').strip('\n') for doc in file_sample] task = task[0] saveAndContinueData = { 'identificationNumber': identificationNumber, 'username': userName, 'fileName': fileName, 'categoryName': categoryName, 'timespent': str(timeTaken) + ' sec', 'keywords': keywords, 'index': index, 'headers': headers, 'task': task, } INDEX_NAME = 'dataannotateresult' # save_response = es_conn.indices.create(index=INDEX_NAME, ignore=400) if es_conn.indices.exists(INDEX_NAME): savingResponse = es_conn.create(index=INDEX_NAME, doc_type=TYPE_NAME_USER, body=saveAndContinueData, id=uuid.uuid4()) else: save_response = es_conn.indices.create(index=INDEX_NAME, ignore=400) savingResponse = es_conn.create(index=INDEX_NAME, doc_type=TYPE_NAME_USER, body=saveAndContinueData, id=uuid.uuid4()) if categoryName != 'skipButton': labeleddocument = file_sample_current_document.strip('\r').strip('\n') + '\t' + categoryName + '\n' try: response2 = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + fileName) oldlabeleddocument = response2['Body'].read().decode('utf-8') labeleddocument = oldlabeleddocument + labeleddocument s3.put_object(Body=labeleddocument, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + fileName) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": # There is no existing history file, so create a new history file and write the history into that file in S3. 
header = training_data[0].strip('\r').strip('\n') + '\t' + 'Category' labeleddocument = header + '\n' + labeleddocument s3.put_object(Body=labeleddocument, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + fileName) elif e.response['Error']['Code'] == "NoSuchKey": header = training_data[0].strip('\r').strip('\n') + '\t' + 'Category' labeleddocument = header + '\n' + labeleddocument s3.put_object(Body=labeleddocument, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + fileName) pass else: pass return HttpResponse('success') except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def customFileUpload(request): # global training_data index_name = 'annotatefile' HOST_URLS = ["https://search-dataannotation-unp43zc5slowdwllbqukty73wy.us-east-1.es.amazonaws.com"] # In production or deploying to aws uncomment this line and comment local server settings. es_conn = Elasticsearch(HOST_URLS, timeout=30) AWS_STORAGE_BUCKET_NAME = 'dataannotate' try: if request.method == 'POST': shuffledArray = request.POST.getlist('shuffledArray')[0].split('#@@#') userName = request.user.username; # training_data = open(request.FILES.get('file').temporary_file_path(), 'r').read() # es_conn.indices.create(index=index_name) # form = FileUploadForm(request.POST, request.FILES) training_data = request.FILES.get('file').read().decode("ISO-8859-1") fileName = request.FILES['file'].name fileName = str( fileName.split('.')[0] + '_' + datetime.now().strftime("%Y%m%d-%H%M%S") + '.' + fileName.split('.')[1]) # res = es_conn.update(index=index_name,body={"doc": {"match_all": {}}}) headerTemplate = ['meta data', 'identification number', 'task', 'title', 'abstract', 'claims', 'application number', 'application date', 'assignee', 'current assignee', 'upc', 'appln. no.', 'appl. 
date', 'authors', 'affiliation', 'year', 'category'] file_sample_open = training_data file_sample_open = file_sample_open.split('\n') # split by new line file_sample_open = list(filter(None, file_sample_open)) # delete empty lines file_sample_headers = file_sample_open[0].strip('\r').split('\t') header_lower = [x.lower() for x in file_sample_headers] if set(header_lower).issubset(set(headerTemplate)): task_file_open = shuffledArray # task_file_open = task_file_open.split('\n') # split by new line # task_file_open = list(filter(None, task_file_open)) task_file_open = task_file_open[1:] task = [doc.split('\t')[0].strip('\r').strip('\n') for doc in task_file_open] task = task[0] # Now, the first line is header, so remove the first line query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} # save_response = es_conn.indices.create(index=index_name, ignore=400) if es_conn.indices.exists(index_name): es_conn.delete_by_query(index=index_name, body=query) else: save_response = es_conn.indices.create(index=index_name, ignore=400) es_conn.delete_by_query(index=index_name, body=query) if len(shuffledArray) > 0: datafile = { 'username': userName, 'training_data': shuffledArray, 'filename': fileName, 'task': task } es_conn.create(index=index_name, doc_type='_doc', body=datafile, id=uuid.uuid4()) key_value = 'inputfiles/' key_value += userName + '/' s3 = boto3.client('s3') file_sample_open = json.dumps(file_sample_open, ensure_ascii=False) s3.put_object(Body=file_sample_open, Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + fileName) dataResponse = 'sucess' else: dataResponse = 'errorfile' return HttpResponse(dataResponse) except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def removeData(request): HOST_URLS = ["https://search-dataannotation-unp43zc5slowdwllbqukty73wy.us-east-1.es.amazonaws.com"] # In production or deploying to aws uncomment this line and comment local server settings. es_conn = Elasticsearch(HOST_URLS, timeout=30) try: if request.method == "POST": userName = request.user.username; index_name = 'annotatefile' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} if es_conn.indices.exists(index_name): res = es_conn.search(index=index_name, body=query) else: save_response = es_conn.indices.create(index=index_name, ignore=400) res = es_conn.search(index=index_name, body=query) id = res['hits']['hits'][0]['_id'] training_data = res['hits']['hits'][0]['_source']['training_data'] training_data.pop(1) if len(training_data) == 1: es_conn.delete_by_query(index=index_name, body=query) # save_response = es_conn.indices.create(index=INDEX_NAME, ignore=400) else: es_conn.update(index=index_name, id=id, body={"doc": {"training_data": training_data}}) return HttpResponse('success') except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def resetDocument(request): # global training_data index_name = 'annotatefile' HOST_URLS = ["https://search-dataannotation-unp43zc5slowdwllbqukty73wy.us-east-1.es.amazonaws.com"] # In production or deploying to aws uncomment this line and comment local server settings. 
es_conn = Elasticsearch(HOST_URLS, timeout=30) try: if request.method == 'POST': userName = request.user.username; query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} if es_conn.indices.exists(index_name): es_conn.delete_by_query(index=index_name, body=query) else: save_response = es_conn.indices.create(index=index_name, ignore=400) es_conn.delete_by_query(index=index_name, body=query) return HttpResponse('sucess') except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def dataWhenReload(request): HOST_URLS = ["https://search-dataannotation-unp43zc5slowdwllbqukty73wy.us-east-1.es.amazonaws.com"] # In production or deploying to aws uncomment this line and comment local server settings. es_conn = Elasticsearch(HOST_URLS, timeout=30) try: if request.method == "POST": userName = request.user.username; index_name = 'annotatefile' query = {"query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} if es_conn.indices.exists(index_name): res = es_conn.search(index=index_name, body=query) else: save_response = es_conn.indices.create(index=index_name, ignore=400) res = es_conn.search(index=index_name, body=query) training_data = res['hits']['hits'][0]['_source']['training_data'] return JsonResponse({"training_data": training_data}) except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def categoryName(request): HOST_URLS = ["https://search-dataannotation-unp43zc5slowdwllbqukty73wy.us-east-1.es.amazonaws.com"] # In production or deploying to aws uncomment this line and comment local server settings. es_conn = Elasticsearch(HOST_URLS, timeout=30) try: if request.method == "POST": userName = request.user.username; index_name = 'annotatefile' query = {"_source": ["task"], "query": {"bool": {"must": {"match": {"username.keyword": userName}}}}} if es_conn.indices.exists(index_name): res = es_conn.search(index=index_name, body=query) else: save_response = es_conn.indices.create(index=index_name, ignore=400) res = es_conn.search(index=index_name, body=query) task = res['hits']['hits'][0]['_source']['task'] index_name = 'dataannotateresult' query = {"query": {"bool": {"must": {"match": {"task": task}}}}, "size": 10000} if es_conn.indices.exists(index_name): res = es_conn.search(index=index_name, body=query) else: save_response = es_conn.indices.create(index=index_name, ignore=400) res = es_conn.search(index=index_name, body=query) return JsonResponse(res) except Exception as e: return HttpResponse( "Error running the program.") @csrf_exempt def downloads3file(request): # In production or deploying to aws uncomment this line and comment local server settings. 
    HOST_URLS = ["https://admin:Samsung1!@search-apolloannotate-llycnkz2qhhenqpe4gj4tl2thy.us-east-1.es.amazonaws.com"]
    es_conn = Elasticsearch(HOST_URLS, timeout=30)
    AWS_STORAGE_BUCKET_NAME = 'dataannotate'
    s3 = boto3.client('s3')
    response1 = ''
    try:
        if request.method == "GET":
            userName = request.user.username
            index_name = 'annotatefile'
            query = {"_source": ["filename"],
                     "query": {"bool": {"must": {"match": {"username.keyword": userName}}}}}
            res = es_conn.search(index=index_name, body=query)
            filename = res['hits']['hits'][0]['_source']['filename']
            key_value = 'catogorizedfiles/'
            key_value += userName + '/'
            response = s3.get_object(Bucket=AWS_STORAGE_BUCKET_NAME, Key=key_value + filename)
            filecontent = response['Body'].read().decode('utf-8')
            response1 = HttpResponse(filecontent, content_type='text/plain')
            response1['Content-Disposition'] = 'attachment; filename=' + filename
            # workbook.save(response1)
            return response1
    except Exception as e:
        return HttpResponse(
            "Error running the program.")
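The view functions above repeat one S3 idiom many times: download history.txt, append the new run log, and upload the result, creating the object when S3 answers 404 or NoSuchKey. Below is a minimal sketch of that read-append-write helper; the bucket and key in the usage comment are placeholder values, not the application's real configuration.

import boto3
import botocore


def append_to_history(s3, bucket, key, new_text):
    """Download key, append new_text, and upload the result.

    If the object does not exist yet (404 / NoSuchKey), a new one is created.
    """
    try:
        old_text = s3.get_object(Bucket=bucket, Key=key)['Body'].read().decode('utf-8')
        body = old_text + new_text
    except botocore.exceptions.ClientError as err:
        if err.response['Error']['Code'] in ('404', 'NoSuchKey'):
            body = new_text            # no history yet, start a fresh file
        else:
            raise                      # anything else is a real failure
    s3.put_object(Body=body, Bucket=bucket, Key=key)


# Hypothetical usage with placeholder bucket/key values:
# s3 = boto3.client('s3')
# append_to_history(s3, 'my-bucket', 'someproject/history.txt', 'run finished\n')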
led.py
#!/usr/bin/env python3
import led, sys, threading


def run(use_curses=False):
    player = led.Player.Player()
    t1 = threading.Thread(target=player.run_and_exit)
    target = None if use_curses else led.Keyboard.keyboard
    t2 = threading.Thread(target=target, args=(player.keyboard,))
    t1.start()
    t2.start()


if __name__ == '__main__':
    run()
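led.py starts the player loop and the keyboard reader on two threads and then returns, so the interpreter only exits once both threads finish on their own. A small sketch of the same pattern with explicit joins follows; StubPlayer and stub_keyboard are hypothetical stand-ins for the real led.Player and led.Keyboard objects, which are not shown here.

import threading


class StubPlayer:
    """Stand-in for led.Player.Player with a keyboard attribute."""

    def __init__(self):
        self.keyboard = []           # whatever object the keyboard reader feeds

    def run_and_exit(self):
        print("player running")


def stub_keyboard(keyboard):
    keyboard.append("q")             # pretend a key was read
    print("keyboard reader done")


def run():
    player = StubPlayer()
    t1 = threading.Thread(target=player.run_and_exit)
    t2 = threading.Thread(target=stub_keyboard, args=(player.keyboard,))
    t1.start()
    t2.start()
    t1.join()                        # wait for both workers before exiting
    t2.join()


if __name__ == '__main__':
    run()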
ChatClient.py
#!/usr/bin/python3
import socket
import threading
import tkinter

BUFSIZ = 1024
host = '127.0.0.1'
port = 19787


def recv_message(sock):
    while True:
        try:
            message = sock.recv(BUFSIZ).decode()
            message_box.insert(tkinter.END, message)
            # for auto-scroll
            message_box.select_clear(message_box.size() - 2)
            message_box.select_set(tkinter.END)
            message_box.yview(tkinter.END)
        except:
            break


def send_message(event=None):
    message = input_text.get()
    input_text.set("")
    sock.sendall(message.encode())
    if message == "Bye!":
        sock.close()
        top.quit()


def on_closing(event=None):
    input_text.set("Bye!")
    send_message()


top = tkinter.Tk()
top.title("Chat Box")
top.protocol("WM_DELETE_WINDOW", on_closing)

frame = tkinter.Frame(top)
scrollbar = tkinter.Scrollbar(frame)
message_box = tkinter.Listbox(frame, height=15, width=50)
scrollbar.configure(command=message_box.yview)
message_box.configure(yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
message_box.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
frame.pack()

input_text = tkinter.StringVar()
input_text.set("")
text_field = tkinter.Entry(top, textvariable=input_text)
text_field.bind("<Return>", send_message)
text_field.pack()

send_button = tkinter.Button(top, text="Send")
send_button.bind("<Button-1>", send_message)
send_button.pack()

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))

recv_thread = threading.Thread(target=recv_message, args=(sock,))
recv_thread.start()

tkinter.mainloop()
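ChatClient.py assumes a plain TCP chat server on 127.0.0.1:19787 that relays raw UTF-8 strings and treats "Bye!" as a client signing off; the server itself is not included. Here is a minimal broadcast-server sketch that matches the client's expectations (the relay behaviour and the "Bye!" convention are inferred from the client code, not taken from an actual server source).

import socket
import threading

HOST, PORT = '127.0.0.1', 19787
BUFSIZ = 1024
clients = []
lock = threading.Lock()


def handle(conn):
    with lock:
        clients.append(conn)
    try:
        while True:
            data = conn.recv(BUFSIZ)
            if not data or data.decode() == "Bye!":
                break
            with lock:
                for other in clients:      # relay to every connected client
                    other.sendall(data)
    finally:
        with lock:
            clients.remove(conn)
        conn.close()


server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((HOST, PORT))
server.listen()
while True:
    conn, _ = server.accept()
    threading.Thread(target=handle, args=(conn,), daemon=True).start()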
suspend_resume.py
# Copyright 2013 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Suspend and resume device with given cycles. Description ----------- Suspends and resumes the device an adjustable number of times for adjustable random lengths of time. Test Procedure -------------- This is an automated test without user interaction. When started, the test will try to suspend and resume by given arguments. Will fail if device wakes too early, or if unexpected reboot (or crash) found. Dependency ---------- - rtc's ``wakealarm`` entry in ``sysfs``. - ``check_powerd_config`` if the argument ``suspend_type`` is not set. Note that the rtc sysfs entry may vary from device to device, so the test_list must define the path to the correct sysfs entry for the specific device, the default assumes a typical ``/sys/class/rtc/rtc0`` entry. Examples -------- To suspend/resume in 1 cycle, suspend in 5~10 seconds, resume in 5~10 seconds, and suspend to memory (see more criteria from arguments):: { "pytest_name": "suspend_resume" } """ import errno import logging import os import random import re import threading from cros.factory.test import event_log # TODO(chuntsen): Deprecate event log. from cros.factory.test.i18n import _ from cros.factory.test import session from cros.factory.test import state from cros.factory.test import test_case from cros.factory.testlog import testlog from cros.factory.utils.arg_utils import Arg from cros.factory.utils import debug_utils from cros.factory.utils import file_utils from cros.factory.utils import process_utils from cros.factory.utils import sync_utils _MIN_SUSPEND_MARGIN_SECS = 5 _MESSAGES = '/var/log/messages' _WAKEUP_PATH = '/sys/class/wakeup' _KERNEL_DEBUG_SUSPEND_STATS = '/sys/kernel/debug/suspend_stats' _MAX_EARLY_RESUME_RETRY_COUNT = 3 class SuspendResumeTest(test_case.TestCase): ARGS = [ Arg('cycles', int, 'Number of cycles to suspend/resume', default=1), Arg('suspend_delay_max_secs', int, 'Max time in sec during suspend per ' 'cycle', default=10), Arg('suspend_delay_min_secs', int, 'Min time in sec during suspend per ' 'cycle', default=5), Arg('resume_delay_max_secs', int, 'Max time in sec during resume per cycle', default=10), Arg('resume_delay_min_secs', int, 'Min time in sec during resume per cycle', default=5), Arg('resume_early_margin_secs', int, 'The allowable margin for the ' 'DUT to wake early', default=0), Arg('resume_worst_case_secs', int, 'The worst case time a device is ' 'expected to take to resume', default=30), Arg('suspend_worst_case_secs', int, 'The worst case time a device is ' 'expected to take to suspend', default=60), Arg('wakealarm_path', str, 'Path to the wakealarm file', default='/sys/class/rtc/rtc0/wakealarm'), Arg('time_path', str, 'Path to the time (since_epoch) file', default='/sys/class/rtc/rtc0/since_epoch'), Arg('wakeup_count_path', str, 'Path to the wakeup_count file', default='/sys/power/wakeup_count'), Arg('suspend_type', str, 'Suspend type. 
The default is to use ``freeze`` if the platform ' 'supports it, or ``mem`` for other cases.', default=None), Arg('ignore_wakeup_source', str, 'Wakeup source to ignore', default=None), Arg('early_resume_retry_wait_secs', int, 'Time to wait before re-suspending after early resume', default=3), Arg('ensure_wakealarm_cleared', bool, 'Raise exception if wakealarm is not cleared after resume', default=True)] def setUp(self): self.assertTrue(os.path.exists(_WAKEUP_PATH), 'wakeup_sources file not found.') self.assertTrue(os.path.exists(_KERNEL_DEBUG_SUSPEND_STATS), 'suspend_stats file not found.') self.assertTrue(os.path.exists(self.args.wakealarm_path), 'wakealarm_path ' '%s is not found, bad path?' % self.args.wakealarm_path) self.assertTrue(os.path.exists(self.args.time_path), 'time_path %s is not ' 'found, bad path?' % self.args.time_path) self.assertGreaterEqual(self.args.suspend_delay_min_secs, _MIN_SUSPEND_MARGIN_SECS, 'The ' 'suspend_delay_min_secs is too low, bad ' 'test_list?') self.assertGreaterEqual(self.args.suspend_delay_max_secs, self.args.suspend_delay_min_secs, 'Invalid suspend ' 'timings provided in test_list (max < min).') self.assertGreaterEqual(self.args.resume_delay_max_secs, self.args.resume_delay_min_secs, 'Invalid resume ' 'timings provided in test_list (max < min).') self.goofy = state.GetInstance() self.ui.ToggleTemplateClass('font-large', True) self.done = False self.suspend_type = None self.wakeup_count = '' self.wakeup_source_event_count = {} self.start_time = 0 self.resume_at = 0 self.attempted_wake_extensions = 0 self.actual_wake_extensions = 0 self.initial_suspend_count = 0 self.alarm_started = threading.Event() self.alarm_thread = None self.messages = None # Group checker for Testlog. self.group_checker = testlog.GroupParam( 'suspend_resume_cycle', ['run', 'start_time', 'suspend_time', 'resume_time', 'resume_at', 'wakeup_count', 'suspend_count', 'initial_suspend_count', 'attempted_wake_extensions', 'actual_wake_extensions', 'alarm_suspend_delays', 'wake_source']) def tearDown(self): # Always log the last suspend/resume block we saw. This is most # useful for failures, of course, but we log the last block for # successes too to make them easier to compare. if self.messages: # Remove useless lines that have any of these right after the square # bracket: # call # G[A-Z]{2}\d? (a register name) # save messages = re.sub(r'^.*\] (call|G[A-Z]{2}\d?|save).*$\n?', '', self.messages, flags=re.MULTILINE) logging.info('Last suspend block:\n%s', re.sub('^', ' ', messages, flags=re.MULTILINE)) self.done = True if self.alarm_thread: self.alarm_thread.join(5) self.assertFalse(self.alarm_thread.isAlive(), 'Alarm thread failed join.') # Clear any active wake alarms self._SetWakealarm('0') @staticmethod def _ReadAndCastFileSafely(path, return_type=str): try: text = file_utils.ReadFile(path) except IOError as err: logging.info('Reading %s failed. Error: %s', path, err) return None try: return return_type(text) except ValueError as err: logging.info('Casting %s to %r failed. Error: %s', text, return_type, err) return None def _GetWakeupSourceCounts(self): """Return snapshot of current event counts. Returns: Dictionary, key is sysfs path and value is its event_count. 
""" wakeup_sources = [os.path.join(_WAKEUP_PATH, name) for name in os.listdir(_WAKEUP_PATH)] return {wakeup_source: self._ReadAndCastFileSafely( os.path.join(wakeup_source, 'event_count'), int) for wakeup_source in wakeup_sources} def _GetPossibleWakeupSources(self): """Return all possible wakeup sources that may cause the wake. After writing to self.wakeup_count, the event count of any wakeup source which tries to wake up the device will increase. Returns: Dictionary, key is sysfs path and value is its name. """ wake_sources = {} current_wakeup_source_event_count = self._GetWakeupSourceCounts() sources = (set(current_wakeup_source_event_count) | set(self.wakeup_source_event_count)) for wakeup_source in sources: snapshot_event_count = self.wakeup_source_event_count.get(wakeup_source) current_event_count = current_wakeup_source_event_count.get(wakeup_source) if snapshot_event_count == current_event_count: continue name = (self._ReadAndCastFileSafely( os.path.join(wakeup_source, 'name')) or 'unknown').strip() if snapshot_event_count is None or current_event_count is None: logging.info('wakeup_source %s(%r) %sappeared after suspend.', wakeup_source, name, 'dis' if current_event_count is None else '') else: wake_sources.update({wakeup_source: name}) return wake_sources def _MonitorWakealarm(self): """Start and extend the wakealarm as needed for the main thread.""" self._SetWakealarm(str(self.resume_at)) self.alarm_started.set() self.Sleep(_MIN_SUSPEND_MARGIN_SECS) # Wait for suspend. # The loop below will be run after resume, or when the device doesn't # suspend in _MIN_SUSPEND_MARGIN_SECS seconds. while not self.done: self.Sleep(0.5) # Wait for suspend_stats to get updated after resume. if self._ReadSuspendCount() >= self.initial_suspend_count + self.run: break # A normal suspend-resume should not get here. cur_time = self._ReadCurrentTime() if cur_time >= self.resume_at - 1: self.attempted_wake_extensions += 1 logging.warning('Late suspend detected, attempting wake extension.') try: self._SetWakealarm('+=%d' % _MIN_SUSPEND_MARGIN_SECS) except IOError: # This happens when the device actually suspends and resumes but # suspend_stats is not updated yet, or when the device hangs for a # while and suspends just before we try to extend the wake time. logging.warning('Write to wakealarm failed, assuming we woke.') break self.resume_at = self.resume_at + _MIN_SUSPEND_MARGIN_SECS self.actual_wake_extensions += 1 logging.info('Attempted extending the wake timer %d s, resume is now ' 'at %d.', _MIN_SUSPEND_MARGIN_SECS, self.resume_at) self.assertGreaterEqual( self.start_time + self.args.suspend_worst_case_secs, cur_time, 'Suspend timeout, device did not suspend within %d sec.' % self.args.suspend_worst_case_secs) self.alarm_started.clear() def _Suspend(self, retry_count=0): """Suspend the device by writing to /sys/power/state. First write to wakeup_count, then write to /sys/power/state. See kernel/power/main.c for detailed description. """ # Explicitly sync the filesystem process_utils.Spawn(['sync'], check_call=True, log_stderr_on_error=True) self.wakeup_source_event_count = self._GetWakeupSourceCounts() logging.info('Suspending at %d.', self._ReadCurrentTime()) try: # Write out the expected wakeup_count. Wakeup_count is a mechanism to # handle wakeup events in a non-racy way. The write could fail with # EINVAL if another wakeup event occurred since the last read of # wakeup_count, and we should not write to /sys/power/state if this # happens. 
logging.info('Writing "%s" to wakeup_count.', self.wakeup_count) file_utils.WriteFile(self.args.wakeup_count_path, self.wakeup_count) except IOError as err: if err.errno == errno.EINVAL: wake_sources = self._GetPossibleWakeupSources() raise IOError('EINVAL: Failed to write to wakeup_count. Maybe there is ' 'another program trying to suspend at the same time?' 'source=%r' % wake_sources) raise IOError('Failed to write to wakeup_count: %s' % debug_utils.FormatExceptionOnly()) try: # Suspend to memory. The write could fail with EBUSY if another wakeup # event occurred since the last write to /sys/power/wakeup_count. logging.info('Writing "%s" to /sys/power/state.', self.suspend_type) file_utils.WriteFile('/sys/power/state', self.suspend_type) except IOError as err: if err.errno == errno.EBUSY: logging.info('Early wake event when attempting suspend.') wake_sources = self._GetPossibleWakeupSources() if self.args.ignore_wakeup_source in wake_sources: if retry_count == _MAX_EARLY_RESUME_RETRY_COUNT: raise RuntimeError('Maximum re-suspend retry exceeded for ' 'ignored wakeup source %s' % self.args.ignore_wakeup_source) logging.info('Wakeup source ignored, re-suspending...') self.Sleep(self.args.early_resume_retry_wait_secs) self.wakeup_count = file_utils.ReadFile( self.args.wakeup_count_path).strip() self._Suspend(retry_count + 1) return raise IOError('EBUSY: Early wake event when attempting suspend: %s, ' 'source=%r' % (debug_utils.FormatExceptionOnly(), wake_sources)) raise IOError('Failed to write to /sys/power/state: %s' % debug_utils.FormatExceptionOnly()) logging.info('Returning from suspend at %d.', self._ReadCurrentTime()) def _ReadSuspendCount(self): """Read the current suspend count from /sys/kernel/debug/suspend_stats. This assumes the first line of suspend_stats contains the number of successfull suspend cycles. Args: None. Returns: Int, the number of suspends the system has executed since last reboot. """ line_content = file_utils.ReadFile(_KERNEL_DEBUG_SUSPEND_STATS).strip() return int(re.search(r'[0-9]+', line_content).group(0)) def _ReadCurrentTime(self): """Read the current time in seconds since_epoch. Args: None. Returns: Int, the time since_epoch in seconds. """ return int(file_utils.ReadFile(self.args.time_path).strip()) def _VerifySuspended(self, wake_time, wake_source, count, resume_at): """Verify that a reasonable suspend has taken place. Args: wake_time: the time at which the device resumed wake_source: the wake source, if known count: expected number of suspends the system has executed resume_at: expected time since epoch to have resumed Returns: Boolean, True if suspend was valid, False if not. """ self.assertGreaterEqual( wake_time, resume_at - self.args.resume_early_margin_secs, 'Premature wake detected (%d s early, source=%s), spurious event? ' '(got touched?)' % (resume_at - wake_time, wake_source or 'unknown')) self.assertLessEqual( wake_time, resume_at + self.args.resume_worst_case_secs, 'Late wake detected (%ds > %ds delay, source=%s), timer failure?' % ( wake_time - resume_at, self.args.resume_worst_case_secs, wake_source or 'unknown')) actual_count = self._ReadSuspendCount() self.assertEqual( count, actual_count, 'Incorrect suspend count: ' + ( 'no suspend?' if actual_count < count else 'spurious suspend?')) def _SetWakealarm(self, content, raise_exception=True): """Set wakealarm by writing a string to wakealarm file. See drivers/rtc/rtc-sysfs.c for detailed implementation for setting wakealarm value. Args: content: the string to write to wakealarm file. 
It can be TIME: Set the wakealarm time to TIME, where TIME is in seconds since epoch. If TIME is earlier than current time, the write will clear the active wakealarm. If TIME is later then current time and there is an active wakealarm, the write fails and raises IOError (EBUSY). +TIME: Set the wakealarm time to (current time + TIME seconds). If there is an active wakealarm, the write fails and raises IOError (EBUSY). +=TIME: Extend the wakealarm time by TIME seconds. If there is no active wakealarm, the write fails with IOError (EINVAL). raise_exception: True to raise IOError when writing to wakealarm file fails. Raises: IOError: when raise_exception is True and writing to wakealarm file fails. """ try: logging.info('Writing "%s" to %s.', content, self.args.wakealarm_path) file_utils.WriteFile(self.args.wakealarm_path, content) except IOError: error_msg = 'Failed to write to wakealarm.' if raise_exception: raise IOError(error_msg) logging.warning(error_msg) def _VerifyWakealarmCleared(self, raise_exception=True): """Verify that wakealarm is cleared after resume. Wakealarm should be cleared after resume, but sometimes it isn't cleared and will cause write error at next suspend (b/120858506). Report warnings or raise an exception if wakealarm is not cleared, and always explicitly clear it again to make sure we can set wakealarm at next suspend. Args: raise_exception: True to raise an exception if wakealarm is not cleared, otherwise only show warning message. Raises: RuntimeError: If raise_exception is True and wakealarm is not cleared. """ content = file_utils.ReadFile(self.args.wakealarm_path).strip() if content: error_msg = 'Wakealarm is not cleared after resume, value: %s.' % content if raise_exception: raise RuntimeError(error_msg) logging.warning(error_msg) self._SetWakealarm('0') def _HandleMessages(self, messages_start): """Finds the suspend/resume chunk in /var/log/messages. The contents are saved to self.messages to be logged on failure. Returns: The wake source, or none if unknown. """ # The last chunk we read. In a list so it can be written from # ReadMessages. last_messages = [''] def ReadMessages(messages_start): try: with open(_MESSAGES) as f: # Read from messages_start to the end of the file. f.seek(messages_start) last_messages[0] = messages = f.read() # If we see this, we can consider resume to be done. match = re.search( r'\] Restarting tasks \.\.\.' # "Restarting tasks" line r'.+' # Any number of charcaters r'\] done\.\n', # "done." line messages, re.DOTALL | re.MULTILINE) if match: messages = messages[:match.end()] return messages except IOError: logging.exception('Unable to read %s.', _MESSAGES) return None messages = sync_utils.Retry(10, 0.2, None, ReadMessages, messages_start) if not messages: # We never found it. 
Just use the entire last chunk read messages = last_messages[0] logging.info( 'To view suspend/resume messages: ' 'dd if=/var/log/messages skip=%d count=%d ' 'iflag=skip_bytes,count_bytes', messages_start, len(messages)) # Find the wake source wake_source = self._GetPossibleWakeupSources() logging.info('Wakeup source: %s.', wake_source or 'unknown') self.messages = messages return wake_source def _ResolveSuspendType(self): if self.args.suspend_type: self.suspend_type = self.args.suspend_type else: logging.info( 'Suspend type is not specified, auto-detect the supported one.') retcode = process_utils.Spawn( ['check_powerd_config', '--suspend_to_idle'], log=True, call=True).returncode self.suspend_type = 'freeze' if retcode == 0 else 'mem' session.console.info('Set the suspend type to %r.', self.suspend_type) def runTest(self): self._ResolveSuspendType() self.initial_suspend_count = self._ReadSuspendCount() logging.info('The initial suspend count is %d.', self.initial_suspend_count) random.seed(0) # Make test deterministic for self.run in range(1, self.args.cycles + 1): self.attempted_wake_extensions = 0 self.actual_wake_extensions = 0 alarm_suspend_delays = 0 self.alarm_thread = threading.Thread(target=self._MonitorWakealarm) self.ui.SetState( _('Suspend/Resume: {run} of {cycle}', run=self.run, cycle=self.args.cycles)) self.start_time = self._ReadCurrentTime() suspend_time = random.randint(self.args.suspend_delay_min_secs, self.args.suspend_delay_max_secs) resume_time = random.randint(self.args.resume_delay_min_secs, self.args.resume_delay_max_secs) self.resume_at = suspend_time + self.start_time logging.info('Suspend %d of %d for %d seconds, starting at %d.', self.run, self.args.cycles, suspend_time, self.start_time) self.wakeup_count = file_utils.ReadFile( self.args.wakeup_count_path).strip() self.alarm_thread.start() self.assertTrue(self.alarm_started.wait(_MIN_SUSPEND_MARGIN_SECS), 'Alarm thread timed out.') messages_start = os.path.getsize(_MESSAGES) self._Suspend() wake_time = self._ReadCurrentTime() wake_source = self._HandleMessages(messages_start) self._VerifySuspended(wake_time, wake_source, self.initial_suspend_count + self.run, self.resume_at) self._VerifyWakealarmCleared( raise_exception=self.args.ensure_wakealarm_cleared) logging.info('Resumed %d of %d for %d seconds.', self.run, self.args.cycles, resume_time) self.Sleep(resume_time) while self.alarm_thread.isAlive(): alarm_suspend_delays += 1 logging.warning('alarm thread is taking a while to return, waiting 1s.') self.Sleep(1) self.assertGreaterEqual(self.start_time + self.args.suspend_worst_case_secs, self._ReadCurrentTime(), 'alarm thread did not return within %d sec.' 
% self.args.suspend_worst_case_secs) suspend_count = self._ReadSuspendCount() event_log.Log('suspend_resume_cycle', run=self.run, start_time=self.start_time, suspend_time=suspend_time, resume_time=resume_time, resume_at=self.resume_at, wakeup_count=self.wakeup_count, suspend_count=suspend_count, initial_suspend_count=self.initial_suspend_count, attempted_wake_extensions=self.attempted_wake_extensions, actual_wake_extensions=self.actual_wake_extensions, alarm_suspend_delays=alarm_suspend_delays, wake_source=wake_source) with self.group_checker: testlog.LogParam('run', self.run) testlog.LogParam('start_time', self.start_time) testlog.LogParam('suspend_time', suspend_time) testlog.LogParam('resume_time', resume_time) testlog.LogParam('resume_at', self.resume_at) testlog.LogParam('wakeup_count', self.wakeup_count) testlog.LogParam('suspend_count', suspend_count) testlog.LogParam('initial_suspend_count', self.initial_suspend_count) testlog.LogParam('attempted_wake_extensions', self.attempted_wake_extensions) testlog.LogParam('actual_wake_extensions', self.actual_wake_extensions) testlog.LogParam('alarm_suspend_delays', alarm_suspend_delays) testlog.LogParam('wake_source', wake_source)
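The docstrings in suspend_resume.py describe two sysfs handshakes: arming the RTC wakealarm with an absolute wake time, and writing the snapshotted wakeup_count back to the kernel before writing the suspend state. The stripped-down sketch below shows that sequence outside the factory framework, using the test's default paths; it must run as root and is only an illustration of the protocol, not a substitute for the test.

import errno
import time

WAKEALARM = '/sys/class/rtc/rtc0/wakealarm'
WAKEUP_COUNT = '/sys/power/wakeup_count'
POWER_STATE = '/sys/power/state'


def suspend_for(seconds, suspend_type='mem'):
    # Clear any stale alarm, then arm an absolute wake time (seconds since epoch).
    with open(WAKEALARM, 'w') as f:
        f.write('0')
    with open(WAKEALARM, 'w') as f:
        f.write(str(int(time.time()) + seconds))

    # Snapshot wakeup_count and hand it back to the kernel.  EINVAL on the
    # wakeup_count write, or EBUSY on the state write, means another wakeup
    # event raced the suspend and we should not (or could not) suspend.
    count = open(WAKEUP_COUNT).read().strip()
    try:
        with open(WAKEUP_COUNT, 'w') as f:
            f.write(count)
        with open(POWER_STATE, 'w') as f:
            f.write(suspend_type)    # blocks here until resume
    except IOError as err:
        if err.errno in (errno.EINVAL, errno.EBUSY):
            print('wakeup event raced the suspend; retry later')
        else:
            raise


# suspend_for(10)   # suspend roughly 10 seconds, woken by the RTC alarm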
channel.py
#
# Copyright (C) 2019-2020 Intel Corporation. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#

import time
import socket
import errno
import ctypes
from threading import Thread


class ChannelType(object):
    """Enumeration-like holder for the supported channel types."""

    class __type(object):
        def __init__(self, name, value):
            self.name = name
            self.__value = value

        def __str__(self):
            return str(self.__value)

        def __eq__(self, second):
            return self.__value == second

    NONE = __type('NONE', -2)
    CONTROL = __type('CONTROL', -1)
    CORE = __type('CORE', 0)
    MODULE = __type('MODULE', 1)
    UNCORE = __type('UNCORE', 2)


class ChannelException(Exception):
    pass


class Channel(object):
    """TCP channel that can buffer received data or stream it to a file."""

    def __init__(self, index, log):
        self.global_index = index
        self.__log = log
        self.__is_file_busy = False
        self.__file_name = None
        self.__created = False
        self.__channel_type = None
        self.__socket = None
        self.__connected = False
        self.__listener = None
        self.__clean()

    def __clean(self):
        self.__socket = None
        self.__listener = None
        self.__is_file_busy = False
        self.__channel_type = ChannelType.NONE

    def __check_socket(self):
        if not self.__socket:
            raise ChannelException("ERROR: Channel.create() wasn't called")

    def __send_data(self, data):
        # socket.sendall() returns None on success.
        ret = self.__socket.sendall(data)
        if ret is None:
            return True

    def __receive_data(self, length):
        if self.__is_file_busy:
            raise ChannelException(
                "ERROR: Receiving data to a buffer while the file is being received")
        data = bytearray()
        while len(data) < length:
            try:
                packet = self.__socket.recv(length - len(data))
            except IOError as err:
                raise ChannelException(
                    "ERROR: Data transferring failed. With exception {}".format(err))
            if not packet:
                break
            data += bytearray(packet)
        return data

    def __close_socket(self):
        if self.__socket is not None:
            try:
                # self.__socket.shutdown(socket.SHUT_RDWR)
                self.__socket.close()
            except IOError:
                if not self.__connected:
                    self.__connected = True
                else:
                    raise ChannelException("ERROR: Cannot close channel")

    @property
    def info(self):
        return "CHANNEL {}#{}".format(self.__channel_type.name, self.global_index)

    @property
    def type(self):
        return self.__channel_type

    def set_type(self, channel_type):
        self.__channel_type = channel_type

    def is_created(self):
        return self.__created

    def create(self, channel_type):
        self.__created = True
        self.__channel_type = channel_type
        self.__log.debug('{} - Creation. "data_{}.{}.bin"'.format(
            self.info, self.__channel_type.name, self.global_index))
        self.__file_name = "data_{}.{}.bin".format(
            self.__channel_type.name, self.global_index)
        self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def connect(self, ip, port, attempts=1):
        self.__check_socket()
        for _ in range(attempts):
            try:
                self.__socket.connect((ip, port))
                break
            except socket.error as error:
                if error.errno != errno.ECONNREFUSED:
                    raise
                time.sleep(0.1)
        else:
            raise ChannelException("ERROR: Cannot connect to target socket")
        self.__connected = True

    def send(self, data):
        self.__check_socket()
        self.__log.debug('{0} - Sending message: {data}'.format(self.info, **locals()))
        self.__send_data(data)

    def send_structure(self, structure):
        self.__check_socket()
        self.__log.debug('{} - Sending structure: {}'.format(self.info, structure.to_string()))
        self.__send_data(structure)

    def receive(self, length):
        self.__check_socket()
        data = self.__receive_data(length)
        self.__log.debug('{0} - Received data length of {length}: {data}'.format(
            self.info, **locals()))
        return data

    def receive_structure(self, structure_type):
        data = self.__receive_data(length=ctypes.sizeof(structure_type))
        structure = structure_type.from_buffer(data)
        self.__log.debug('{0} - Received structure: {1}'.format(self.info, structure.to_string()))
        return structure

    def start_receive_thread(self, to_file=False):
        def listen_to_file():
            # Stream everything that arrives on the socket into the
            # per-channel data file until the peer closes the connection.
            self.__is_file_busy = True
            with open(self.__file_name, 'wb') as file_obj:
                try:
                    packet = self.__socket.recv(1024)
                    while packet:
                        file_obj.write(packet)
                        packet = self.__socket.recv(1024)
                except IOError as err:
                    pass
                    # print("ERROR: Data transferring failed on {}. With exception {}".format(self.info, err))
                    # raise ChannelException("ERROR: Data transferring failed on {}. With exception {}".format(self.info, err))
            self.__is_file_busy = False

        if to_file:
            self.__listener = Thread(target=listen_to_file)
        else:
            raise NotImplementedError("Possible to write only to file now")
        self.__listener.start()

    def stop_receive_thread(self):
        if self.__listener:
            self.__listener.join()
            self.__listener = None

    def data_from_file(self):
        with open(self.__file_name, 'rb') as file_obj:
            return bytearray(file_obj.read())

    def close(self):
        self.__log.debug('{} - Closing'.format(self.info))
        self.stop_receive_thread()
        self.__close_socket()
        self.__clean()


class ChannelList(object):
    """Fixed-size pool of channels addressed by their global index."""

    def __init__(self, length, log):
        self._log = log
        self._length = length
        self._channels = [Channel(global_index, log=self._log)
                          for global_index in range(length)]
        self._module_channel = None
        self._uncore_channel = None

    def __iter__(self):
        # Iterate only over channels that have actually been created.
        return iter([channel for channel in self._channels if channel.is_created()])

    @property
    def reserved(self):
        return iter(self._channels)

    @property
    def length(self):
        return self._length

    def create(self, global_indexes, channel_type):
        for global_index in global_indexes:
            if global_index >= self._length:
                raise ChannelException(
                    "ERROR: Not possible to create data channel. Max count is reached")
            self._channels[global_index].create(channel_type)

    def connect(self, ip, port, attempts=1):
        for channel in self:
            channel.connect(ip, port, attempts)

    def includes(self, *channels):
        for channel in channels:
            self._channels[channel.global_index] = channel

    def close(self):
        for channel in self._channels:
            if channel.is_created():
                channel.close()
pre_commit_linter.py
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pre-commit script for Oppia. This script lints Python and JavaScript code, and prints a list of lint errors to the terminal. If the directory path is passed, it will lint all Python and JavaScript files in that directory; otherwise, it will only lint files that have been touched in this commit. This script ignores all filepaths contained within .eslintignore. ===================== CUSTOMIZATION OPTIONS ===================== 1. To lint only files that have been touched in this commit python -m scripts.linters.pre_commit_linter 2. To lint all files in the folder or to lint just a specific file python -m scripts.linters.pre_commit_linter --path filepath 3. To lint a specific list of files. Separate filepaths by spaces python -m scripts.linters.pre_commit_linter --files filepath_1 filepath_2 ... filepath_n 4. To lint files in verbose mode python -m scripts.linters.pre_commit_linter --verbose 5. To lint a specific list of file extensions. Separate file extensions by spaces python -m scripts.linters.pre_commit_linter --only-check-file-extensions py js Note that the root folder MUST be named 'oppia'. """ from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules import argparse import fnmatch import multiprocessing import os import re import subprocess import sys import threading import python_utils # Install third party dependencies before proceeding. from . import codeowner_linter from . import css_linter from . import general_purpose_linter from . import html_linter from . import js_ts_linter from . import linter_utils from . import other_files_linter from . import python_linter from .. import common from .. import concurrent_task_utils from .. import install_third_party_libs _PARSER = argparse.ArgumentParser() _EXCLUSIVE_GROUP = _PARSER.add_mutually_exclusive_group() _PARSER.add_argument( '--path', help='path to the directory with files to be linted', action='store') _EXCLUSIVE_GROUP.add_argument( '--files', nargs='+', help='specific files to be linted. Space separated list', action='store') _EXCLUSIVE_GROUP.add_argument( '--verbose', help='verbose mode. All details will be printed.', action='store_true') _PARSER.add_argument( '--only-check-file-extensions', nargs='+', choices=['html', 'css', 'js', 'ts', 'py', 'other'], help='specific file extensions to be linted. Space separated list. 
' 'If either of js or ts used then both js and ts files will be linted.', action='store') _PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) _PATHS_TO_INSERT = [ os.getcwd(), os.path.join( common.GOOGLE_APP_ENGINE_SDK_HOME, 'lib', 'yaml-3.10'), os.path.join( common.GOOGLE_APP_ENGINE_SDK_HOME, 'lib', 'jinja2-2.6'), os.path.join( common.GOOGLE_APP_ENGINE_SDK_HOME), os.path.join( _PARENT_DIR, 'oppia_tools', 'webtest-%s' % common.WEBTEST_VERSION), os.path.join( _PARENT_DIR, 'oppia_tools', 'PyGithub-%s' % common.PYGITHUB_VERSION), os.path.join( _PARENT_DIR, 'oppia_tools', 'setuptools-%s' % common.SETUPTOOLS_VERSION), os.path.join( _PARENT_DIR, 'oppia_tools', 'Pillow-%s' % common.PILLOW_VERSION), os.path.join( _PARENT_DIR, 'oppia_tools', 'protobuf-%s' % common.PROTOBUF_VERSION), os.path.join( _PARENT_DIR, 'oppia_tools', 'psutil-%s' % common.PSUTIL_VERSION), os.path.join( _PARENT_DIR, 'oppia_tools', 'pip-tools-%s' % common.PIP_TOOLS_VERSION), os.path.join( _PARENT_DIR, 'oppia_tools', 'simple-crypt-%s' % common.SIMPLE_CRYPT_VERSION), common.THIRD_PARTY_PYTHON_LIBS_DIR ] for path in _PATHS_TO_INSERT: sys.path.insert(0, path) _TARGET_STDOUT = python_utils.string_io() _STDOUT_LIST = multiprocessing.Manager().list() _FILES = multiprocessing.Manager().dict() class FileCache(python_utils.OBJECT): """Provides thread-safe access to cached file content.""" def __init__(self): self._CACHE_DATA_DICT = {} def read(self, filepath, mode='r'): """Returns the data read from the file in unicode form. Args: filepath: str. The file path from which data is to be read. mode: str. The mode in which the file is to be opened. Returns: str. The data read from the file. """ return self._get_data(filepath, mode)[0] def readlines(self, filepath, mode='r'): """Returns the tuple containing data line by line as read from the file in unicode form. Args: filepath: str. The file path from which data is to be read. mode: str. The mode in which the file is to be opened. Returns: tuple(str). The tuple containing data line by line as read from the file. """ return self._get_data(filepath, mode)[1] def _get_data(self, filepath, mode): """Returns the collected data from the file corresponding to the given filepath. Args: filepath: str. The file path from which data is to be read. mode: str. The mode in which the file is to be opened. Returns: tuple(str, tuple(str)). The tuple containing data read from the file as first element and tuple containing the text line by line as second element. """ key = (filepath, mode) if key not in self._CACHE_DATA_DICT: with python_utils.open_file(filepath, mode, newline='') as f: lines = f.readlines() self._CACHE_DATA_DICT[key] = (''.join(lines), tuple(lines)) return self._CACHE_DATA_DICT[key] def _get_linters_for_file_extension(file_extension_to_lint): """Return linters for the file extension type. Args: file_extension_to_lint: str. The file extension to be linted. Returns: (CustomLintChecks, ThirdPartyLintChecks). A 2-tuple containing objects of lint check classes to run in parallel processing. 
""" parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) custom_linters = [] third_party_linters = [] file_extension_type_js_ts = file_extension_to_lint == 'js' or ( file_extension_to_lint == 'ts') if file_extension_type_js_ts: general_files_to_lint = _FILES['.js'] + _FILES['.ts'] elif file_extension_to_lint == 'other': general_files_to_lint = _FILES['other'] else: general_files_to_lint = _FILES['.%s' % file_extension_to_lint] custom_linter, third_party_linter = general_purpose_linter.get_linters( general_files_to_lint, FILE_CACHE) custom_linters.append(custom_linter) if file_extension_type_js_ts: custom_linter, third_party_linter = js_ts_linter.get_linters( _FILES['.js'], _FILES['.ts'], FILE_CACHE) custom_linters.append(custom_linter) third_party_linters.append(third_party_linter) elif file_extension_to_lint == 'html': custom_linter, third_party_linter = html_linter.get_linters( _FILES['.html'], FILE_CACHE) custom_linters.append(custom_linter) third_party_linters.append(third_party_linter) config_path_for_css_in_html = os.path.join( parent_dir, 'oppia', '.stylelintrc') custom_linter, third_party_linter = css_linter.get_linters( config_path_for_css_in_html, _FILES['.html']) third_party_linters.append(third_party_linter) elif file_extension_to_lint == 'css': config_path_for_oppia_css = os.path.join( parent_dir, 'oppia', 'core', 'templates', 'css', '.stylelintrc') custom_linter, third_party_linter = css_linter.get_linters( config_path_for_oppia_css, _FILES['.css']) third_party_linters.append(third_party_linter) elif file_extension_to_lint == 'py': custom_linter, third_party_linter = python_linter.get_linters( _FILES['.py'], FILE_CACHE) custom_linters.append(custom_linter) third_party_linters.append(third_party_linter) elif file_extension_to_lint == 'other': custom_linter, _ = codeowner_linter.get_linters(FILE_CACHE) custom_linters.append(custom_linter) custom_linter, _ = other_files_linter.get_linters(FILE_CACHE) custom_linters.append(custom_linter) return custom_linters, third_party_linters def _get_changed_filepaths(): """Returns a list of modified files (both staged and unstaged) Returns: list. A list of filepaths of modified files. """ unstaged_files = subprocess.check_output([ 'git', 'diff', '--name-only', '--diff-filter=ACM']).splitlines() staged_files = subprocess.check_output([ 'git', 'diff', '--cached', '--name-only', '--diff-filter=ACM']).splitlines() all_changed_filepaths = unstaged_files + staged_files return [filepath for filepath in all_changed_filepaths] def _get_all_files_in_directory(dir_path, excluded_glob_patterns): """Recursively collects all files in directory and subdirectories of specified path. Args: dir_path: str. Path to the folder to be linted. excluded_glob_patterns: set(str). Set of all glob patterns to be excluded. Returns: list. A list of files in directory and subdirectories without excluded files. """ files_in_directory = [] for _dir, _, files in os.walk(dir_path): for file_name in files: filepath = os.path.relpath( os.path.join(_dir, file_name), os.getcwd()) if not any([ fnmatch.fnmatch(filepath, gp) for gp in excluded_glob_patterns]): files_in_directory.append(filepath) return files_in_directory def _get_file_extensions(file_extensions_to_lint): """This function is used to return the file extensions which need to be linted and checked. Args: file_extensions_to_lint: list(str). The list of file extensions to be linted and checked. Returns: list(str). The list of all file extensions to be linted and checked. 
""" all_file_extensions_type = ['js', 'py', 'html', 'css', 'other'] if file_extensions_to_lint: # Check if 'js' and 'ts' both are present in file_extensions_to_lint. js_and_ts_is_present = 'js' in file_extensions_to_lint and ( 'ts' in file_extensions_to_lint) if js_and_ts_is_present: python_utils.PRINT( 'Please use only one of "js" or "ts", as we do not have ' 'separate linters for JS and TS files. If both these options ' 'are used together, then the JS/TS linter will be run twice.') python_utils.PRINT('Exiting...') sys.exit(1) return set(file_extensions_to_lint) return all_file_extensions_type def _get_all_filepaths(input_path, input_filenames): """This function is used to return the filepaths which needs to be linted and checked. Args: input_path: str. The path of the directory to be linted and checked. input_filenames: list(str). The list of filenames to be linted and checked, ignored if input_path is specified. Returns: list(str). The list of filepaths to be linted and checked. """ eslintignore_path = os.path.join(os.getcwd(), '.eslintignore') if input_path: input_path = os.path.join(os.getcwd(), input_path) if not os.path.exists(input_path): python_utils.PRINT( 'Could not locate file or directory %s. Exiting.' % input_path) python_utils.PRINT('----------------------------------------') sys.exit(1) if os.path.isfile(input_path): all_filepaths = [input_path] else: excluded_glob_patterns = FILE_CACHE.readlines(eslintignore_path) all_filepaths = _get_all_files_in_directory( input_path, excluded_glob_patterns) elif input_filenames: valid_filepaths = [] invalid_filepaths = [] for filename in input_filenames: if os.path.isfile(filename): valid_filepaths.append(filename) else: invalid_filepaths.append(filename) if invalid_filepaths: python_utils.PRINT( 'The following file(s) do not exist: %s\n' 'Exiting.' % invalid_filepaths) sys.exit(1) all_filepaths = valid_filepaths else: all_filepaths = _get_changed_filepaths() all_filepaths = [ filename for filename in all_filepaths if not any( fnmatch.fnmatch(filename, pattern) for pattern in general_purpose_linter.EXCLUDED_PATHS ) ] return all_filepaths def read_files(file_paths): """Read all files to be checked and cache them. This will spin off multiple threads to increase the efficiency. """ threads = [] for file_path in file_paths: thread = threading.Thread(target=FILE_CACHE.read, args=(file_path,)) thread.start() threads.append(thread) for thread in threads: thread.join() def categorize_files(file_paths): """Categorize all the files and store them in shared variable _FILES.""" all_filepaths_dict = { '.py': [], '.html': [], '.ts': [], '.js': [], 'other': [], '.css': [] } for file_path in file_paths: _, extension = os.path.splitext(file_path) if extension in all_filepaths_dict: all_filepaths_dict[extension].append(file_path) else: all_filepaths_dict['other'].append(file_path) _FILES.update(all_filepaths_dict) def _print_summary_of_error_messages(lint_messages): """Print summary of linter error messages. Args: lint_messages: list(str). List of linter error messages. """ if lint_messages != '': error_message_lines = [ '----------------------------------------', 'Please fix the errors below:', '----------------------------------------', ] + lint_messages linter_utils.print_failure_message('\n'.join(error_message_lines)) def _get_task_output(lint_messages, failed, task): """Returns output of running tasks. Args: lint_messages: list(str). List of summary messages of linter output. failed: bool. The boolean to check if lint checks fail or not. 
task: object(TestingTaskSpec). The task object to get output of linter. Returns: bool. The boolean to check if the lint checks fail or not. """ if task.task_results: for task_result in task.task_results: lint_messages += task_result.trimmed_messages if task_result.failed: failed = True return failed def _print_errors_stacktrace(errors_stacktrace): """Print errors stacktrace caught during linter execution. Args: errors_stacktrace: list(str). List of error stacktrace of lint execution failure. """ python_utils.PRINT('') python_utils.PRINT( 'Unable to run the complete lint test, please check ' 'the following stack trace and fix the errors:') python_utils.PRINT('+--------------------------+') for stacktrace in errors_stacktrace: python_utils.PRINT(stacktrace) python_utils.PRINT('--------------------------------------------------') python_utils.PRINT('') python_utils.PRINT('--------------------------------------------------') python_utils.PRINT( 'Some of the linting functions may not run until the' ' above errors gets fixed') def _get_space_separated_linter_name(linter_name): """Returns the space separated name of the linter class. Args: linter_name: str. Name of the linter class. Returns: str. Space separated name of the linter class. """ return re.sub( r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))', r' \1', linter_name) def main(args=None): """Main method for pre commit linter script that lints Python, JavaScript, HTML, and CSS files. """ parsed_args = _PARSER.parse_args(args=args) # File extension to be linted. file_extension_types = _get_file_extensions( parsed_args.only_check_file_extensions) # Default mode is non-verbose mode, if arguments contains --verbose flag it # will be made True, which will represent verbose mode. verbose_mode_enabled = bool(parsed_args.verbose) all_filepaths = _get_all_filepaths(parsed_args.path, parsed_args.files) install_third_party_libs.main() common.fix_third_party_imports() python_utils.PRINT('Starting Linter....') if len(all_filepaths) == 0: python_utils.PRINT('---------------------------') python_utils.PRINT('No files to check.') python_utils.PRINT('---------------------------') return read_files(all_filepaths) categorize_files(all_filepaths) # Prepare custom tasks. custom_max_concurrent_runs = 25 custom_concurrent_count = min( multiprocessing.cpu_count(), custom_max_concurrent_runs) custom_semaphore = threading.Semaphore(custom_concurrent_count) # Prepare third_party tasks. third_party_max_concurrent_runs = 2 third_party_concurrent_count = min( multiprocessing.cpu_count(), third_party_max_concurrent_runs) third_party_semaphore = threading.Semaphore(third_party_concurrent_count) custom_linters = [] third_party_linters = [] for file_extension_type in file_extension_types: if (file_extension_type == 'js' or file_extension_type == 'ts'): if len(_FILES['.js'] + _FILES['.ts']) == 0: continue elif (not file_extension_type == 'other' and not len(_FILES['.%s' % file_extension_type])): continue custom_linter, third_party_linter = _get_linters_for_file_extension( file_extension_type) custom_linters += custom_linter third_party_linters += third_party_linter # Create tasks. 
tasks_custom = [] tasks_third_party = [] for linter in custom_linters: name = _get_space_separated_linter_name(type(linter).__name__) task_custom = concurrent_task_utils.create_task( linter.perform_all_lint_checks, verbose_mode_enabled, custom_semaphore, name=name) tasks_custom.append(task_custom) for linter in third_party_linters: name = _get_space_separated_linter_name(type(linter).__name__) task_third_party = concurrent_task_utils.create_task( linter.perform_all_lint_checks, verbose_mode_enabled, third_party_semaphore, name=name) tasks_third_party.append(task_third_party) # Execute tasks. # Here we set Concurrency limit for custom task to 25 because we need to # parallelize the tasks to work on full capacity of CPU. # Concurrency limit for third party tasks is set to 2 because these # third party libraries have their own ways to lint at their fastest # (ie. might parallelize on their own) # Concurrency limit: 25. concurrent_task_utils.execute_tasks(tasks_custom, custom_semaphore) # Concurrency limit: 2. concurrent_task_utils.execute_tasks( tasks_third_party, third_party_semaphore) lint_messages = [] failed = False for task in tasks_custom: failed = _get_task_output(lint_messages, failed, task) for task in tasks_third_party: failed = _get_task_output(lint_messages, failed, task) errors_stacktrace = concurrent_task_utils.ALL_ERRORS if errors_stacktrace: failed = True _print_errors_stacktrace(errors_stacktrace) if failed: _print_summary_of_error_messages(lint_messages) linter_utils.print_failure_message('\n'.join([ '---------------------------', 'Checks Not Passed.', '---------------------------'])) sys.exit(1) else: linter_utils.print_success_message('\n'.join([ '---------------------------', 'All Checks Passed.', '---------------------------'])) NAME_SPACE = multiprocessing.Manager().Namespace() PROCESSES = multiprocessing.Manager().dict() NAME_SPACE.files = FileCache() FILE_CACHE = NAME_SPACE.files # The 'no coverage' pragma is used as this line is un-testable. This is because # it will only be called when pre_commit_linter.py is used as a # script. if __name__ == '__main__': # pragma: no cover main()
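# --- Illustrative sketch (not part of pre_commit_linter.py above) ---
# main() above throttles custom lint checks to min(cpu_count, 25) concurrent
# threads and third-party checks to min(cpu_count, 2) by handing each group
# its own semaphore.  The stand-alone snippet below reproduces that
# throttling pattern with the standard library only; run_check is a
# placeholder for a lint task, not Oppia's concurrent_task_utils API.
import multiprocessing
import threading
import time


def run_check(name, semaphore):
    # Each worker waits for a free slot, does its work, then releases it.
    with semaphore:
        time.sleep(0.1)  # stand-in for an actual lint check
        print('finished %s' % name)


def run_all(names, max_concurrent_runs):
    semaphore = threading.Semaphore(
        min(multiprocessing.cpu_count(), max_concurrent_runs))
    threads = [threading.Thread(target=run_check, args=(name, semaphore))
               for name in names]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()


# Third-party checks would use max_concurrent_runs=2, custom checks 25.
run_all(['PythonLintChecks', 'HTMLLintChecks', 'CSSLintChecks'],
        max_concurrent_runs=2)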
custom.py
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from __future__ import print_function import binascii import datetime import errno import json import os import os.path import platform import random import re import ssl import stat import string import subprocess import sys import tempfile import threading import time import uuid import webbrowser from six.moves.urllib.request import urlopen # pylint: disable=import-error from six.moves.urllib.error import URLError # pylint: disable=import-error import yaml import dateutil.parser from dateutil.relativedelta import relativedelta from knack.log import get_logger from knack.util import CLIError from msrestazure.azure_exceptions import CloudError import requests from azure.cli.command_modules.acs import acs_client, proxy from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod from azure.cli.core.api import get_config_dir from azure.cli.core._profile import Profile from azure.cli.core.commands.client_factory import get_mgmt_service_client from azure.cli.core.keys import is_valid_ssh_rsa_public_key from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait from azure.graphrbac.models import (ApplicationCreateParameters, PasswordCredential, KeyCredential, ServicePrincipalCreateParameters, GetObjectsParameters, ResourceAccess, RequiredResourceAccess) from azure.mgmt.containerservice.models import ContainerServiceLinuxProfile from azure.mgmt.containerservice.models import ContainerServiceNetworkProfile from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes from azure.mgmt.containerservice.models import ContainerServiceServicePrincipalProfile from azure.mgmt.containerservice.models import ContainerServiceSshConfiguration from azure.mgmt.containerservice.models import ContainerServiceSshPublicKey from azure.mgmt.containerservice.models import ContainerServiceStorageProfileTypes from azure.mgmt.containerservice.v2018_03_31.models import ManagedCluster from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAADProfile from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAddonProfile from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAgentPoolProfile from azure.mgmt.containerservice.models import OpenShiftManagedClusterAgentPoolProfile from azure.mgmt.containerservice.models import OpenShiftAgentPoolProfileRole from azure.mgmt.containerservice.models import OpenShiftManagedClusterIdentityProvider from azure.mgmt.containerservice.models import OpenShiftManagedClusterAADIdentityProvider from azure.mgmt.containerservice.models import OpenShiftManagedCluster from azure.mgmt.containerservice.models import OpenShiftRouterProfile from azure.mgmt.containerservice.models import OpenShiftManagedClusterAuthProfile from azure.mgmt.containerservice.models import NetworkProfile from ._client_factory import cf_container_services from ._client_factory import cf_resource_groups from ._client_factory import get_auth_management_client from ._client_factory import get_graph_rbac_management_client from ._client_factory import cf_resources logger = get_logger(__name__) # pylint:disable=too-many-lines,unused-argument def 
which(binary): path_var = os.getenv('PATH') if platform.system() == 'Windows': binary = binary + '.exe' parts = path_var.split(';') else: parts = path_var.split(':') for part in parts: bin_path = os.path.join(part, binary) if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK): return bin_path return None def wait_then_open(url): """ Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL. """ for _ in range(1, 10): try: urlopen(url, context=_ssl_context()) except URLError: time.sleep(1) break webbrowser.open_new_tab(url) def wait_then_open_async(url): """ Spawns a thread that waits for a bit then opens a URL. """ t = threading.Thread(target=wait_then_open, args=({url})) t.daemon = True t.start() def acs_browse(cmd, client, resource_group, name, disable_browser=False, ssh_key_file=None): """ Opens a browser to the web interface for the cluster orchestrator :param name: Name of the target Azure container service instance. :type name: String :param resource_group_name: Name of Azure container service's resource group. :type resource_group_name: String :param disable_browser: If true, don't launch a web browser after estabilishing the proxy :type disable_browser: bool :param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS :type ssh_key_file: string """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file) def _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file): orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member if str(orchestrator_type).lower() == 'kubernetes' or \ orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \ (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member return k8s_browse(cmd, client, name, resource_group, disable_browser, ssh_key_file=ssh_key_file) elif str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos: return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file) else: raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type)) def k8s_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None): """ Launch a proxy and browse the Kubernetes web UI. :param disable_browser: If true, don't launch a web browser after estabilishing the proxy :type disable_browser: bool """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file) def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file): if not which('kubectl'): raise CLIError('Can not find kubectl executable in PATH') browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml') if os.path.exists(browse_path): os.remove(browse_path) _k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False) logger.warning('Proxy running on 127.0.0.1:8001/ui') logger.warning('Press CTRL+C to close the tunnel...') if not disable_browser: wait_then_open_async('http://127.0.0.1:8001/ui') subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"]) def dcos_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None): """ Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser. 
:param name: name: Name of the target Azure container service instance. :type name: String :param resource_group_name: Name of Azure container service's resource group. :type resource_group_name: String :param disable_browser: If true, don't launch a web browser after estabilishing the proxy :type disable_browser: bool :param ssh_key_file: Path to the SSH key to use :type ssh_key_file: string """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) _dcos_browse_internal(acs_info, disable_browser, ssh_key_file) def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file): if not os.path.isfile(ssh_key_file): raise CLIError('Private key file {} does not exist'.format(ssh_key_file)) acs = acs_client.ACSClient() if not acs.connect(_get_host_name(acs_info), _get_username(acs_info), key_filename=ssh_key_file): raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info))) octarine_bin = '/opt/mesosphere/bin/octarine' if not acs.file_exists(octarine_bin): raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin)) proxy_id = _rand_str(16) proxy_cmd = '{} {}'.format(octarine_bin, proxy_id) acs.run(proxy_cmd, background=True) # Parse the output to get the remote PORT proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id) stdout, _ = acs.run(proxy_client_cmd) remote_port = int(stdout.read().decode().strip()) local_port = acs.get_available_local_port() # Set the proxy proxy.set_http_proxy('127.0.0.1', local_port) logger.warning('Proxy running on 127.0.0.1:%s', local_port) logger.warning('Press CTRL+C to close the tunnel...') if not disable_browser: wait_then_open_async('http://127.0.0.1') try: acs.create_tunnel( remote_host='127.0.0.1', remote_port=remote_port, local_port=local_port) finally: proxy.disable_http_proxy() return def acs_install_cli(cmd, client, resource_group, name, install_location=None, client_version=None): acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member kwargs = {'install_location': install_location} if client_version: kwargs['client_version'] = client_version if orchestrator_type == 'kubernetes': return k8s_install_cli(**kwargs) elif orchestrator_type == 'dcos': return dcos_install_cli(**kwargs) else: raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type)) def _ssl_context(): if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'): try: return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6 except AttributeError: return ssl.SSLContext(ssl.PROTOCOL_TLSv1) return ssl.create_default_context() def _urlretrieve(url, filename): req = urlopen(url, context=_ssl_context()) with open(filename, "wb") as f: f.write(req.read()) def dcos_install_cli(cmd, install_location=None, client_version='1.8'): """ Downloads the dcos command line from Mesosphere """ system = platform.system() if not install_location: raise CLIError( "No install location specified and it could not be determined from the current platform '{}'".format( system)) base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}' if system == 'Windows': file_url = base_url.format('windows', client_version, 'dcos.exe') elif system == 'Linux': # TODO Support ARM CPU here file_url = base_url.format('linux', client_version, 'dcos') elif system == 'Darwin': file_url = base_url.format('darwin', client_version, 'dcos') else: raise CLIError('Proxy server ({}) does 
not exist on the cluster.'.format(system)) logger.warning('Downloading client to %s', install_location) try: _urlretrieve(file_url, install_location) os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) except IOError as err: raise CLIError('Connection error while attempting to download client ({})'.format(err)) def k8s_install_cli(cmd, client_version='latest', install_location=None): """Install kubectl, a command-line interface for Kubernetes clusters.""" source_url = "https://storage.googleapis.com/kubernetes-release/release" cloud_name = cmd.cli_ctx.cloud.name if cloud_name.lower() == 'azurechinacloud': source_url = 'https://mirror.azure.cn/kubernetes/kubectl' if client_version == 'latest': context = _ssl_context() version = urlopen(source_url + '/stable.txt', context=context).read() client_version = version.decode('UTF-8').strip() else: client_version = "v%s" % client_version file_url = '' system = platform.system() base_url = source_url + '/{}/bin/{}/amd64/{}' # ensure installation directory exists install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location) if not os.path.exists(install_dir): os.makedirs(install_dir) if system == 'Windows': file_url = base_url.format(client_version, 'windows', 'kubectl.exe') elif system == 'Linux': # TODO: Support ARM CPU here file_url = base_url.format(client_version, 'linux', 'kubectl') elif system == 'Darwin': file_url = base_url.format(client_version, 'darwin', 'kubectl') else: raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(system)) logger.warning('Downloading client to "%s" from "%s"', install_location, file_url) try: _urlretrieve(file_url, install_location) os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) except IOError as ex: raise CLIError('Connection error while attempting to download client ({})'.format(ex)) if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs env_paths = os.environ['PATH'].split(';') found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None) if not found: # pylint: disable=logging-format-interpolation logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n' ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. ' 'This is good for the current command session.\n' ' 2. Update system PATH environment variable by following ' '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. 
' 'You only need to do it once'.format(install_dir, cli)) else: logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.', install_dir, cli) def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector', location=None, service_principal=None, client_secret=None, chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None): _k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name, location, service_principal, client_secret, chart_url, os_type, image_tag, aci_resource_group) def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector', location=None, service_principal=None, client_secret=None, chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None): _k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name, location, service_principal, client_secret, chart_url, os_type, image_tag, aci_resource_group) def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name, location, service_principal, client_secret, chart_url, os_type, image_tag, aci_resource_group): from subprocess import PIPE, Popen instance = client.get(resource_group_name, name) helm_not_installed = 'Helm not detected, please verify if it is installed.' url_chart = chart_url if image_tag is None: image_tag = 'latest' # Check if Helm is installed locally try: Popen(["helm"], stdout=PIPE, stderr=PIPE) except OSError: raise CLIError(helm_not_installed) # If SPN is specified, the secret should also be specified if service_principal is not None and client_secret is None: raise CLIError('--client-secret must be specified when --service-principal is specified') # Validate if the RG exists rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name) # Auto assign the location if location is None: location = rg_location norm_location = location.replace(' ', '').lower() # Validate the location upon the ACI avaiable regions _validate_aci_location(norm_location) # Get the credentials from a AKS instance _, browse_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path) subscription_id = _get_subscription_id(cmd.cli_ctx) # Get the TenantID profile = Profile(cli_ctx=cmd.cli_ctx) _, _, tenant_id = profile.get_login_credentials() # Check if we want the linux connector if os_type.lower() in ['linux', 'both']: _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal, client_secret, subscription_id, tenant_id, aci_resource_group, norm_location, 'Linux', instance.enable_rbac, instance.fqdn) # Check if we want the windows connector if os_type.lower() in ['windows', 'both']: _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal, client_secret, subscription_id, tenant_id, aci_resource_group, norm_location, 'Windows', instance.enable_rbac, instance.fqdn) def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal, client_secret, subscription_id, tenant_id, aci_resource_group, norm_location, os_type, use_rbac, masterFqdn): rbac_install = "true" if use_rbac else "false" node_taint = 'azure.com/aci' helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location node_name = 'virtual-kubelet-' + helm_release_name k8s_master = 
'https://{}'.format(masterFqdn) logger.warning("Deploying the ACI connector for '%s' using Helm", os_type) try: values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format( node_name, node_taint, os_type, image_tag, rbac_install) if service_principal: values += ",env.azureClientId=" + service_principal if client_secret: values += ",env.azureClientKey=" + client_secret if subscription_id: values += ",env.azureSubscriptionId=" + subscription_id if tenant_id: values += ",env.azureTenantId=" + tenant_id if aci_resource_group: values += ",env.aciResourceGroup=" + aci_resource_group if norm_location: values += ",env.aciRegion=" + norm_location # Currently, we need to set the master FQDN. # This is temporary and we should remove it when possible values += ",env.masterUri=" + k8s_master if helm_cmd == "install": subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values]) elif helm_cmd == "upgrade": subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values]) except subprocess.CalledProcessError as err: raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err)) def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector', location=None, graceful=False, os_type='Linux'): from subprocess import PIPE, Popen helm_not_installed = "Error : Helm not detected, please verify if it is installed." # Check if Helm is installed locally try: Popen(["helm"], stdout=PIPE, stderr=PIPE) except OSError: raise CLIError(helm_not_installed) # Get the credentials from a AKS instance _, browse_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path) # Validate if the RG exists rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) # Auto assign the location if location is None: location = rg_location norm_location = location.replace(' ', '').lower() if os_type.lower() in ['linux', 'both']: helm_release_name = connector_name.lower() + '-linux-' + norm_location node_name = 'virtual-kubelet-' + helm_release_name _undeploy_connector(graceful, node_name, helm_release_name) if os_type.lower() in ['windows', 'both']: helm_release_name = connector_name.lower() + '-windows-' + norm_location node_name = 'virtual-kubelet-' + helm_release_name _undeploy_connector(graceful, node_name, helm_release_name) def _undeploy_connector(graceful, node_name, helm_release_name): if graceful: logger.warning('Graceful option selected, will try to drain the node first') from subprocess import PIPE, Popen kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.' 
try: Popen(["kubectl"], stdout=PIPE, stderr=PIPE) except OSError: raise CLIError(kubectl_not_installed) try: drain_node = subprocess.check_output( ['kubectl', 'drain', node_name, '--force', '--delete-local-data'], universal_newlines=True) if not drain_node: raise CLIError('Could not find the node, make sure you' + ' are using the correct --os-type') except subprocess.CalledProcessError as err: raise CLIError('Could not find the node, make sure you are using the correct' + ' --connector-name, --location and --os-type options: {}'.format(err)) logger.warning("Undeploying the '%s' using Helm", helm_release_name) try: subprocess.call(['helm', 'del', helm_release_name, '--purge']) except subprocess.CalledProcessError as err: raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err)) try: subprocess.check_output( ['kubectl', 'delete', 'node', node_name], universal_newlines=True) except subprocess.CalledProcessError as err: raise CLIError('Could not delete the node, make sure you are using the correct' + ' --connector-name, --location and --os-type options: {}'.format(err)) def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret): # use get_progress_controller hook = cli_ctx.get_progress_controller(True) hook.add(messsage='Creating service principal', value=0, total_val=1.0) logger.info('Creating service principal') # always create application with 5 years expiration start_date = datetime.datetime.utcnow() end_date = start_date + relativedelta(years=5) result = create_application(rbac_client.applications, name, url, [url], password=client_secret, start_date=start_date, end_date=end_date) service_principal = result.app_id # pylint: disable=no-member for x in range(0, 10): hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0) try: create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client) break # TODO figure out what exception AAD throws here sometimes. 
except Exception as ex: # pylint: disable=broad-except logger.info(ex) time.sleep(2 + 2 * x) else: return False hook.add(message='Finished service principal creation', value=1.0, total_val=1.0) logger.info('Finished service principal creation') return service_principal def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None): # AAD can have delays in propagating data, so sleep and retry hook = cli_ctx.get_progress_controller(True) hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0) logger.info('Waiting for AAD role to propagate') for x in range(0, 10): hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0) try: # TODO: break this out into a shared utility library create_role_assignment(cli_ctx, role, service_principal, scope=scope) break except CloudError as ex: if ex.message == 'The role assignment already exists.': break logger.info(ex.message) except: # pylint: disable=bare-except pass time.sleep(delay + delay * x) else: return False hook.add(message='AAD role propagation done', value=1.0, total_val=1.0) logger.info('AAD role propagation done') return True def _get_subscription_id(cli_ctx): _, sub_id, _ = Profile(cli_ctx=cli_ctx).get_login_credentials(subscription_id=None) return sub_id def _get_default_dns_prefix(name, resource_group_name, subscription_id): # Use subscription id to provide uniqueness and prevent DNS name clashes name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10] if not name_part[0].isalpha(): name_part = (str('a') + name_part)[0:10] resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16] return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6]) def list_acs_locations(cmd, client): return { "productionRegions": regions_in_prod, "previewRegions": regions_in_preview } def _generate_windows_profile(windows, admin_username, admin_password): if windows: if not admin_password: raise CLIError('--admin-password is required.') if len(admin_password) < 6: raise CLIError('--admin-password must be at least 6 characters') windows_profile = { "adminUsername": admin_username, "adminPassword": admin_password, } return windows_profile return None def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix, master_vm_size, master_osdisk_size, master_vnet_subnet_id, master_first_consecutive_static_ip, master_storage_profile): master_pool_profile = {} default_master_pool_profile = { "count": int(master_count), "dnsPrefix": dns_name_prefix + 'mgmt', } if api_version == "2017-07-01": default_master_pool_profile = _update_dict(default_master_pool_profile, { "count": int(master_count), "dnsPrefix": dns_name_prefix + 'mgmt', "vmSize": master_vm_size, "osDiskSizeGB": int(master_osdisk_size), "vnetSubnetID": master_vnet_subnet_id, "firstConsecutiveStaticIP": master_first_consecutive_static_ip, "storageProfile": master_storage_profile, }) if not master_profile: master_pool_profile = default_master_pool_profile else: master_pool_profile = _update_dict(default_master_pool_profile, master_profile) return master_pool_profile def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix, agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id, agent_ports, agent_storage_profile): agent_pool_profiles = [] default_agent_pool_profile = { "count": int(agent_count), "vmSize": agent_vm_size, "osType": os_type, "dnsPrefix": dns_name_prefix + 'agent', } if api_version == "2017-07-01": default_agent_pool_profile = 
_update_dict(default_agent_pool_profile, { "count": int(agent_count), "vmSize": agent_vm_size, "osDiskSizeGB": int(agent_osdisk_size), "osType": os_type, "dnsPrefix": dns_name_prefix + 'agent', "vnetSubnetID": agent_vnet_subnet_id, "ports": agent_ports, "storageProfile": agent_storage_profile, }) if agent_profiles is None: agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"})) else: # override agentPoolProfiles by using the passed in agent_profiles for idx, ap in enumerate(agent_profiles): # if the user specified dnsPrefix, we honor that # otherwise, we use the idx to avoid duplicate dns name a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap) agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a)) return agent_pool_profiles def _generate_outputs(name, orchestrator_type, admin_username): # define outputs outputs = { "masterFQDN": { "type": "string", "value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long }, "sshMaster0": { "type": "string", "value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long }, } if orchestrator_type.lower() != "kubernetes": outputs["agentFQDN"] = { "type": "string", "value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long } # override sshMaster0 for non-kubernetes scenarios outputs["sshMaster0"] = { "type": "string", "value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long } return outputs def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile, agent_pool_profiles, ssh_key_value, admin_username, windows_profile): properties = { "orchestratorProfile": { "orchestratorType": orchestrator_type, }, "masterProfile": master_pool_profile, "agentPoolProfiles": agent_pool_profiles, "linuxProfile": { "ssh": { "publicKeys": [ { "keyData": ssh_key_value } ] }, "adminUsername": admin_username }, } if api_version == "2017-07-01": properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version if windows_profile is not None: properties["windowsProfile"] = windows_profile return properties # pylint: disable=too-many-locals def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None, location=None, admin_username="azureuser", api_version=None, master_profile=None, master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="", master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="", agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0, agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="", orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None, windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument validate=False, no_wait=False): """Create a new Acs. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param deployment_name: The name of the deployment. 
:type deployment_name: str :param dns_name_prefix: Sets the Domain name prefix for the cluster. The concatenation of the domain name and the regionalized DNS zone make up the fully qualified domain name associated with the public IP address. :type dns_name_prefix: str :param name: Resource name for the container service. :type name: str :param ssh_key_value: Configure all linux machines with the SSH RSA public key string. Your key should include three parts, for example 'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm :type ssh_key_value: str :param content_version: If included it must match the ContentVersion in the template. :type content_version: str :param admin_username: User name for the Linux Virtual Machines. :type admin_username: str :param api_version: ACS API version to use :type api_version: str :param master_profile: MasterProfile used to describe master pool :type master_profile: dict :param master_vm_size: The size of master pool Virtual Machine :type master_vm_size: str :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine :type master_osdisk_size: int :param master_count: The number of masters for the cluster. :type master_count: int :param master_vnet_subnet_id: The vnet subnet id for master pool :type master_vnet_subnet_id: str :param master_storage_profile: The storage profile used for master pool. Possible value could be StorageAccount, ManagedDisk. :type master_storage_profile: str :param agent_profiles: AgentPoolProfiles used to describe agent pools :type agent_profiles: dict :param agent_vm_size: The size of the Virtual Machine. :type agent_vm_size: str :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine :type agent_osdisk_size: int :param agent_vnet_subnet_id: The vnet subnet id for master pool :type agent_vnet_subnet_id: str :param agent_ports: the ports exposed on the agent pool :type agent_ports: list :param agent_storage_profile: The storage profile used for agent pool. Possible value could be StorageAccount, ManagedDisk. :type agent_storage_profile: str :param location: Location for VM resources. :type location: str :param orchestrator_type: The type of orchestrator used to manage the applications on the cluster. :type orchestrator_type: str or :class:`orchestratorType <Default.models.orchestratorType>` :param tags: Tags object. :type tags: object :param windows: If true, the cluster will be built for running Windows container. :type windows: bool :param admin_password: The adminstration password for Windows nodes. 
Only available if --windows=true :type admin_password: str :param bool raw: returns the direct response alongside the deserialized response :rtype: :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>` instance that returns :class:`DeploymentExtended <Default.models.DeploymentExtended>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value): raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value)) subscription_id = _get_subscription_id(cmd.cli_ctx) if not dns_name_prefix: dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id) rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) if location is None: location = rg_location # if api-version is not specified, or specified in a version not supported # override based on location if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]: if location in regions_in_preview: api_version = "2017-07-01" # 2017-07-01 supported in the preview locations else: api_version = "2017-01-31" # 2017-01-31 applied to other locations if orchestrator_type.lower() == 'kubernetes': principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id, dns_name_prefix, location, name) client_secret = principal_obj.get("client_secret") service_principal = principal_obj.get("service_principal") elif windows: raise CLIError('--windows is only supported for Kubernetes clusters') # set location if void if not location: location = '[resourceGroup().location]' # set os_type os_type = 'Linux' if windows: os_type = 'Windows' # set agent_ports if void if not agent_ports: agent_ports = [] # get windows_profile windows_profile = _generate_windows_profile(windows, admin_username, admin_password) # The resources.properties fields should match with ContainerServices' api model master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix, master_vm_size, master_osdisk_size, master_vnet_subnet_id, master_first_consecutive_static_ip, master_storage_profile) agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix, agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id, agent_ports, agent_storage_profile) outputs = _generate_outputs(name, orchestrator_type, admin_username) properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile, agent_pool_profiles, ssh_key_value, admin_username, windows_profile) resource = { "apiVersion": api_version, "location": location, "type": "Microsoft.ContainerService/containerServices", "name": name, "tags": tags, "properties": properties, } template = { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "resources": [ resource, ], "outputs": outputs, } params = {} if service_principal is not None and client_secret is not None: properties["servicePrincipalProfile"] = { "clientId": service_principal, "secret": "[parameters('clientSecret')]", } template["parameters"] = { "clientSecret": { "type": "secureString", "metadata": { "description": "The client secret for the service principal" } } } params = { "clientSecret": { "value": client_secret } } # Due to SPN replication latency, we do a few retries here 
max_retry = 30 retry_exception = Exception(None) for _ in range(0, max_retry): try: return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name, template, params, validate, no_wait) except CloudError as ex: retry_exception = ex if 'is not valid according to the validation procedure' in ex.message or \ 'The credentials in ServicePrincipalProfile were invalid' in ex.message or \ 'not found in Active Directory tenant' in ex.message: time.sleep(3) else: raise ex raise retry_exception def store_acs_service_principal(subscription_id, client_secret, service_principal, file_name='acsServicePrincipal.json'): obj = {} if client_secret: obj['client_secret'] = client_secret if service_principal: obj['service_principal'] = service_principal config_path = os.path.join(get_config_dir(), file_name) full_config = load_service_principals(config_path=config_path) if not full_config: full_config = {} full_config[subscription_id] = obj with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600), 'w+') as spFile: json.dump(full_config, spFile) def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'): config_path = os.path.join(get_config_dir(), file_name) config = load_service_principals(config_path) if not config: return None return config.get(subscription_id) def load_service_principals(config_path): if not os.path.exists(config_path): return None fd = os.open(config_path, os.O_RDONLY) try: with os.fdopen(fd) as f: return shell_safe_json_parse(f.read()) except: # pylint: disable=bare-except return None def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait, subscription_id=None): from azure.mgmt.resource.resources import ResourceManagementClient from azure.mgmt.resource.resources.models import DeploymentProperties properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental') smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments if validate: logger.info('==== BEGIN TEMPLATE ====') logger.info(json.dumps(template, indent=2)) logger.info('==== END TEMPLATE ====') return smc.validate(resource_group_name, deployment_name, properties) return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties) def k8s_get_credentials(cmd, client, name, resource_group_name, path=os.path.join(os.path.expanduser('~'), '.kube', 'config'), ssh_key_file=None, overwrite_existing=False): """Download and install kubectl credentials from the cluster master :param name: The name of the cluster. :type name: str :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :param path: Where to install the kubectl config file :type path: str :param ssh_key_file: Path to an SSH key file to use :type ssh_key_file: str """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name) _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing) def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing): if ssh_key_file is not None and not os.path.isfile(ssh_key_file): raise CLIError('Private key file {} does not exist'.format(ssh_key_file)) dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member location = acs_info.location # pylint: disable=no-member user = acs_info.linux_profile.admin_username # pylint: disable=no-member _mkdir_p(os.path.dirname(path)) path_candidate = path ix = 0 while os.path.exists(path_candidate): ix += 1 path_candidate = '{}-{}-{}'.format(path, name, ix) # TODO: this only works for public cloud, need other casing for national clouds acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location), '.kube/config', path_candidate, key_filename=ssh_key_file) # merge things if path_candidate != path: try: merge_kubernetes_configurations(path, path_candidate, overwrite_existing) except yaml.YAMLError as exc: logger.warning('Failed to merge credentials to kube config file: %s', exc) logger.warning('The credentials have been saved to %s', path_candidate) def _handle_merge(existing, addition, key, replace): if not addition[key]: return if existing[key] is None: existing[key] = addition[key] return for i in addition[key]: for j in existing[key]: if i['name'] == j['name']: if replace or i == j: existing[key].remove(j) else: from knack.prompting import prompt_y_n, NoTTYException msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?' overwrite = False try: overwrite = prompt_y_n(msg.format(i['name'])) except NoTTYException: pass if overwrite: existing[key].remove(j) else: msg = 'A different object named {} already exists in {} in your kubeconfig file.' 
                        raise CLIError(msg.format(i['name'], key))
        existing[key].append(i)


def load_kubernetes_configuration(filename):
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise CLIError('{} does not exist'.format(filename))
        else:
            raise
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))


def merge_kubernetes_configurations(existing_file, addition_file, replace):
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)

    # fail fast if the additional configuration could not be loaded,
    # before `addition` is dereferenced below
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))

    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue

    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']

    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)

    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)

    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
    print(msg)


def _get_host_name(acs_info):
    """
    Gets the FQDN from the acs_info object.

    :param acs_info: ContainerService object from Azure REST API
    :type acs_info: ContainerService
    """
    if acs_info is None:
        raise CLIError('Missing acs_info')
    if acs_info.master_profile is None:
        raise CLIError('Missing master_profile')
    if acs_info.master_profile.fqdn is None:
        raise CLIError('Missing fqdn')
    return acs_info.master_profile.fqdn


def _get_username(acs_info):
    """
    Gets the admin user name from the Linux profile of the ContainerService object.

    :param acs_info: ContainerService object from Azure REST API
    :type acs_info: ContainerService
    """
    if acs_info.linux_profile is not None:
        return acs_info.linux_profile.admin_username
    return None


def _get_acs_info(cli_ctx, name, resource_group_name):
    """
    Gets the ContainerService object from Azure REST API.
:param name: ACS resource name :type name: String :param resource_group_name: Resource group name :type resource_group_name: String """ container_services = cf_container_services(cli_ctx, None) return container_services.get(resource_group_name, name) def _rand_str(n): """ Gets a random string """ choices = string.ascii_lowercase + string.digits return ''.join(random.SystemRandom().choice(choices) for _ in range(n)) def _mkdir_p(path): # http://stackoverflow.com/a/600612 try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count): instance = client.get(resource_group_name, container_service_name) instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member # null out the service principal because otherwise validation complains if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes: instance.service_principal_profile = None # null out the windows profile so that validation doesn't complain about not having the admin password instance.windows_profile = None return client.create_or_update(resource_group_name, container_service_name, instance) def list_container_services(cmd, client, resource_group_name=None): ''' List Container Services. ''' svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \ if resource_group_name else client.list() return list(svc_list) def show_service_principal(client, identifier): object_id = _resolve_service_principal(client, identifier) return client.get(object_id) def _resolve_service_principal(client, identifier): # todo: confirm with graph team that a service principal name must be unique result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier))) if result: return result[0].object_id try: uuid.UUID(identifier) return identifier # assume an object id except ValueError: raise CLIError("service principal '{}' doesn't exist".format(identifier)) def create_application(client, display_name, homepage, identifier_uris, available_to_other_tenants=False, password=None, reply_urls=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None, required_resource_accesses=None): from azure.graphrbac.models import GraphErrorException password_creds, key_creds = _build_application_creds(password, key_value, key_type, key_usage, start_date, end_date) app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants, display_name=display_name, identifier_uris=identifier_uris, homepage=homepage, reply_urls=reply_urls, key_credentials=key_creds, password_credentials=password_creds, required_resource_access=required_resource_accesses) try: return client.create(app_create_param) except GraphErrorException as ex: if 'insufficient privileges' in str(ex).lower(): link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long raise CLIError("Directory permission is needed for the current user to register the application. " "For how to configure, please refer '{}'. 
Original error: {}".format(link, ex)) raise def update_application(client, object_id, display_name, homepage, identifier_uris, available_to_other_tenants=False, password=None, reply_urls=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None, required_resource_accesses=None): from azure.graphrbac.models import GraphErrorException password_creds, key_creds = _build_application_creds(password, key_value, key_type, key_usage, start_date, end_date) try: if key_creds: client.update_key_credentials(object_id, key_creds) if password_creds: client.update_password_credentials(object_id, password_creds) return except GraphErrorException as ex: if 'insufficient privileges' in str(ex).lower(): link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long raise CLIError("Directory permission is needed for the current user to register the application. " "For how to configure, please refer '{}'. Original error: {}".format(link, ex)) raise def _build_application_creds(password=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None): if password and key_value: raise CLIError('specify either --password or --key-value, but not both.') if not start_date: start_date = datetime.datetime.utcnow() elif isinstance(start_date, str): start_date = dateutil.parser.parse(start_date) if not end_date: end_date = start_date + relativedelta(years=1) elif isinstance(end_date, str): end_date = dateutil.parser.parse(end_date) key_type = key_type or 'AsymmetricX509Cert' key_usage = key_usage or 'Verify' password_creds = None key_creds = None if password: password_creds = [PasswordCredential(start_date=start_date, end_date=end_date, key_id=str(uuid.uuid4()), value=password)] elif key_value: key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value, key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)] return (password_creds, key_creds) def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None): if rbac_client is None: rbac_client = get_graph_rbac_management_client(cli_ctx) if resolve_app: try: uuid.UUID(identifier) result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier))) except ValueError: result = list(rbac_client.applications.list( filter="identifierUris/any(s:s eq '{}')".format(identifier))) if not result: # assume we get an object id result = [rbac_client.applications.get(identifier)] app_id = result[0].app_id else: app_id = identifier return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True)) def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None): return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope) def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True): from azure.cli.core.profiles import ResourceType, get_sdk factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments definitions_client = factory.role_definitions scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id) role_id = _resolve_role_id(role, scope, definitions_client) object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION, 'RoleAssignmentCreateParameters', 
mod='models', operation_group='role_assignments') parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id) assignment_name = uuid.uuid4() custom_headers = None return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers) def _build_role_scope(resource_group_name, scope, subscription_id): subscription_scope = '/subscriptions/' + subscription_id if scope: if resource_group_name: err = 'Resource group "{}" is redundant because scope is supplied' raise CLIError(err.format(resource_group_name)) elif resource_group_name: scope = subscription_scope + '/resourceGroups/' + resource_group_name else: scope = subscription_scope return scope def _resolve_role_id(role, scope, definitions_client): role_id = None try: uuid.UUID(role) role_id = role except ValueError: pass if not role_id: # retrieve role id role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role))) if not role_defs: raise CLIError("Role '{}' doesn't exist.".format(role)) elif len(role_defs) > 1: ids = [r.id for r in role_defs] err = "More than one role matches the given name '{}'. Please pick a value from '{}'" raise CLIError(err.format(role, ids)) role_id = role_defs[0].id return role_id def _resolve_object_id(cli_ctx, assignee): client = get_graph_rbac_management_client(cli_ctx) result = None if assignee.find('@') >= 0: # looks like a user principal name result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee))) if not result: result = list(client.service_principals.list( filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee))) if not result: # assume an object id, let us verify it result = _get_object_stubs(client, [assignee]) # 2+ matches should never happen, so we only check 'no match' here if not result: raise CLIError("No matches in graph database for '{}'".format(assignee)) return result[0].object_id def _get_object_stubs(graph_client, assignees): params = GetObjectsParameters(include_directory_object_references=True, object_ids=assignees) return list(graph_client.objects.get_objects_by_object_ids(params)) def _update_dict(dict1, dict2): cp = dict1.copy() cp.update(dict2) return cp def subnet_role_assignment_exists(cli_ctx, scope): network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7" factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'): if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id): return True return False def aks_browse(cmd, client, resource_group_name, name, disable_browser=False, listen_address='127.0.0.1', listen_port='8001'): if not which('kubectl'): raise CLIError('Can not find kubectl executable in PATH') proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port) _, browse_path = tempfile.mkstemp() # TODO: need to add an --admin option? 
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
                                     json={"url": result['url']})
        logger.warning('To view the console, please open %s in a new tab', result['url'])
    else:
        logger.warning('Proxy running on %s', proxy_url)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(proxy_url)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                     "port-forward", "--address", listen_address, dashboard_pod,
                                     "{0}:9090".format(listen_port)], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # fall back to a plain port-forward only when kubectl actually reports that it
            # does not know the "--address" flag (bytes.find returns -1 when not found)
            if err.output.find(b'unknown flag: --address') != -1:
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                 "port-forward", dashboard_pod, "{0}:9090".format(listen_port)])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        # TODO: Better error handling here.
requests.post('http://localhost:8888/closeport/8001') def _trim_nodepoolname(nodepool_name): if not nodepool_name: return "nodepool1" return nodepool_name[:12] def _validate_ssh_key(no_ssh_key, ssh_key_value): if not no_ssh_key: try: if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value): raise ValueError() except (TypeError, ValueError): shortened_key = truncate_text(ssh_key_value) raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key)) # pylint: disable=too-many-statements def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals dns_name_prefix=None, location=None, admin_username="azureuser", kubernetes_version='', node_vm_size="Standard_DS2_v2", node_osdisk_size=0, node_count=3, nodepool_name="nodepool1", service_principal=None, client_secret=None, no_ssh_key=False, disable_rbac=None, enable_rbac=None, skip_subnet_role_assignment=False, network_plugin=None, network_policy=None, pod_cidr=None, service_cidr=None, dns_service_ip=None, docker_bridge_address=None, enable_addons=None, workspace_resource_id=None, vnet_subnet_id=None, max_pods=0, aad_client_app_id=None, aad_server_app_id=None, aad_server_app_secret=None, aad_tenant_id=None, tags=None, generate_ssh_keys=False, # pylint: disable=unused-argument no_wait=False): _validate_ssh_key(no_ssh_key, ssh_key_value) subscription_id = _get_subscription_id(cmd.cli_ctx) if not dns_name_prefix: dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id) rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) if location is None: location = rg_location agent_pool_profile = ManagedClusterAgentPoolProfile( name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it count=int(node_count), vm_size=node_vm_size, os_type="Linux", storage_profile=ContainerServiceStorageProfileTypes.managed_disks, vnet_subnet_id=vnet_subnet_id, max_pods=int(max_pods) if max_pods else None ) if node_osdisk_size: agent_pool_profile.os_disk_size_gb = int(node_osdisk_size) linux_profile = None # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified. if not no_ssh_key: ssh_config = ContainerServiceSshConfiguration( public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)]) linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config) principal_obj = _ensure_aks_service_principal(cmd.cli_ctx, service_principal=service_principal, client_secret=client_secret, subscription_id=subscription_id, dns_name_prefix=dns_name_prefix, location=location, name=name) service_principal_profile = ContainerServiceServicePrincipalProfile( client_id=principal_obj.get("service_principal"), secret=principal_obj.get("client_secret"), key_vault_secret_ref=None) if (vnet_subnet_id and not skip_subnet_role_assignment and not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)): scope = vnet_subnet_id if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor', service_principal_profile.client_id, scope=scope): logger.warning('Could not create a role assignment for subnet. 
' 'Are you an Owner on this subscription?') network_profile = None if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]): if not network_plugin: raise CLIError('Please explicitly specify the network plugin type') if pod_cidr and network_plugin == "azure": raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified') network_profile = ContainerServiceNetworkProfile( network_plugin=network_plugin, pod_cidr=pod_cidr, service_cidr=service_cidr, dns_service_ip=dns_service_ip, docker_bridge_cidr=docker_bridge_address, network_policy=network_policy ) addon_profiles = _handle_addons_args( cmd, enable_addons, subscription_id, resource_group_name, {}, workspace_resource_id ) monitoring = False if 'omsagent' in addon_profiles: monitoring = True _ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent']) aad_profile = None if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]): if aad_tenant_id is None: profile = Profile(cli_ctx=cmd.cli_ctx) _, _, aad_tenant_id = profile.get_login_credentials() aad_profile = ManagedClusterAADProfile( client_app_id=aad_client_app_id, server_app_id=aad_server_app_id, server_app_secret=aad_server_app_secret, tenant_id=aad_tenant_id ) # Check that both --disable-rbac and --enable-rbac weren't provided if all([disable_rbac, enable_rbac]): raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.') mc = ManagedCluster( location=location, tags=tags, dns_prefix=dns_name_prefix, kubernetes_version=kubernetes_version, enable_rbac=False if disable_rbac else True, agent_pool_profiles=[agent_pool_profile], linux_profile=linux_profile, service_principal_profile=service_principal_profile, network_profile=network_profile, addon_profiles=addon_profiles, aad_profile=aad_profile) # Due to SPN replication latency, we do a few retries here max_retry = 30 retry_exception = Exception(None) for _ in range(0, max_retry): try: result = sdk_no_wait(no_wait, client.create_or_update, resource_group_name=resource_group_name, resource_name=name, parameters=mc) # add cluster spn with Monitoring Metrics Publisher role assignment to the cluster resource # mdm metrics supported only in azure public cloud so add the role assignment only in this cloud cloud_name = cmd.cli_ctx.cloud.name if cloud_name.lower() == 'azurecloud' and monitoring: from msrestazure.tools import resource_id cluster_resource_id = resource_id( subscription=subscription_id, resource_group=resource_group_name, namespace='Microsoft.ContainerService', type='managedClusters', name=name ) if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher', service_principal_profile.client_id, scope=cluster_resource_id): logger.warning('Could not create a role assignment for monitoring addon. 
' 'Are you an Owner on this subscription?') return result except CloudError as ex: retry_exception = ex if 'not found in Active Directory tenant' in ex.message: time.sleep(3) else: raise ex raise retry_exception def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False): instance = client.get(resource_group_name, name) subscription_id = _get_subscription_id(cmd.cli_ctx) instance = _update_addons( cmd, instance, subscription_id, resource_group_name, addons, enable=False, no_wait=no_wait ) # send the managed cluster representation to update the addon profiles return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None, subnet_name=None, no_wait=False): instance = client.get(resource_group_name, name) subscription_id = _get_subscription_id(cmd.cli_ctx) service_principal_client_id = instance.service_principal_profile.client_id instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True, workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait) if 'omsagent' in instance.addon_profiles: _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent']) cloud_name = cmd.cli_ctx.cloud.name # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud if cloud_name.lower() == 'azurecloud': from msrestazure.tools import resource_id cluster_resource_id = resource_id( subscription=subscription_id, resource_group=resource_group_name, namespace='Microsoft.ContainerService', type='managedClusters', name=name ) if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher', service_principal_client_id, scope=cluster_resource_id): logger.warning('Could not create a role assignment for Monitoring addon. 
' 'Are you an Owner on this subscription?') # send the managed cluster representation to update the addon profiles return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def aks_get_versions(cmd, client, location): return client.list_orchestrators(location, resource_type='managedClusters') def aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=os.path.join(os.path.expanduser('~'), '.kube', 'config'), overwrite_existing=False): credentialResults = None if admin: credentialResults = client.list_cluster_admin_credentials(resource_group_name, name) else: credentialResults = client.list_cluster_user_credentials(resource_group_name, name) if not credentialResults: raise CLIError("No Kubernetes credentials found.") else: try: kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8') _print_or_merge_credentials(path, kubeconfig, overwrite_existing) except (IndexError, ValueError): raise CLIError("Fail to find kubeconfig file.") ADDONS = { 'http_application_routing': 'httpApplicationRouting', 'monitoring': 'omsagent', 'virtual-node': 'aciConnector' } def aks_list(cmd, client, resource_group_name=None): if resource_group_name: managed_clusters = client.list_by_resource_group(resource_group_name) else: managed_clusters = client.list() return _remove_nulls(list(managed_clusters)) def aks_show(cmd, client, resource_group_name, name): mc = client.get(resource_group_name, name) return _remove_nulls([mc])[0] def aks_update_credentials(cmd, client, resource_group_name, name, reset_service_principal=False, reset_aad=False, service_principal=None, client_secret=None, aad_server_app_id=None, aad_server_app_secret=None, aad_client_app_id=None, aad_tenant_id=None, no_wait=False): if bool(reset_service_principal) == bool(reset_aad): raise CLIError('usage error: --reset-service-principal | --reset-aad-profile') if reset_service_principal: if service_principal is None or client_secret is None: raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET') return sdk_no_wait(no_wait, client.reset_service_principal_profile, resource_group_name, name, service_principal, client_secret) if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]): raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID ' '--aad-server-app-secret SECRET [--aad-tenant-id ID]') parameters = { 'clientAppID': aad_client_app_id, 'serverAppID': aad_server_app_id, 'serverAppSecret': aad_server_app_secret, 'tenantID': aad_tenant_id } return sdk_no_wait(no_wait, client.reset_aad_profile, resource_group_name, name, parameters) def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False): instance = client.get(resource_group_name, name) # TODO: change this approach when we support multiple agent pools. 
for agent_profile in instance.agent_pool_profiles: if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1): agent_profile.count = int(node_count) # pylint: disable=no-member # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name)) def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, no_wait=False, **kwargs): # pylint: disable=unused-argument instance = client.get(resource_group_name, name) if instance.kubernetes_version == kubernetes_version: if instance.provisioning_state == "Succeeded": logger.warning("The cluster is already on version %s and is not in a failed state. No operations " "will occur when upgrading to the same version if the cluster is not in a failed state.", instance.kubernetes_version) elif instance.provisioning_state == "Failed": logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to " "attempt resolution of failed cluster state.", instance.kubernetes_version) instance.kubernetes_version = kubernetes_version # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) DEV_SPACES_EXTENSION_NAME = 'dev-spaces' DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom' def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False): """ Use Azure Dev Spaces with a managed Kubernetes cluster. :param name: Name of the managed cluster. :type name: String :param resource_group_name: Name of resource group. You can configure the default group. \ Using 'az configure --defaults group=<name>'. :type resource_group_name: String :param update: Update to the latest Azure Dev Spaces client components. :type update: bool :param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience. :type space_name: String :param prompt: Do not prompt for confirmation. Requires --space. :type prompt: bool """ if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update): azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE) try: azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt) except TypeError: raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.") except AttributeError as ae: raise CLIError(ae) def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False): """ Remove Azure Dev Spaces from a managed Kubernetes cluster. :param name: Name of the managed cluster. :type name: String :param resource_group_name: Name of resource group. You can configure the default group. \ Using 'az configure --defaults group=<name>'. :type resource_group_name: String :param prompt: Do not prompt for confirmation. 
:type prompt: bool """ if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE): azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE) try: azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt) except AttributeError as ae: raise CLIError(ae) def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None, subnet_name=None, no_wait=False): # parse the comma-separated addons argument addon_args = addons.split(',') addon_profiles = instance.addon_profiles or {} os_type = 'Linux' # for each addons argument for addon_arg in addon_args: addon = ADDONS[addon_arg] if addon == 'aciConnector': # only linux is supported for now, in the future this will be a user flag addon += os_type # addon name is case insensitive addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon) if enable: # add new addons or update existing ones and enable them addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False)) # special config handling for certain addons if addon == 'omsagent': if addon_profile.enabled: raise CLIError('The monitoring addon is already enabled for this managed cluster.\n' 'To change monitoring configuration, run "az aks disable-addons -a monitoring"' 'before enabling it again.') if not workspace_resource_id: workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring( cmd, subscription_id, resource_group_name) workspace_resource_id = workspace_resource_id.strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id} elif addon.lower() == ('aciConnector' + os_type).lower(): if addon_profile.enabled: raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n' 'To change virtual-node configuration, run ' '"az aks disable-addons -a virtual-node -g {resource_group_name}" ' 'before enabling it again.') if not subnet_name: raise CLIError('The aci-connector addon requires setting a subnet name.') addon_profile.config = {'SubnetName': subnet_name} addon_profiles[addon] = addon_profile else: if addon not in addon_profiles: raise CLIError("The addon {} is not installed.".format(addon)) addon_profiles[addon].config = None addon_profiles[addon].enabled = enable instance.addon_profiles = addon_profiles # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return instance def _get_azext_module(extension_name, module_name): try: # Adding the installed extension in the path from azure.cli.core.extension.operations import add_extension_to_path add_extension_to_path(extension_name) # Import the extension module from importlib import import_module azext_custom = import_module(module_name) return azext_custom except ImportError as ie: raise CLIError(ie) def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None, workspace_resource_id=None): if not addon_profiles: addon_profiles = {} addons = addons_str.split(',') if addons_str else [] if 'http_application_routing' in addons: addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True) addons.remove('http_application_routing') # TODO: can we help the user find a workspace 
resource ID? if 'monitoring' in addons: if not workspace_resource_id: # use default workspace if exists else create default workspace workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring( cmd, subscription_id, resource_group_name) workspace_resource_id = workspace_resource_id.strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') addon_profiles['omsagent'] = ManagedClusterAddonProfile( enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id}) addons.remove('monitoring') # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is elif workspace_resource_id: raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".') # error out if any (unrecognized) addons remain if addons: raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format( ",".join(addons), "are" if len(addons) > 1 else "is")) return addon_profiles def _install_dev_spaces_extension(cmd, extension_name): try: from azure.cli.core.extension import operations operations.add_extension(cmd=cmd, extension_name=extension_name) except Exception: # nopa pylint: disable=broad-except return False return True def _update_dev_spaces_extension(cmd, extension_name, extension_module): from azure.cli.core.extension import ExtensionNotInstalledException try: from azure.cli.core.extension import operations operations.update_extension(cmd=cmd, extension_name=extension_name) operations.reload_extension(extension_name=extension_name) except CLIError as err: logger.info(err) except ExtensionNotInstalledException as err: logger.debug(err) return False except ModuleNotFoundError as err: logger.debug(err) logger.error("Error occurred attempting to load the extension module. 
Use --debug for more information.") return False return True def _get_or_add_extension(cmd, extension_name, extension_module, update=False): from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension) try: get_extension(extension_name) if update: return _update_dev_spaces_extension(cmd, extension_name, extension_module) except ExtensionNotInstalledException: return _install_dev_spaces_extension(cmd, extension_name) return True def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name): # mapping for azure public cloud # log analytics workspaces cannot be created in WCUS region due to capacity limits # so mapped to EUS per discussion with log analytics team AzureCloudLocationToOmsRegionCodeMap = { "eastus": "EUS", "westeurope": "WEU", "southeastasia": "SEA", "australiasoutheast": "ASE", "usgovvirginia": "USGV", "westcentralus": "EUS", "japaneast": "EJP", "uksouth": "SUK", "canadacentral": "CCA", "centralindia": "CIN", "eastus2euap": "EAP" } AzureCloudRegionToOmsRegionMap = { "australiaeast": "australiasoutheast", "australiasoutheast": "australiasoutheast", "brazilsouth": "eastus", "canadacentral": "canadacentral", "canadaeast": "canadacentral", "centralus": "eastus", "eastasia": "southeastasia", "eastus": "eastus", "eastus2": "eastus", "japaneast": "japaneast", "japanwest": "japaneast", "northcentralus": "eastus", "northeurope": "westeurope", "southcentralus": "eastus", "southeastasia": "southeastasia", "uksouth": "uksouth", "ukwest": "uksouth", "westcentralus": "eastus", "westeurope": "westeurope", "westus": "eastus", "westus2": "eastus", "centralindia": "centralindia", "southindia": "centralindia", "westindia": "centralindia", "koreacentral": "southeastasia", "koreasouth": "southeastasia", "francecentral": "westeurope", "francesouth": "westeurope" } # mapping for azure china cloud # currently log analytics supported only China East 2 region AzureChinaLocationToOmsRegionCodeMap = { "chinaeast": "EAST2", "chinaeast2": "EAST2", "chinanorth": "EAST2", "chinanorth2": "EAST2" } AzureChinaRegionToOmsRegionMap = { "chinaeast": "chinaeast2", "chinaeast2": "chinaeast2", "chinanorth": "chinaeast2", "chinanorth2": "chinaeast2" } rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) default_region_name = "eastus" default_region_code = "EUS" workspace_region = default_region_name workspace_region_code = default_region_code cloud_name = cmd.cli_ctx.cloud.name if cloud_name.lower() == 'azurecloud': workspace_region = AzureCloudRegionToOmsRegionMap[ rg_location] if AzureCloudRegionToOmsRegionMap[rg_location] else default_region_name workspace_region_code = AzureCloudLocationToOmsRegionCodeMap[ workspace_region] if AzureCloudLocationToOmsRegionCodeMap[workspace_region] else default_region_code elif cloud_name.lower() == 'azurechinacloud': default_region_name = "chinaeast2" default_region_code = "EAST2" workspace_region = AzureChinaRegionToOmsRegionMap[ rg_location] if AzureChinaRegionToOmsRegionMap[rg_location] else default_region_name workspace_region_code = AzureChinaLocationToOmsRegionCodeMap[ workspace_region] if AzureChinaLocationToOmsRegionCodeMap[workspace_region] else default_region_code else: logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name) default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code) default_workspace_resource_id = 
'/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \ '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name) resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id) resources = cf_resources(cmd.cli_ctx, subscription_id) # check if default RG exists if resource_groups.check_existence(default_workspace_resource_group): try: resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview') return resource.id except CloudError as ex: if ex.status_code != 404: raise ex else: resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region}) default_workspace_params = { 'location': workspace_region, 'properties': { 'sku': { 'name': 'standalone' } } } async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview', default_workspace_params) ws_resource_id = '' while True: result = async_poller.result(15) if async_poller.done(): ws_resource_id = result.id break return ws_resource_id def _ensure_container_insights_for_monitoring(cmd, addon): # Workaround for this addon key which has been seen lowercased in the wild. if 'loganalyticsworkspaceresourceid' in addon.config: addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid') workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID'] workspace_resource_id = workspace_resource_id.strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') # extract subscription ID and resource group from workspace_resource_id URL try: subscription_id = workspace_resource_id.split('/')[2] resource_group = workspace_resource_id.split('/')[4] except IndexError: raise CLIError('Could not locate resource group in workspace-resource-id URL.') # region of workspace can be different from region of RG so find the location of the workspace_resource_id resources = cf_resources(cmd.cli_ctx, subscription_id) try: resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview') location = resource.location except CloudError as ex: raise ex unix_time_in_millis = int( (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0) solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis) # pylint: disable=line-too-long template = { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "workspaceResourceId": { "type": "string", "metadata": { "description": "Azure Monitor Log Analytics Resource ID" } }, "workspaceRegion": { "type": "string", "metadata": { "description": "Azure Monitor Log Analytics workspace region" } }, "solutionDeploymentName": { "type": "string", "metadata": { "description": "Name of the solution deployment" } } }, "resources": [ { "type": "Microsoft.Resources/deployments", "name": "[parameters('solutionDeploymentName')]", "apiVersion": "2017-05-10", "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", "properties": { "mode": "Incremental", "template": { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": {}, "variables": {}, "resources": [ { "apiVersion": "2015-11-01-preview", "type": 
"Microsoft.OperationsManagement/solutions", "location": "[parameters('workspaceRegion')]", "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", "properties": { "workspaceResourceId": "[parameters('workspaceResourceId')]" }, "plan": { "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", "product": "[Concat('OMSGallery/', 'ContainerInsights')]", "promotionCode": "", "publisher": "Microsoft" } } ] }, "parameters": {} } } ] } params = { "workspaceResourceId": { "value": workspace_resource_id }, "workspaceRegion": { "value": location }, "solutionDeploymentName": { "value": solution_deployment_name } } deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis) # publish the Container Insights solution to the Log Analytics workspace return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params, validate=False, no_wait=False, subscription_id=subscription_id) def _ensure_aks_service_principal(cli_ctx, service_principal=None, client_secret=None, subscription_id=None, dns_name_prefix=None, location=None, name=None): file_name_aks = 'aksServicePrincipal.json' # TODO: This really needs to be unit tested. rbac_client = get_graph_rbac_management_client(cli_ctx) if not service_principal: # --service-principal not specified, try to load it from local disk principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks) if principal_obj: service_principal = principal_obj.get('service_principal') client_secret = principal_obj.get('client_secret') else: # Nothing to load, make one. if not client_secret: client_secret = _create_client_secret() salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8') url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location) service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret) if not service_principal: raise CLIError('Could not create a service principal with the right permissions. 
' 'Are you an Owner on this project?') logger.info('Created a service principal: %s', service_principal) # We don't need to add role assignment for this created SPN else: # --service-principal specfied, validate --client-secret was too if not client_secret: raise CLIError('--client-secret is required if --service-principal is specified') store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks) return load_acs_service_principal(subscription_id, file_name=file_name_aks) def _ensure_osa_aad(cli_ctx, aad_client_app_id=None, aad_client_app_secret=None, aad_tenant_id=None, identifier=None, name=None, update=False): rbac_client = get_graph_rbac_management_client(cli_ctx) if not aad_client_app_id: if not aad_client_app_secret and update: aad_client_app_secret = _create_client_secret() reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier) # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6", additional_properties=None, type="Scope") required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access], additional_properties=None, resource_app_id="00000002-0000-0000-c000-000000000000") list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')" .format(reply_url))) if update: if list_aad_filtered: update_application(client=rbac_client.applications, object_id=list_aad_filtered[0].object_id, display_name=identifier, identifier_uris=[reply_url], reply_urls=[reply_url], homepage=reply_url, password=aad_client_app_secret, required_resource_accesses=[required_osa_aad_access]) aad_client_app_id = list_aad_filtered[0].app_id logger.info('Updated AAD: %s', aad_client_app_id) else: result = create_application(client=rbac_client.applications, display_name=identifier, identifier_uris=[reply_url], reply_urls=[reply_url], homepage=reply_url, password=aad_client_app_secret, required_resource_accesses=[required_osa_aad_access]) aad_client_app_id = result.app_id logger.info('Created an AAD: %s', aad_client_app_id) else: aad_client_app_id = list_aad_filtered[0].app_id aad_client_app_secret = 'whatever' # Get the TenantID if aad_tenant_id is None: profile = Profile(cli_ctx=cli_ctx) _, _, aad_tenant_id = profile.get_login_credentials() return OpenShiftManagedClusterAADIdentityProvider( client_id=aad_client_app_id, secret=aad_client_app_secret, tenant_id=aad_tenant_id, kind='AADIdentityProvider') def _ensure_service_principal(cli_ctx, service_principal=None, client_secret=None, subscription_id=None, dns_name_prefix=None, location=None, name=None): # TODO: This really needs to be unit tested. rbac_client = get_graph_rbac_management_client(cli_ctx) if not service_principal: # --service-principal not specified, try to load it from local disk principal_obj = load_acs_service_principal(subscription_id) if principal_obj: service_principal = principal_obj.get('service_principal') client_secret = principal_obj.get('client_secret') else: # Nothing to load, make one. if not client_secret: client_secret = _create_client_secret() salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8') url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location) service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret) if not service_principal: raise CLIError('Could not create a service principal with the right permissions. 
' 'Are you an Owner on this project?') logger.info('Created a service principal: %s', service_principal) # add role first before save it if not _add_role_assignment(cli_ctx, 'Contributor', service_principal): logger.warning('Could not create a service principal with the right permissions. ' 'Are you an Owner on this project?') else: # --service-principal specfied, validate --client-secret was too if not client_secret: raise CLIError('--client-secret is required if --service-principal is specified') store_acs_service_principal(subscription_id, client_secret, service_principal) return load_acs_service_principal(subscription_id) def _create_client_secret(): # Add a special character to satsify AAD SP secret requirements special_chars = '!#$%&*-+_.:;<>=?@][^}{|~)(' special_char = special_chars[ord(os.urandom(1)) % len(special_chars)] client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char return client_secret def _get_rg_location(ctx, resource_group_name, subscription_id=None): groups = cf_resource_groups(ctx, subscription_id=subscription_id) # Just do the get, we don't need the result, it will error out if the group doesn't exist. rg = groups.get(resource_group_name) return rg.location def _print_or_merge_credentials(path, kubeconfig, overwrite_existing): """Merge an unencrypted kubeconfig into the file at the specified path, or print it to stdout if the path is "-". """ # Special case for printing to stdout if path == "-": print(kubeconfig) return # ensure that at least an empty ~/.kube/config exists directory = os.path.dirname(path) if directory and not os.path.exists(directory): try: os.makedirs(directory) except OSError as ex: if ex.errno != errno.EEXIST: raise if not os.path.exists(path): with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'): pass # merge the new kubeconfig into the existing one fd, temp_path = tempfile.mkstemp() additional_file = os.fdopen(fd, 'w+t') try: additional_file.write(kubeconfig) additional_file.flush() merge_kubernetes_configurations(path, temp_path, overwrite_existing) except yaml.YAMLError as ex: logger.warning('Failed to merge credentials to kube config file: %s', ex) finally: additional_file.close() os.remove(temp_path) def _remove_nulls(managed_clusters): """ Remove some often-empty fields from a list of ManagedClusters, so the JSON representation doesn't contain distracting null fields. This works around a quirk of the SDK for python behavior. These fields are not sent by the server, but get recreated by the CLI's own "to_dict" serialization. """ attrs = ['tags'] ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id'] sp_attrs = ['secret'] for managed_cluster in managed_clusters: for attr in attrs: if getattr(managed_cluster, attr, None) is None: delattr(managed_cluster, attr) for ap_profile in managed_cluster.agent_pool_profiles: for attr in ap_attrs: if getattr(ap_profile, attr, None) is None: delattr(ap_profile, attr) for attr in sp_attrs: if getattr(managed_cluster.service_principal_profile, attr, None) is None: delattr(managed_cluster.service_principal_profile, attr) return managed_clusters def _remove_osa_nulls(managed_clusters): """ Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation doesn't contain distracting null fields. This works around a quirk of the SDK for python behavior. These fields are not sent by the server, but get recreated by the CLI's own "to_dict" serialization. 
""" attrs = ['tags', 'public_hostname', 'plan', 'type', 'id'] ap_master_attrs = ['name', 'os_type'] net_attrs = ['peer_vnet_id'] for managed_cluster in managed_clusters: for attr in attrs: if getattr(managed_cluster, attr, None) is None: delattr(managed_cluster, attr) for attr in ap_master_attrs: if getattr(managed_cluster.master_pool_profile, attr, None) is None: delattr(managed_cluster.master_pool_profile, attr) for attr in net_attrs: if getattr(managed_cluster.network_profile, attr, None) is None: delattr(managed_cluster.network_profile, attr) return managed_clusters def _validate_aci_location(norm_location): """ Validate the Azure Container Instance location """ aci_locations = [ "australiaeast", "canadacentral", "centralindia", "centralus", "eastasia", "eastus", "eastus2", "eastus2euap", "japaneast", "northcentralus", "northeurope", "southcentralus", "southeastasia", "southindia", "uksouth", "westcentralus", "westus", "westus2", "westeurope" ] if norm_location not in aci_locations: raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) + ' The available locations are "{}"'.format(','.join(aci_locations))) def osa_list(cmd, client, resource_group_name=None): if resource_group_name: managed_clusters = client.list_by_resource_group(resource_group_name) else: managed_clusters = client.list() return _remove_osa_nulls(list(managed_clusters)) def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals fqdn, location=None, compute_vm_size="Standard_D4s_v3", compute_count=3, aad_client_app_id=None, aad_client_app_secret=None, aad_tenant_id=None, vnet_prefix="10.0.0.0/8", subnet_prefix="10.0.0.0/24", vnet_peer=None, tags=None, no_wait=False): if location is None: location = _get_rg_location(cmd.cli_ctx, resource_group_name) agent_pool_profiles = [] agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile( name='compute', # Must be 12 chars or less before ACS RP adds to it count=int(compute_count), vm_size=compute_vm_size, os_type="Linux", role=OpenShiftAgentPoolProfileRole.compute, subnet_cidr=subnet_prefix ) agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile( name='infra', # Must be 12 chars or less before ACS RP adds to it count=int(2), vm_size="Standard_D4s_v3", os_type="Linux", role=OpenShiftAgentPoolProfileRole.infra, subnet_cidr=subnet_prefix ) agent_pool_profiles.append(agent_node_pool_profile) agent_pool_profiles.append(agent_infra_pool_profile) agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile( name='master', # Must be 12 chars or less before ACS RP adds to it count=int(3), vm_size="Standard_D4s_v3", os_type="Linux", subnet_cidr=subnet_prefix ) identity_providers = [] # Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now update_aad_secret = False try: client.get(resource_group_name, name) except CloudError: update_aad_secret = True osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx, aad_client_app_id=aad_client_app_id, aad_client_app_secret=aad_client_app_secret, aad_tenant_id=aad_tenant_id, identifier=fqdn, name=name, update=update_aad_secret) identity_providers.append( OpenShiftManagedClusterIdentityProvider( name='Azure AD', provider=osa_aad_identity ) ) auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers) default_router_profile = OpenShiftRouterProfile(name='default') if vnet_peer is not None: from azure.cli.core.commands.client_factory import get_subscription_id from 
msrestazure.tools import is_valid_resource_id, resource_id if not is_valid_resource_id(vnet_peer): vnet_peer = resource_id( subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, namespace='Microsoft.Network', type='virtualNetwork', name=vnet_peer ) network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer) osamc = OpenShiftManagedCluster( location=location, tags=tags, open_shift_version="v3.11", fqdn=fqdn, network_profile=network_profile, auth_profile=auth_profile, agent_pool_profiles=agent_pool_profiles, master_pool_profile=agent_master_pool_profile, router_profiles=[default_router_profile]) try: # long_running_operation_timeout=300 return sdk_no_wait(no_wait, client.create_or_update, resource_group_name=resource_group_name, resource_name=name, parameters=osamc) except CloudError as ex: raise ex def openshift_show(cmd, client, resource_group_name, name): mc = client.get(resource_group_name, name) return _remove_osa_nulls([mc])[0] def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False): instance = client.get(resource_group_name, name) # TODO: change this approach when we support multiple agent pools. instance.agent_pool_profiles[0].count = int(compute_count) # pylint: disable=no-member # null out the AAD profile and add manually the masterAP name because otherwise validation complains instance.master_pool_profile.name = "master" instance.auth_profile = None return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
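

# --- Illustrative sketch (not part of the original module) ---------------------------------
# acs_create and aks_create above both wrap create_or_update in a retry loop because a
# freshly created service principal may not have replicated through Azure AD yet. The helper
# below is a hedged sketch of that same pattern factored out for reuse; the name
# `_retry_on_propagation_errors` and its keyword arguments are hypothetical and introduced
# here only for illustration. It assumes the module's existing `time` and `CloudError`
# imports, which the functions above already rely on.
def _retry_on_propagation_errors(operation, max_retry=30, delay_seconds=3):
    """Call `operation`, retrying while the failure looks like AAD replication latency."""
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return operation()
        except CloudError as ex:
            retry_exception = ex
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(delay_seconds)  # give Azure AD replication a chance to catch up
            else:
                raise ex
    raise retry_exception
# Example use (hypothetical):
#     _retry_on_propagation_errors(
#         lambda: client.create_or_update(resource_group_name, name, parameters))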
test_timer.py
import concurrent.futures import json import multiprocessing import os from pathlib import Path import tempfile import threading import time from typing import List, Tuple import unittest from waterfalls import Timer class TestTimer(unittest.TestCase): """ Tests the `waterfalls.Timer` module. """ def test_class_instances(self) -> None: """ Tests timers defined as class instances where each `Timer` instance has multiple blocks. """ timer_a, timer_b = self._create_class_blocks() # Assert number of instances self.assertEqual(len(Timer.instances), 2) # Assert timer names self.assertEqual(timer_a.name, "Timer A") self.assertEqual(timer_b.name, "Timer B") # Assert total number of blocks self.assertEqual(len(timer_a.blocks), 4) self.assertEqual(len(timer_b.blocks), 4) # Assert block texts self.assertIsNone(timer_a.blocks[0].text) self.assertEqual(timer_a.blocks[1].text, "Block A") self.assertEqual(timer_a.blocks[2].text, "Block B") self.assertIsNone(timer_a.blocks[3].text) self.assertEqual(timer_b.blocks[0].text, "Block C") self.assertEqual(timer_b.blocks[1].text, "Block D") self.assertEqual(timer_b.blocks[2].text, "Block E") self.assertIsNone(timer_b.blocks[3].text) # Assert thread durations for i in range(4): with self.subTest(i=i): self.assertGreaterEqual(timer_a.blocks[i].thread_duration, 0) self.assertGreaterEqual(timer_b.blocks[i].thread_duration, 0) def test_class_report(self) -> None: """ Tests report generated from timers defined as class instances. """ self._create_class_blocks() report = Timer.generate_report() self._assert_simple_report(report) def test_context_instances(self) -> None: """ Tests timers defined as context managers where each `Timer` instance only has one block. """ self._create_context_blocks() self._assert_1_to_1_instances() def test_context_report(self) -> None: """ Tests report generated from timers defined as context managers. """ self._create_context_blocks() report = Timer.generate_report() self._assert_simple_report(report) def test_decorator_instances(self) -> None: """ Tests timers defined as function decorators where each `Timer` instance only has one block. """ self._create_decorator_blocks() self._assert_1_to_1_instances() def test_decorator_report(self) -> None: """ Tests report generated from timers defined as function decorators. """ self._create_decorator_blocks() report = Timer.generate_report() self._assert_simple_report(report) def test_combined_report(self) -> None: """ Tests report generated from timers defined as class instances, context managers and function decorators, one after another. 
""" self._create_class_blocks() self._create_context_blocks() self._create_decorator_blocks() report = Timer.generate_report() # Assert total number of blocks self.assertEqual(len(report), 8 * 3) # Assert timer names self.assertEqual(len([b for b in report if b["name"] == "Timer A"]), 4 * 3) self.assertEqual(len([b for b in report if b["name"] == "Timer B"]), 4 * 3) # Assert block texts self.assertEqual(len([b for b in report if b["text"] == "Block A"]), 3) self.assertEqual(len([b for b in report if b["text"] == "Block B"]), 3) self.assertEqual(len([b for b in report if b["text"] == "Block C"]), 3) self.assertEqual(len([b for b in report if b["text"] == "Block D"]), 3) self.assertEqual(len([b for b in report if b["text"] == "Block E"]), 3) # Assert thread durations for i in range(8 * 3 - 1): with self.subTest(i=i): self.assertGreaterEqual(report[i]["thread_duration"], 0) # Assert thread IDs for i in range(8 * 3 - 1): with self.subTest(i=i): self.assertEqual(report[i]["thread_id"], report[i + 1]["thread_id"]) def test_nested_report(self) -> None: """ Tests that different types of timers can be created within one another. """ @Timer("Decorator timer") def my_function(i): with Timer("Context timer"): timer = Timer("Class timer") timer.start(text=i) timer.stop() for i in range(2): my_function(i) report = Timer.generate_report() # Assert total number of blocks self.assertEqual(len(report), 6) # Assert timer names self.assertEqual(len([b for b in report if b["name"] == "Decorator timer"]), 2) self.assertEqual(len([b for b in report if b["name"] == "Context timer"]), 2) self.assertEqual(len([b for b in report if b["name"] == "Class timer"]), 2) # Assert block texts self.assertEqual(len([b for b in report if b["text"] == "0"]), 1) self.assertEqual(len([b for b in report if b["text"] == "1"]), 1) self.assertEqual(len([b for b in report if b["text"] is None]), 4) # Assert thread durations for i in range(6 - 1): with self.subTest(i=i): self.assertGreaterEqual(report[i]["thread_duration"], 0) # Assert thread IDs for i in range(6 - 1): with self.subTest(i=i): self.assertEqual(report[i]["thread_id"], report[i + 1]["thread_id"]) def test_block_text(self) -> None: """ Tests that `text` can be set in the constructor, in `start()` and in `stop()` methods. """ timer_a = Timer("Tiemr A", "Block A") timer_a.start() timer_a.stop() timer_b = Timer("Timer B", "Block B") timer_b.start(text="Block B2") timer_b.stop() timer_c = Timer("Timer C", "Block C") timer_c.start() timer_c.stop(text="Block C3") timer_d = Timer("Timer D", "Block D") timer_d.start(text="Block D2") timer_d.stop(text="Block D3") timer_e = Timer("Timer E") timer_e.start() timer_e.stop(text="Block E3") report = Timer.generate_report() self.assertEqual(report[0]["text"], "Block A") self.assertEqual(report[1]["text"], "Block B2") self.assertEqual(report[2]["text"], "Block C3") self.assertEqual(report[3]["text"], "Block D3") self.assertEqual(report[4]["text"], "Block E3") def test_never_started(self) -> None: """ Tests that a `Timer` can be created without being started - no report file should be saved. """ timer = Timer("Timer A") report = Timer.generate_report() self.assertEqual(report, []) with self.assertLogs(level="WARNING"): report_files = self._save_report_files() self.assertEqual(len(report_files), 0) def test_prevent_double_start(self) -> None: """ Tests two consecutive calls to `start()`. The second call should log a warning message. The timer must stay functioning and after another call to `stop()` it must generate a valid report. 
""" timer = Timer("Timer A") timer.start() with self.assertLogs(level="WARNING"): timer.start() report = Timer.generate_report() self.assertEqual(len(report), 0) timer.stop() report = Timer.generate_report() self.assertEqual(len(report), 1) def test_prevent_stop_without_start(self) -> None: """ Tests calling `stop()` without ever starting a timer. The call should log a warning message. The timer must stay functioning and after calls to `start()` and `stop()` it must generate a valid report. """ timer = Timer("Timer A") with self.assertLogs(level="WARNING"): timer.stop() report = Timer.generate_report() self.assertEqual(len(report), 0) timer.start() timer.stop() report = Timer.generate_report() self.assertEqual(len(report), 1) def test_save_report(self) -> None: """ Tests saving a report of a `Timer` created in the current thread. """ with Timer("Timer A"): pass report_files = self._save_report_files() self.assertEqual(len(report_files), 1) def test_save_empty_report(self) -> None: """ Tests an attempt to save a report without ever creating any `Timer` instance. """ report_files = self._save_report_files() self.assertEqual(len(report_files), 0) def test_threaded_timing(self) -> None: """ Tests two `Timer` instances, each created in its own thread. """ def my_function(i): with Timer("Timer A", text=i): pass threads = [] for i in range(2): t = threading.Thread(target=my_function, args=(i,)) threads.append(t) t.start() for t in threads: t.join() report = Timer.generate_report() self._assert_multithread_report(report) report_files = self._save_report_files() self.assertEqual(len(report_files), 1) def test_multiprocessing_timing(self) -> None: """ Tests two `Timer` instances, each created in its own process. In this case, there should be three report files - two generated by child processes and one generated by the main process. """ with tempfile.TemporaryDirectory() as temp_dir_name: os.environ["WATERFALLS_DIRECTORY"] = temp_dir_name processes = [] for i in range(2): p = multiprocessing.Process(target=self._dummy_timed_method, args=(i,)) processes.append(p) p.start() for p in processes: p.join() with Timer("Timer A"): pass Timer.save_report() report_files = self._get_files_from_dir(temp_dir_name) self.assertEqual(len(report_files), 3) for report_file in report_files: with open(os.path.join(temp_dir_name, report_file)) as rf: report = json.load(rf) self.assertEqual(len(report), 1) del os.environ["WATERFALLS_DIRECTORY"] def test_concurrent_threading(self) -> None: """ Tests two `Timer` instances, each created in its own thread within a thread pool. """ def my_function(run): with Timer("Timer A", text=run): time.sleep(0.5) # Put the worker to sleep so another thread is started with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: {executor.submit(my_function, run): run for run in range(2)} report = Timer.generate_report() self._assert_multithread_report(report) report_files = self._save_report_files() self.assertEqual(len(report_files), 1) def test_report_directory_path(self) -> None: """ Tests that directory is properly determined based on priorities. 
""" # The directory defaults to the current working directory directory = Timer._get_report_directory_path() self.assertEqual(directory, Path(os.getcwd())) # Directory defined as env variable has a higher priority os.environ["WATERFALLS_DIRECTORY"] = "./environ_directory" directory = Timer._get_report_directory_path() self.assertEqual(directory, Path("./environ_directory")) # Directory defined as class variable has a higher priority Timer.directory = "./cls_var_directory" directory = Timer._get_report_directory_path() self.assertEqual(directory, Path("./cls_var_directory")) # Directory defined as function argument has the highest priority directory = Timer._get_report_directory_path("./arg_directory") self.assertEqual(directory, Path("./arg_directory")) def test_report_file_name(self) -> None: """ Tests that report file name is properly determined. When calling from a parent process, it should be plain, without PID. When called from a child process, it should contain PID. """ file_name_is_main = Timer._get_report_file_name(is_main_process=True) self.assertEqual(file_name_is_main, "waterfalls.json") file_name_not_main = Timer._get_report_file_name(is_main_process=False) self.assertRegex(file_name_not_main, "waterfalls.[0-9]+.json") with tempfile.TemporaryDirectory() as temp_dir_name: os.environ["WATERFALLS_DIRECTORY"] = temp_dir_name p = multiprocessing.Process(target=self._dummy_timed_method) p.start() p.join() with Timer("Timer B"): pass Timer.save_report() report_files = self._get_files_from_dir(temp_dir_name) self.assertEqual(len(report_files), 2) self.assertIn("waterfalls.json", report_files) report_files.remove("waterfalls.json") self.assertRegex(report_files[0], "waterfalls.[0-9]+.json") def test_repr(self): """ Tests the representation of `Timer`. """ timer_a = Timer("Timer A") self.assertEqual(repr(timer_a), "Timer (name='Timer A', text=None)") timer_b = Timer("Timer B", "Block A") self.assertEqual(repr(timer_b), "Timer (name='Timer B', text='Block A')") timer_b.start("Block B") self.assertEqual(repr(timer_b), "Timer (name='Timer B', text='Block B')") timer_b.stop() self.assertEqual(repr(timer_b), "Timer (name='Timer B', text=None)") with Timer("Timer C") as timer_c: self.assertEqual(repr(timer_c), "Timer (name='Timer C', text=None)") with Timer("Timer D", text="Block D") as timer_d: self.assertEqual(repr(timer_d), "Timer (name='Timer D', text='Block D')") def tearDown(self) -> None: """ Resets `Timer` instances after each test. """ Timer.instances = [] Timer.directory = None def _create_class_blocks(self) -> Tuple[Timer, Timer]: """ Creates two `Timer` instances, each with multiple blocks. """ timer_a = Timer("Timer A") timer_a.start() timer_a.stop() timer_a.start("Block A") timer_a.stop() timer_a.start("Block B") timer_a.stop() timer_a.start() timer_a.stop() timer_b = Timer("Timer B") timer_b.start("Block C") timer_b.stop() timer_b.start("Block D") timer_b.stop() timer_b.start("Block E") timer_b.stop() timer_b.start() timer_b.stop() return timer_a, timer_b @staticmethod def _create_context_blocks() -> None: """ Creates multiple `Timer` instances, each defined as a context manager. 
""" with Timer("Timer A"): pass with Timer("Timer A", text="Block A"): pass with Timer("Timer A", text="Block B"): pass with Timer("Timer A"): pass with Timer("Timer B", text="Block C"): pass with Timer("Timer B", text="Block D"): pass with Timer("Timer B", text="Block E"): pass with Timer("Timer B"): pass @staticmethod def _create_decorator_blocks() -> None: """ Creates multiple `Timer` instances, each defined as a function decorator. """ @Timer("Timer A") def my_function_a(): pass @Timer("Timer A", text="Block A") def my_function_b(): pass @Timer("Timer A", text="Block B") def my_function_c(): pass @Timer("Timer A") def my_function_d(): pass @Timer("Timer B", text="Block C") def my_function_e(): pass @Timer("Timer B", text="Block D") def my_function_f(): pass @Timer("Timer B", text="Block E") def my_function_g(): pass @Timer("Timer B") def my_function_h(): pass my_function_a() my_function_b() my_function_c() my_function_d() my_function_e() my_function_f() my_function_g() my_function_h() def _assert_simple_report(self, report: List[dict]) -> None: """ Asserts a report generated by any of the timers defined as class instances, context managers and function decorators. Args: report: List of all timing blocks. """ # Assert total number of blocks self.assertEqual(len(report), 8) # Assert timer names self.assertEqual(report[0]["name"], "Timer A") self.assertEqual(report[1]["name"], "Timer A") self.assertEqual(report[2]["name"], "Timer A") self.assertEqual(report[3]["name"], "Timer A") self.assertEqual(report[4]["name"], "Timer B") self.assertEqual(report[5]["name"], "Timer B") self.assertEqual(report[6]["name"], "Timer B") self.assertEqual(report[7]["name"], "Timer B") # Assert block texts self.assertIsNone(report[0]["text"]) self.assertEqual(report[1]["text"], "Block A") self.assertEqual(report[2]["text"], "Block B") self.assertIsNone(report[3]["text"]) self.assertEqual(report[4]["text"], "Block C") self.assertEqual(report[5]["text"], "Block D") self.assertEqual(report[6]["text"], "Block E") self.assertIsNone(report[7]["text"]) # Assert start and stop times for i in range(7): with self.subTest(i=i): self.assertLessEqual(report[i]["start_time"], report[i]["stop_time"]) # Assert thread durations for i in range(7): with self.subTest(i=i): self.assertGreaterEqual(report[i]["thread_duration"], 0) # Assert thread IDs for i in range(7): with self.subTest(i=i): self.assertEqual(report[i]["thread_id"], report[i + 1]["thread_id"]) def _assert_multithread_report(self, report: List[dict]) -> None: """ Asserts a report generated by two `Timer` instances, each in its own thread. Args: report: List of all timing blocks. 
""" # Assert total number of blocks self.assertEqual(len(report), 2) # Assert timer names self.assertEqual(report[0]["name"], "Timer A") self.assertEqual(report[1]["name"], "Timer A") self.assertEqual(report[0]["text"], "0") self.assertEqual(report[1]["text"], "1") # Assert start and stop times self.assertLessEqual(report[0]["start_time"], report[0]["stop_time"]) self.assertLessEqual(report[1]["start_time"], report[1]["stop_time"]) # Assert thread durations self.assertGreaterEqual(report[0]["thread_duration"], 0) self.assertGreaterEqual(report[1]["thread_duration"], 0) # Assert thread IDs self.assertNotEqual(report[0]["thread_id"], report[1]["thread_id"]) report_files = self._save_report_files() # Assert generated report files self.assertEqual(len(report_files), 1) self.assertEqual(report_files[0], "waterfalls.json") def _assert_1_to_1_instances(self) -> None: """ Asserts timers which contain one block per one instance (e.g., created via context manager or function decorator). """ # Assert number of instances self.assertEqual(len(Timer.instances), 8) # Assert timer names self.assertEqual(Timer.instances[0].name, "Timer A") self.assertEqual(Timer.instances[1].name, "Timer A") self.assertEqual(Timer.instances[2].name, "Timer A") self.assertEqual(Timer.instances[3].name, "Timer A") self.assertEqual(Timer.instances[4].name, "Timer B") self.assertEqual(Timer.instances[5].name, "Timer B") self.assertEqual(Timer.instances[6].name, "Timer B") self.assertEqual(Timer.instances[7].name, "Timer B") # Assert total number of blocks for i in range(7): with self.subTest(i=i): self.assertEqual(len(Timer.instances[i].blocks), 1) # Assert block texts self.assertIsNone(Timer.instances[0].blocks[0].text) self.assertEqual(Timer.instances[1].blocks[0].text, "Block A") self.assertEqual(Timer.instances[2].blocks[0].text, "Block B") self.assertIsNone(Timer.instances[3].blocks[0].text) self.assertEqual(Timer.instances[4].blocks[0].text, "Block C") self.assertEqual(Timer.instances[5].blocks[0].text, "Block D") self.assertEqual(Timer.instances[6].blocks[0].text, "Block E") self.assertIsNone(Timer.instances[7].blocks[0].text) # Assert thread durations for i in range(7): with self.subTest(i=i): self.assertGreaterEqual(Timer.instances[i].blocks[0].thread_duration, 0) @staticmethod def _dummy_timed_method(i: int = 0) -> None: """ This method is used for tests using multiprocessing, especially on Windows where `spawn` start method is used. A `Process` needs to pickle everything it sends to the worker process. The pickled function needs to be defined at the top level (e.g., class static method). Nested functions won't be importable by the worker process and trying to pickle them raises an exception. Args: i: Optional process sequential ID that will be set as `text` on the `Timer`. """ with Timer("Timer A", text=str(i)): pass def _save_report_files(self) -> List[str]: """ Saves report(s) into report file(s). Returns: List of names of saved report files. """ with tempfile.TemporaryDirectory() as temp_dir_name: Timer.save_report(directory=temp_dir_name) return self._get_files_from_dir(temp_dir_name) @staticmethod def _get_files_from_dir(directory: str) -> List[str]: """ Lists all files in a directory, non-recursive. Args: directory: Where to look for files. Returns: List of all files in the specified `directory` or an empty list if the `directory` has no files. """ return [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))] if __name__ == "__main__": unittest.main()
EWSO365.py
import random import string from typing import Dict import demistomock as demisto from CommonServerPython import * from CommonServerUserPython import * import sys import traceback import json import os import hashlib from datetime import timedelta from io import StringIO import logging import warnings import email from requests.exceptions import ConnectionError from collections import deque from multiprocessing import Process import exchangelib from exchangelib.errors import ( ErrorItemNotFound, ResponseMessageError, RateLimitError, ErrorInvalidIdMalformed, ErrorFolderNotFound, ErrorMailboxStoreUnavailable, ErrorMailboxMoveInProgress, ErrorNameResolutionNoResults, MalformedResponseError, ) from exchangelib.items import Item, Message, Contact from exchangelib.services.common import EWSService, EWSAccountService from exchangelib.util import create_element, add_xml_child, MNS, TNS from exchangelib import ( IMPERSONATION, Account, EWSDateTime, EWSTimeZone, Configuration, FileAttachment, Version, Folder, HTMLBody, Body, ItemAttachment, OAUTH2, OAuth2AuthorizationCodeCredentials, Identity, ExtendedProperty ) from oauthlib.oauth2 import OAuth2Token from exchangelib.version import EXCHANGE_O365 from exchangelib.protocol import BaseProtocol, NoVerifyHTTPAdapter # Ignore warnings print to stdout warnings.filterwarnings("ignore") """ Constants """ APP_NAME = "ms-ews-o365" FOLDER_ID_LEN = 120 MAX_INCIDENTS_PER_FETCH = 50 # move results MOVED_TO_MAILBOX = "movedToMailbox" MOVED_TO_FOLDER = "movedToFolder" # item types FILE_ATTACHMENT_TYPE = "FileAttachment" ITEM_ATTACHMENT_TYPE = "ItemAttachment" ATTACHMENT_TYPE = "attachmentType" TOIS_PATH = "/root/Top of Information Store/" # context keys ATTACHMENT_ID = "attachmentId" ATTACHMENT_ORIGINAL_ITEM_ID = "originalItemId" NEW_ITEM_ID = "newItemId" MESSAGE_ID = "messageId" ITEM_ID = "itemId" ACTION = "action" MAILBOX = "mailbox" MAILBOX_ID = "mailboxId" FOLDER_ID = "id" TARGET_MAILBOX = 'receivedBy' # context paths CONTEXT_UPDATE_EWS_ITEM = f"EWS.Items((val.{ITEM_ID} === obj.{ITEM_ID} || " \ f"(val.{MESSAGE_ID} && obj.{MESSAGE_ID} && val.{MESSAGE_ID} === obj.{MESSAGE_ID}))" \ f" && val.{TARGET_MAILBOX} === obj.{TARGET_MAILBOX})" CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT = "EWS.Items(val.{0} == obj.{1})".format( ITEM_ID, ATTACHMENT_ORIGINAL_ITEM_ID ) CONTEXT_UPDATE_ITEM_ATTACHMENT = ".ItemAttachments(val.{0} == obj.{0})".format( ATTACHMENT_ID ) CONTEXT_UPDATE_FILE_ATTACHMENT = ".FileAttachments(val.{0} == obj.{0})".format( ATTACHMENT_ID ) CONTEXT_UPDATE_FOLDER = "EWS.Folders(val.{0} == obj.{0})".format(FOLDER_ID) # fetch params LAST_RUN_TIME = "lastRunTime" LAST_RUN_IDS = "ids" LAST_RUN_FOLDER = "folderName" ERROR_COUNTER = "errorCounter" # headers ITEMS_RESULTS_HEADERS = [ "sender", "subject", "hasAttachments", "datetimeReceived", "receivedBy", "author", "toRecipients", "textBody", ] UTF_8 = 'utf-8' """ Classes """ class ProxyAdapter(requests.adapters.HTTPAdapter): """ Proxy Adapter used to add PROXY to requests """ def send(self, *args, **kwargs): kwargs['proxies'] = handle_proxy() return super().send(*args, **kwargs) class InsecureProxyAdapter(NoVerifyHTTPAdapter): """ Insecure Proxy Adapter used to add PROXY and INSECURE to requests NoVerifyHTTPAdapter is a built-in insecure HTTPAdapter class """ def send(self, *args, **kwargs): kwargs['proxies'] = handle_proxy() return super().send(*args, **kwargs) class EWSClient: def __init__( self, default_target_mailbox, client_id, client_secret, tenant_id, folder="Inbox", is_public_folder=False, request_timeout="120", 
max_fetch=MAX_INCIDENTS_PER_FETCH, self_deployed=True, insecure=True, proxy=False, **kwargs, ): """ Client used to communicate with EWS :param default_target_mailbox: Email address from which to fetch incidents :param client_id: Application client ID :param client_secret: Application client secret :param folder: Name of the folder from which to fetch incidents :param is_public_folder: Public Folder flag :param request_timeout: Timeout (in seconds) for HTTP requests to Exchange Server :param max_fetch: Max incidents per fetch :param insecure: Trust any certificate (not secure) """ BaseProtocol.TIMEOUT = int(request_timeout) self.ews_server = "https://outlook.office365.com/EWS/Exchange.asmx/" self.ms_client = MicrosoftClient( tenant_id=tenant_id, auth_id=client_id, enc_key=client_secret, app_name=APP_NAME, base_url=self.ews_server, verify=not insecure, proxy=proxy, self_deployed=self_deployed, scope="https://outlook.office.com/.default", ) self.folder_name = folder self.is_public_folder = is_public_folder self.access_type = kwargs.get('access_type') or IMPERSONATION self.max_fetch = min(MAX_INCIDENTS_PER_FETCH, int(max_fetch)) self.last_run_ids_queue_size = 500 self.client_id = client_id self.client_secret = client_secret self.account_email = default_target_mailbox self.config = self.__prepare(insecure) self.protocol = BaseProtocol(self.config) def __prepare(self, insecure): """ Prepares the client PROTOCOL, CREDENTIALS and CONFIGURATION :param insecure: Trust any certificate (not secure) :return: OAuth 2 Configuration """ BaseProtocol.HTTP_ADAPTER_CLS = InsecureProxyAdapter if insecure else ProxyAdapter access_token = self.ms_client.get_access_token() oauth2_token = OAuth2Token({"access_token": access_token}) self.credentials = credentials = OAuth2AuthorizationCodeCredentials( client_id=self.client_id, client_secret=self.client_secret, access_token=oauth2_token, ) # need to add identity for protocol OAuth header self.credentials.identity = Identity(upn=self.account_email) config_args = { "credentials": credentials, "auth_type": OAUTH2, "version": Version(EXCHANGE_O365), "service_endpoint": "https://outlook.office365.com/EWS/Exchange.asmx", } return Configuration(**config_args) def get_account(self, target_mailbox=None): """ Request an account from EWS :param (Optional) target_mailbox: Mailbox associated with the requested account :return: exchangelib Account """ if not target_mailbox: target_mailbox = self.account_email return Account( primary_smtp_address=target_mailbox, autodiscover=False, config=self.config, access_type=self.access_type, ) def get_items_from_mailbox(self, account, item_ids): """ Request specific items from a mailbox associated with an account :param account: EWS account or target_mailbox associated with that account :param item_ids: item_ids of the requested items :return: list of exchangelib Items """ # allow user to pass target_mailbox as account if isinstance(account, str): account = self.get_account(account) else: account = self.get_account(self.account_email) if type(item_ids) is not list: item_ids = [item_ids] items = [Item(id=x) for x in item_ids] result = list(account.fetch(ids=items)) result = [x for x in result if not isinstance(x, ErrorItemNotFound)] if len(result) != len(item_ids): raise Exception( "One or more items were not found. 
Check the input item ids" ) return result def get_item_from_mailbox(self, account, item_id): """ Request a single item from a mailbox associated with an account :param account: EWS account or target_mailbox associated with that account :param item_id: item_id of the requested item :return: exchangelib Item """ result = self.get_items_from_mailbox(account, [item_id]) if len(result) == 0: raise Exception(f"ItemId {str(item_id)} not found") return result[0] def get_attachments_for_item(self, item_id, account, attachment_ids=None): """ Request attachments for an item :param item_id: item_id of the item to retrieve attachments from :param account: EWS account or target_mailbox associated with that account :param (Optional) attachment_ids: attachment_ids: attachment_ids to retrieve :return: list of exchangelib Item.attachments """ item = self.get_item_from_mailbox(account, item_id) attachments = [] attachment_ids = argToList(attachment_ids) if item: if item.attachments: for attachment in item.attachments: if ( attachment_ids and attachment.attachment_id.id not in attachment_ids ): continue attachments.append(attachment) else: raise Exception("Message item not found: " + item_id) if attachment_ids and len(attachments) < len(attachment_ids): raise Exception( "Some attachment id did not found for message:" + str(attachment_ids) ) return attachments def is_default_folder(self, folder_path, is_public=None): """ Is the given folder_path public :param folder_path: folder path to check if is public :param is_public: (Optional) if provided, will return this value :return: Boolean """ if is_public is not None: return is_public if folder_path == self.folder_name: return self.is_public_folder return False def get_folder_by_path(self, path, account=None, is_public=False): """ Retrieve folder by path :param path: path of the folder :param account: account associated with the requested path :param is_public: is the requested folder public :return: exchangelib Folder """ if account is None: account = self.get_account() # handle exchange folder id if len(path) == FOLDER_ID_LEN: folders_map = account.root._folders_map if path in folders_map: return account.root._folders_map[path] if is_public: folder_result = account.public_folders_root elif path == "AllItems": folder_result = account.root else: folder_result = account.inbox.parent # Top of Information Store path = path.replace("/", "\\") path = path.split("\\") for sub_folder_name in path: folder_filter_by_name = [ x for x in folder_result.children if x.name.lower() == sub_folder_name.lower() ] if len(folder_filter_by_name) == 0: raise Exception(f"No such folder {path}") folder_result = folder_filter_by_name[0] return folder_result def send_email(self, message: Message): account = self.get_account() message.account = account message.send_and_save() class MarkAsJunk(EWSAccountService): """ EWSAccountService class used for marking items as junk """ SERVICE_NAME = "MarkAsJunk" def call(self, item_id, move_item): elements = list( self._get_elements( payload=self.get_payload(item_id=item_id, move_item=move_item) ) ) for element in elements: if isinstance(element, ResponseMessageError): return str(element) return "Success" def get_payload(self, item_id, move_item): junk = create_element( f"m:{self.SERVICE_NAME}", {"IsJunk": "true", "MoveItem": "true" if move_item else "false"}, ) items_list = create_element("m:ItemIds") item_element = create_element("t:ItemId", {"Id": item_id}) items_list.append(item_element) junk.append(items_list) return junk class 
GetSearchableMailboxes(EWSService): """ EWSAccountService class used for getting Searchable Mailboxes """ SERVICE_NAME = "GetSearchableMailboxes" element_container_name = f"{{{MNS}}}SearchableMailboxes" @staticmethod def parse_element(element): return { MAILBOX: element.find(f"{{{TNS}}}PrimarySmtpAddress").text if element.find(f"{{{TNS}}}PrimarySmtpAddress") is not None else None, MAILBOX_ID: element.find(f"{{{TNS}}}ReferenceId").text if element.find(f"{{{TNS}}}ReferenceId") is not None else None, "displayName": element.find(f"{{{TNS}}}DisplayName").text if element.find(f"{{{TNS}}}DisplayName") is not None else None, "isExternal": element.find(f"{{{TNS}}}IsExternalMailbox").text if element.find(f"{{{TNS}}}IsExternalMailbox") is not None else None, "externalEmailAddress": element.find(f"{{{TNS}}}ExternalEmailAddress").text if element.find(f"{{{TNS}}}ExternalEmailAddress") is not None else None, } def call(self): elements = self._get_elements(payload=self.get_payload()) return [ self.parse_element(x) for x in elements if x.find(f"{{{TNS}}}ReferenceId").text ] def get_payload(self): element = create_element(f"m:{self.SERVICE_NAME}") return element class ExpandGroup(EWSService): """ EWSAccountService class used for expanding groups """ SERVICE_NAME = "ExpandDL" element_container_name = f"{{{MNS}}}DLExpansion" @staticmethod def parse_element(element): return { MAILBOX: element.find(f"{{{TNS}}}EmailAddress").text if element.find(f"{{{TNS}}}EmailAddress") is not None else None, "displayName": element.find(f"{{{TNS}}}Name").text if element.find(f"{{{TNS}}}Name") is not None else None, "mailboxType": element.find(f"{{{TNS}}}MailboxType").text if element.find(f"{{{TNS}}}MailboxType") is not None else None, } def call(self, email_address, recursive_expansion=False): try: if recursive_expansion == "True": group_members: Dict = {} self.expand_group_recursive(email_address, group_members) return list(group_members.values()) else: return self.expand_group(email_address) except ErrorNameResolutionNoResults: demisto.results("No results were found.") sys.exit() def get_payload(self, email_address): element = create_element(f"m:{self.SERVICE_NAME}") mailbox_element = create_element("m:Mailbox") add_xml_child(mailbox_element, "t:EmailAddress", email_address) element.append(mailbox_element) return element def expand_group(self, email_address): """ Expand given group :param email_address: email address of the group to expand :return: list dict with parsed expanded group data """ elements = self._get_elements(payload=self.get_payload(email_address)) return [self.parse_element(x) for x in elements] def expand_group_recursive(self, email_address, non_dl_emails, dl_emails=None): """ Expand group recursively :param email_address: email address of the group to expand :param non_dl_emails: non distribution only emails :param dl_emails: (Optional) distribution only emails :return: Set of dl emails and non dl emails (returned via reference) """ if dl_emails is None: dl_emails = set() if email_address in non_dl_emails or email_address in dl_emails: return None dl_emails.add(email_address) for member in self.expand_group(email_address): if ( member["mailboxType"] == "PublicDL" or member["mailboxType"] == "PrivateDL" ): self.expand_group_recursive(member.get("mailbox"), non_dl_emails, dl_emails) else: if member["mailbox"] not in non_dl_emails: non_dl_emails[member["mailbox"]] = member # If you are modifying this probably also need to modify in other files def exchangelib_cleanup(): key_protocols = 
list(exchangelib.protocol.CachingProtocol._protocol_cache.items()) try: exchangelib.close_connections() except Exception as ex: demisto.error("Error was found in exchangelib cleanup, ignoring: {}".format(ex)) for key, protocol in key_protocols: try: if "thread_pool" in protocol.__dict__: demisto.debug( "terminating thread pool key{} id: {}".format( key, id(protocol.thread_pool) ) ) protocol.thread_pool.terminate() del protocol.__dict__["thread_pool"] else: demisto.info( "Thread pool not found (ignoring terminate) in protcol dict: {}".format( dir(protocol.__dict__) ) ) except Exception as ex: demisto.error("Error with thread_pool.terminate, ignoring: {}".format(ex)) """ LOGGING """ log_stream = None log_handler = None def start_logging(): global log_stream global log_handler logging.raiseExceptions = False if log_stream is None: log_stream = StringIO() log_handler = logging.StreamHandler(stream=log_stream) log_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT)) logger = logging.getLogger() logger.addHandler(log_handler) logger.setLevel(logging.DEBUG) """ Helper Functions """ def get_attachment_name(attachment_name): """ Retrieve attachment name or error string if none is provided :param attachment_name: attachment name to retrieve :return: string """ if attachment_name is None or attachment_name == "": return "demisto_untitled_attachment" return attachment_name def get_entry_for_object(title, context_key, obj, headers=None): """ Create an entry for a given object :param title: Title of the human readable :param context_key: Context key used for entry context :param obj: Object to create entry for :param headers: (Optional) headers used in the tableToMarkDown :return: Entry object to be used with demisto.results() """ if len(obj) == 0: return "There is no output results" if headers and isinstance(obj, dict): headers = list(set(headers).intersection(set(obj.keys()))) return { "Type": entryTypes["note"], "Contents": obj, "ContentsFormat": formats["json"], "ReadableContentsFormat": formats["markdown"], "HumanReadable": tableToMarkdown(title, obj, headers), "EntryContext": {context_key: obj}, } def prepare_args(args): """ Prepare arguments to be used as the API expects it :param args: demisto args :return: transformed args """ args = dict((k.replace("-", "_"), v) for k, v in list(args.items())) if "is_public" in args: args["is_public"] = args["is_public"] == "True" return args def get_limited_number_of_messages_from_qs(qs, limit): """ Retrieve a limited number of messages from query search :param qs: query search to execute :param limit: limit on number of items to retrieve from search :return: list of exchangelib.Message """ count = 0 results = [] for item in qs: if count == limit: break if isinstance(item, Message): count += 1 results.append(item) return results def keys_to_camel_case(value): """ Transform keys from snake to camel case (does nothing if no snakes are found) :param value: value to transform :return: transformed value """ def str_to_camel_case(snake_str): components = snake_str.split("_") return components[0] + "".join(x.title() for x in components[1:]) if value is None: return None if isinstance(value, (list, set)): return list(map(keys_to_camel_case, value)) if isinstance(value, dict): return dict( ( keys_to_camel_case(k), keys_to_camel_case(v) if isinstance(v, (list, dict)) else v, ) for (k, v) in list(value.items()) ) return str_to_camel_case(value) def get_last_run(client: EWSClient, last_run=None): """ Retrieve the last run time :param client: EWS Client :param 
last_run: (Optional) last run object :return: last run dict """ if not last_run or last_run.get(LAST_RUN_FOLDER) != client.folder_name: last_run = { LAST_RUN_TIME: None, LAST_RUN_FOLDER: client.folder_name, LAST_RUN_IDS: [], } if LAST_RUN_TIME in last_run and last_run[LAST_RUN_TIME] is not None: last_run[LAST_RUN_TIME] = EWSDateTime.from_string(last_run[LAST_RUN_TIME]) # In case we have existing last_run data if last_run.get(LAST_RUN_IDS) is None: last_run[LAST_RUN_IDS] = [] return last_run def email_ec(item): """ Create entry context for an email :param item: exchangelib.Item :return: entry context dict """ return { "CC": None if not item.cc_recipients else [mailbox.email_address for mailbox in item.cc_recipients], "BCC": None if not item.bcc_recipients else [mailbox.email_address for mailbox in item.bcc_recipients], "To": None if not item.to_recipients else [mailbox.email_address for mailbox in item.to_recipients], "From": item.author.email_address, "Subject": item.subject, "Text": item.text_body, "HTML": item.body, "HeadersMap": {header.name: header.value for header in item.headers}, } def parse_item_as_dict(item, email_address=None, camel_case=False, compact_fields=False): """ Parses an exchangelib item as a dict :param item: exchangelib.Item to parse :param (Optional) email_address: string mailbox :param (Optional) camel_case: Is camel case :param (Optional) compact_fields: Is compact fields :return: Item as a dict """ def parse_object_as_dict(obj): raw_dict = {} if obj is not None: for field in obj.FIELDS: raw_dict[field.name] = getattr(obj, field.name, None) return raw_dict def parse_folder_as_json(folder): raw_dict = parse_object_as_dict(folder) if "parent_folder_id" in raw_dict: raw_dict["parent_folder_id"] = parse_folder_as_json( raw_dict["parent_folder_id"] ) if "effective_rights" in raw_dict: raw_dict["effective_rights"] = parse_object_as_dict( raw_dict["effective_rights"] ) return raw_dict raw_dict = {} for field, value in item._field_vals(): if type(value) in [str, str, int, float, bool, Body, HTMLBody, None]: raw_dict[field] = value raw_dict["id"] = item.id if getattr(item, "attachments", None): raw_dict["attachments"] = [ parse_attachment_as_dict(item.id, x) for x in item.attachments ] for time_field in [ "datetime_sent", "datetime_created", "datetime_received", "last_modified_time", "reminder_due_by", ]: value = getattr(item, time_field, None) if value: raw_dict[time_field] = value.ewsformat() for dict_field in [ "effective_rights", "parent_folder_id", "conversation_id", "author", "extern_id", "received_by", "received_representing", "reply_to", "sender", "folder", ]: value = getattr(item, dict_field, None) if value: if isinstance(value, list): raw_dict[dict_field] = [] for single_val in value: raw_dict[dict_field].append(parse_object_as_dict(single_val)) else: raw_dict[dict_field] = parse_object_as_dict(value) for list_dict_field in ["headers", "cc_recipients", "to_recipients"]: value = getattr(item, list_dict_field, None) if value: raw_dict[list_dict_field] = [parse_object_as_dict(x) for x in value] if getattr(item, "folder", None): raw_dict["folder"] = parse_folder_as_json(item.folder) folder_path = ( item.folder.absolute[len(TOIS_PATH):] if item.folder.absolute.startswith(TOIS_PATH) else item.folder.absolute ) raw_dict["folder_path"] = folder_path if compact_fields: new_dict = {} # noinspection PyListCreation fields_list = [ "datetime_created", "datetime_received", "datetime_sent", "sender", "has_attachments", "importance", "message_id", "last_modified_time", "size", 
"subject", "text_body", "headers", "body", "folder_path", "is_read", ] if "id" in raw_dict: new_dict["itemId"] = raw_dict["id"] fields_list.append("itemId") for field in fields_list: if field in raw_dict: new_dict[field] = raw_dict.get(field) for field in ["received_by", "author", "sender"]: if field in raw_dict: new_dict[field] = raw_dict.get(field, {}).get("email_address") for field in ["to_recipients"]: if field in raw_dict: new_dict[field] = [x.get("email_address") for x in raw_dict[field]] attachments = raw_dict.get("attachments") if attachments and len(attachments) > 0: file_attachments = [ x for x in attachments if x[ATTACHMENT_TYPE] == FILE_ATTACHMENT_TYPE ] if len(file_attachments) > 0: new_dict["FileAttachments"] = file_attachments item_attachments = [ x for x in attachments if x[ATTACHMENT_TYPE] == ITEM_ATTACHMENT_TYPE ] if len(item_attachments) > 0: new_dict["ItemAttachments"] = item_attachments raw_dict = new_dict if camel_case: raw_dict = keys_to_camel_case(raw_dict) if email_address: raw_dict[MAILBOX] = email_address return raw_dict def get_entry_for_file_attachment(item_id, attachment): """ Creates a file entry for an attachment :param item_id: item_id of the attachment :param attachment: attachment dict :return: file entry dict for attachment """ entry = fileResult(get_attachment_name(attachment.name), attachment.content) entry["EntryContext"] = { CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT + CONTEXT_UPDATE_FILE_ATTACHMENT: parse_attachment_as_dict(item_id, attachment) } return entry def parse_attachment_as_dict(item_id, attachment): """ Creates a note entry for an attachment :param item_id: item_id of the attachment :param attachment: attachment dict :return: note entry dict for attachment """ try: attachment_content = ( attachment.content if isinstance(attachment, FileAttachment) else attachment.item.mime_content ) return { ATTACHMENT_ORIGINAL_ITEM_ID: item_id, ATTACHMENT_ID: attachment.attachment_id.id, "attachmentName": get_attachment_name(attachment.name), "attachmentSHA256": hashlib.sha256(attachment_content).hexdigest() if attachment_content else None, "attachmentContentType": attachment.content_type, "attachmentContentId": attachment.content_id, "attachmentContentLocation": attachment.content_location, "attachmentSize": attachment.size, "attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(), "attachmentIsInline": attachment.is_inline, ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE if isinstance(attachment, FileAttachment) else ITEM_ATTACHMENT_TYPE, } except TypeError as e: if str(e) != "must be string or buffer, not None": raise return { ATTACHMENT_ORIGINAL_ITEM_ID: item_id, ATTACHMENT_ID: attachment.attachment_id.id, "attachmentName": get_attachment_name(attachment.name), "attachmentSHA256": None, "attachmentContentType": attachment.content_type, "attachmentContentId": attachment.content_id, "attachmentContentLocation": attachment.content_location, "attachmentSize": attachment.size, "attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(), "attachmentIsInline": attachment.is_inline, ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE if isinstance(attachment, FileAttachment) else ITEM_ATTACHMENT_TYPE, } def get_entry_for_item_attachment(item_id, attachment, target_email): """ Creates a note entry for an item attachment :param item_id: Item id :param attachment: exchangelib attachment :param target_email: target email :return: note entry dict for item attachment """ item = attachment.item dict_result = parse_attachment_as_dict(item_id, attachment) 
dict_result.update( parse_item_as_dict(item, target_email, camel_case=True, compact_fields=True) ) title = f'EWS get attachment got item for "{target_email}", "{get_attachment_name(attachment.name)}"' return get_entry_for_object( title, CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT + CONTEXT_UPDATE_ITEM_ATTACHMENT, dict_result, ) """ Command Functions """ def get_expanded_group(client: EWSClient, email_address, recursive_expansion=False): """ Retrieve expanded group command :param client: EWS Client :param email_address: Email address of the group to expand :param (Optional) recursive_expansion: Whether to enable recursive expansion. Default is "False". :return: Expanded groups output tuple """ group_members = ExpandGroup(protocol=client.protocol).call( email_address, recursive_expansion ) group_details = {"name": email_address, "members": group_members} output = {"EWS.ExpandGroup": group_details} readable_output = tableToMarkdown("Group Members", group_members) return readable_output, output, group_details def get_searchable_mailboxes(client: EWSClient): """ Retrieve searchable mailboxes command :param client: EWS Client :return: Searchable mailboxes output tuple """ searchable_mailboxes = GetSearchableMailboxes(protocol=client.protocol).call() readable_output = tableToMarkdown( "Searchable mailboxes", searchable_mailboxes, headers=["displayName", "mailbox"] ) output = {"EWS.Mailboxes": searchable_mailboxes} return readable_output, output, searchable_mailboxes def delete_attachments_for_message( client: EWSClient, item_id, target_mailbox=None, attachment_ids=None ): """ Deletes attachments for a given message :param client: EWS Client :param item_id: item id :param (Optional) target_mailbox: target mailbox :param (Optional) attachment_ids: attachment ids to delete :return: entries that were delted """ attachments = client.get_attachments_for_item( item_id, target_mailbox, attachment_ids ) deleted_file_attachments = [] deleted_item_attachments = [] # type: ignore for attachment in attachments: attachment_deleted_action = { ATTACHMENT_ID: attachment.attachment_id.id, ACTION: "deleted", } if isinstance(attachment, FileAttachment): deleted_file_attachments.append(attachment_deleted_action) else: deleted_item_attachments.append(attachment_deleted_action) attachment.detach() entries = [] if len(deleted_file_attachments) > 0: entry = get_entry_for_object( "Deleted file attachments", "EWS.Items" + CONTEXT_UPDATE_FILE_ATTACHMENT, deleted_file_attachments, ) entries.append(entry) if len(deleted_item_attachments) > 0: entry = get_entry_for_object( "Deleted item attachments", "EWS.Items" + CONTEXT_UPDATE_ITEM_ATTACHMENT, deleted_item_attachments, ) entries.append(entry) return entries def fetch_attachments_for_message( client: EWSClient, item_id, target_mailbox=None, attachment_ids=None ): """ Fetches attachments for a message :param client: EWS Client :param item_id: item id :param (Optional) target_mailbox: target mailbox :param (Optional) attachment_ids: attachment ids :return: list of parsed entries """ account = client.get_account(target_mailbox) attachments = client.get_attachments_for_item(item_id, account, attachment_ids) entries = [] for attachment in attachments: if isinstance(attachment, FileAttachment): try: if attachment.content: entries.append(get_entry_for_file_attachment(item_id, attachment)) except TypeError as e: if str(e) != "must be string or buffer, not None": raise else: entries.append( get_entry_for_item_attachment( item_id, attachment, account.primary_smtp_address ) ) if 
attachment.item.mime_content: entries.append( fileResult( get_attachment_name(attachment.name) + ".eml", attachment.item.mime_content, ) ) return entries def move_item_between_mailboxes( client: EWSClient, item_id, destination_mailbox, destination_folder_path, source_mailbox=None, is_public=None, ): """ Moves item between mailboxes :param client: EWS Client :param item_id: item id :param destination_mailbox: destination mailbox :param destination_folder_path: destination folder path :param (Optional) source_mailbox: source mailbox :param (Optional) is_public: is the destination folder public :return: Output tuple """ source_account = client.get_account(source_mailbox) destination_account = client.get_account(destination_mailbox) is_public = client.is_default_folder(destination_folder_path, is_public) destination_folder = client.get_folder_by_path( destination_folder_path, destination_account, is_public ) item = client.get_item_from_mailbox(source_account, item_id) exported_items = source_account.export([item]) destination_account.upload([(destination_folder, exported_items[0])]) source_account.bulk_delete([item]) move_result = { MOVED_TO_MAILBOX: destination_mailbox, MOVED_TO_FOLDER: destination_folder_path, } readable_output = "Item was moved successfully." output = {f"EWS.Items(val.itemId === '{item_id}')": move_result} return readable_output, output, move_result def move_item( client: EWSClient, item_id, target_folder_path, target_mailbox=None, is_public=None ): """ Moves an item within the same mailbox :param client: EWS Client :param item_id: item id :param target_folder_path: target folder path :param (Optional) target_mailbox: mailbox containing the item :param (Optional) is_public: is the destination folder public :return: Output tuple """ account = client.get_account(target_mailbox) is_public = client.is_default_folder(target_folder_path, is_public) target_folder = client.get_folder_by_path(target_folder_path, is_public=is_public) item = client.get_item_from_mailbox(account, item_id) if isinstance(item, ErrorInvalidIdMalformed): raise Exception("Item not found") item.move(target_folder) move_result = { NEW_ITEM_ID: item.id, ITEM_ID: item_id, MESSAGE_ID: item.message_id, ACTION: "moved", } readable_output = tableToMarkdown("Moved items", move_result) output = {CONTEXT_UPDATE_EWS_ITEM: move_result} return readable_output, output, move_result def delete_items(client: EWSClient, item_ids, delete_type, target_mailbox=None): """ Delete items in a mailbox :param client: EWS Client :param item_ids: items ids to delete :param delete_type: delte type soft/hard :param (Optional) target_mailbox: mailbox containinf the items :return: Output tuple """ deleted_items = [] item_ids = argToList(item_ids) items = client.get_items_from_mailbox(target_mailbox, item_ids) delete_type = delete_type.lower() for item in items: item_id = item.id if delete_type == "trash": item.move_to_trash() elif delete_type == "soft": item.soft_delete() elif delete_type == "hard": item.delete() else: raise Exception( f'invalid delete type: {delete_type}. 
Use "trash" \\ "soft" \\ "hard"' ) deleted_items.append( { ITEM_ID: item_id, MESSAGE_ID: item.message_id, ACTION: f"{delete_type}-deleted", } ) readable_output = tableToMarkdown( f"Deleted items ({delete_type} delete type)", deleted_items ) output = {CONTEXT_UPDATE_EWS_ITEM: deleted_items} return readable_output, output, deleted_items def search_items_in_mailbox( client: EWSClient, query=None, message_id=None, folder_path="", limit=100, target_mailbox=None, is_public=None, selected_fields="all", ): """ Search items in mailbox :param client: EWS Client :param (Optional) query: query to execute :param (Optional) message_id: message ids to search :param (Optional) folder_path: folder path to search :param (Optional) limit: max amount of items to fetch :param (Optional) target_mailbox: mailbox containing the items :param (Optional) is_public: is the targeted folder public :param (Optional) selected_fields: Selected fields :return: Output tuple """ if not query and not message_id: return_error("Missing required argument. Provide query or message-id") if message_id and message_id[0] != "<" and message_id[-1] != ">": message_id = "<{}>".format(message_id) account = client.get_account(target_mailbox) limit = int(limit) if folder_path.lower() == "inbox": folders = [account.inbox] elif folder_path: is_public = client.is_default_folder(folder_path, is_public) folders = [client.get_folder_by_path(folder_path, account, is_public)] else: folders = account.inbox.parent.walk() # pylint: disable=E1101 items = [] # type: ignore selected_all_fields = selected_fields == "all" if selected_all_fields: restricted_fields = list([x.name for x in Message.FIELDS]) # type: ignore else: restricted_fields = set(argToList(selected_fields)) # type: ignore restricted_fields.update(["id", "message_id"]) # type: ignore for folder in folders: if Message not in folder.supported_item_models: continue if query: items_qs = folder.filter(query).only(*restricted_fields) else: items_qs = folder.filter(message_id=message_id).only(*restricted_fields) items += get_limited_number_of_messages_from_qs(items_qs, limit) if len(items) >= limit: break items = items[:limit] searched_items_result = [ parse_item_as_dict( item, account.primary_smtp_address, camel_case=True, compact_fields=selected_all_fields, ) for item in items ] if not selected_all_fields: searched_items_result = [ {k: v for (k, v) in i.items() if k in keys_to_camel_case(restricted_fields)} for i in searched_items_result ] for item in searched_items_result: item["itemId"] = item.pop("id", "") readable_output = tableToMarkdown( "Searched items", searched_items_result, headers=ITEMS_RESULTS_HEADERS if selected_all_fields else None, ) output = {CONTEXT_UPDATE_EWS_ITEM: searched_items_result} return readable_output, output, searched_items_result def get_out_of_office_state(client: EWSClient, target_mailbox=None): """ Retrieve get out of office state of the targeted mailbox :param client: EWS Client :param (Optional) target_mailbox: target mailbox :return: Output tuple """ account = client.get_account(target_mailbox) oof = account.oof_settings oof_dict = { "state": oof.state, # pylint: disable=E1101 "externalAudience": getattr(oof, "external_audience", None), "start": oof.start.ewsformat() if oof.start else None, # pylint: disable=E1101 "end": oof.end.ewsformat() if oof.end else None, # pylint: disable=E1101 "internalReply": getattr(oof, "internal_replay", None), "externalReply": getattr(oof, "external_replay", None), MAILBOX: account.primary_smtp_address, } readable_output = 
tableToMarkdown( f"Out of office state for {account.primary_smtp_address}", oof_dict ) output = {f"Account.Email(val.Address == obj.{MAILBOX}).OutOfOffice": oof_dict} return readable_output, output, oof_dict def recover_soft_delete_item( client: EWSClient, message_ids, target_folder_path="Inbox", target_mailbox=None, is_public=None, ): """ Recovers soft deleted items :param client: EWS Client :param message_ids: Message ids to recover :param (Optional) target_folder_path: target folder path :param (Optional) target_mailbox: target mailbox :param (Optional) is_public: is the target folder public :return: """ account = client.get_account(target_mailbox) is_public = client.is_default_folder(target_folder_path, is_public) target_folder = client.get_folder_by_path(target_folder_path, account, is_public) recovered_messages = [] message_ids = argToList(message_ids) items_to_recover = account.recoverable_items_deletions.filter( # pylint: disable=E1101 message_id__in=message_ids ).all() # pylint: disable=E1101 recovered_items = set() for item in items_to_recover: recovered_items.add(item) if len(recovered_items) != len(message_ids): missing_items = set(message_ids).difference(recovered_items) raise Exception( f"Some message ids are missing in recoverable items directory: {missing_items}" ) for item in recovered_items: item.move(target_folder) recovered_messages.append( {ITEM_ID: item.id, MESSAGE_ID: item.message_id, ACTION: "recovered"} ) readable_output = tableToMarkdown("Recovered messages", recovered_messages) output = {CONTEXT_UPDATE_EWS_ITEM: recovered_messages} return readable_output, output, recovered_messages def get_contacts(client: EWSClient, limit, target_mailbox=None): """ Retrieve contacts of the target mailbox or client mailbox :param client: EWS Client :param limit: max amount of contacts to retrieve :param (Optional) target_mailbox: Target mailbox :return: """ def parse_physical_address(address): result = {} for attr in ["city", "country", "label", "state", "street", "zipcode"]: result[attr] = getattr(address, attr, None) return result def parse_phone_number(phone_number): result = {} for attr in ["label", "phone_number"]: result[attr] = getattr(phone_number, attr, None) return result def parse_contact(contact): contact_dict = dict( (k, v if not isinstance(v, EWSDateTime) else v.ewsformat()) for k, v in list(contact._field_vals()) if isinstance(v, str) or isinstance(v, EWSDateTime) ) if isinstance(contact, Contact) and contact.physical_addresses: contact_dict["physical_addresses"] = list( map(parse_physical_address, contact.physical_addresses) ) if isinstance(contact, Contact) and contact.phone_numbers: contact_dict["phone_numbers"] = list( map(parse_phone_number, contact.phone_numbers) ) if ( isinstance(contact, Contact) and contact.email_addresses and len(contact.email_addresses) > 0 ): contact_dict["emailAddresses"] = [x.email for x in contact.email_addresses] contact_dict = keys_to_camel_case(contact_dict) contact_dict = dict((k, v) for k, v in list(contact_dict.items()) if v) contact_dict.pop("mimeContent", None) contact_dict["originMailbox"] = target_mailbox return contact_dict account = client.get_account(target_mailbox) contacts = [] for contact in account.contacts.all()[: int(limit)]: # pylint: disable=E1101 contacts.append(parse_contact(contact)) readable_output = tableToMarkdown(f"Email contacts for {target_mailbox}", contacts) output = {"Account.Email(val.Address == obj.originMailbox).EwsContacts": contacts} return readable_output, output, contacts def 
create_folder(client: EWSClient, new_folder_name, folder_path, target_mailbox=None): """ Creates a folder in the target mailbox or the client mailbox :param client: EWS Client :param new_folder_name: new folder name :param folder_path: path of the new folder :param (Optional) target_mailbox: target mailbox :return: Output tuple """ account = client.get_account(target_mailbox) full_path = os.path.join(folder_path, new_folder_name) try: if client.get_folder_by_path(full_path, account): return f"Folder {full_path} already exists", except Exception: pass parent_folder = client.get_folder_by_path(folder_path, account) f = Folder(parent=parent_folder, name=new_folder_name) f.save() client.get_folder_by_path(full_path, account) return f"Folder {full_path} created successfully", def find_folders(client: EWSClient, target_mailbox=None): """ Finds folders in the mailbox :param client: EWS Client :param (Optional) target_mailbox: target mailbox :return: Output tuple """ account = client.get_account(target_mailbox) root = account.root if client.is_public_folder: root = account.public_folders_root folders = [] for f in root.walk(): # pylint: disable=E1101 folder = folder_to_context_entry(f) folders.append(folder) folders_tree = root.tree() # pylint: disable=E1101 readable_output = folders_tree output = {"EWS.Folders(val.id == obj.id)": folders} return readable_output, output, folders def mark_item_as_junk(client: EWSClient, item_id, move_items, target_mailbox=None): """ Marks item as junk in the target mailbox or client mailbox :param client: EWS Client :param item_id: item ids to mark as junk :param move_items: "yes" or "no" - to move or not to move to trash :param (Optional) target_mailbox: target mailbox :return: """ account = client.get_account(target_mailbox) move_items = move_items.lower() == "yes" ews_result = MarkAsJunk(account=account).call(item_id=item_id, move_item=move_items) mark_as_junk_result = { ITEM_ID: item_id, } if ews_result == "Success": mark_as_junk_result[ACTION] = "marked-as-junk" else: raise Exception("Failed mark-item-as-junk with error: " + ews_result) readable_output = tableToMarkdown("Mark item as junk", mark_as_junk_result) output = {CONTEXT_UPDATE_EWS_ITEM: mark_as_junk_result} return readable_output, output, mark_as_junk_result def get_items_from_folder( client: EWSClient, folder_path, limit=100, target_mailbox=None, is_public=None, get_internal_item="no", ): """ Retrieve items from folder path :param client: EWS Client :param folder_path: folder path :param (Optional) limit: max amount of items to retrieve :param (Optional) target_mailbox: target mailbox :param (Optional) is_public: is the folder public :param (Optional) get_internal_item: should also retrieve internal items ("no" by default) :return: Output tuple """ account = client.get_account(target_mailbox) limit = int(limit) get_internal_item = get_internal_item == "yes" is_public = client.is_default_folder(folder_path, is_public) folder = client.get_folder_by_path(folder_path, account, is_public) qs = folder.filter().order_by("-datetime_created")[:limit] items = get_limited_number_of_messages_from_qs(qs, limit) items_result = [] for item in items: item_attachment = parse_item_as_dict( item, account.primary_smtp_address, camel_case=True, compact_fields=True ) for attachment in item.attachments: if ( get_internal_item and isinstance(attachment, ItemAttachment) and isinstance(attachment.item, Message) ): # if found item attachment - switch item to the attchment item_attachment = parse_item_as_dict( attachment.item, 
account.primary_smtp_address, camel_case=True, compact_fields=True, ) break items_result.append(item_attachment) hm_headers = [ "sender", "subject", "hasAttachments", "datetimeReceived", "receivedBy", "author", "toRecipients", "id", ] readable_output = tableToMarkdown( "Items in folder " + folder_path, items_result, headers=hm_headers ) output = {CONTEXT_UPDATE_EWS_ITEM: items_result} return readable_output, output, items_result def get_items(client: EWSClient, item_ids, target_mailbox=None): """ Get items from target mailbox or client mailbox :param client: EWS Client :param item_ids: item ids to retrieve :param (Optional) target_mailbox: target mailbox to retrieve items from :return: """ item_ids = argToList(item_ids) account = client.get_account(target_mailbox) items = client.get_items_from_mailbox(account, item_ids) items = [x for x in items if isinstance(x, Message)] items_as_incidents = [parse_incident_from_item(x) for x in items] items_to_context = [ parse_item_as_dict(x, account.primary_smtp_address, True, True) for x in items ] readable_output = tableToMarkdown( "Get items", items_to_context, ITEMS_RESULTS_HEADERS ) output = { CONTEXT_UPDATE_EWS_ITEM: items_to_context, "Email": [email_ec(item) for item in items], } return readable_output, output, items_as_incidents def get_folder(client: EWSClient, folder_path, target_mailbox=None, is_public=None): """ Retrieve a folder from the target mailbox or client mailbox :param client: EWS Client :param folder_path: folder path to retrieve :param (Optional) target_mailbox: target mailbox :param (Optional) is_public: is the folder public :return: """ account = client.get_account(target_mailbox) is_public = client.is_default_folder(folder_path, is_public) folder = folder_to_context_entry( client.get_folder_by_path(folder_path, account=account, is_public=is_public) ) readable_output = tableToMarkdown(f"Folder {folder_path}", folder) output = {CONTEXT_UPDATE_FOLDER: folder} return readable_output, output, folder def folder_to_context_entry(f): """ Create a context entry from a folder response :param f: folder response :return: dict context entry """ try: f_entry = { "name": f.name, "totalCount": f.total_count, "id": f.id, "childrenFolderCount": f.child_folder_count, "changeKey": f.changekey, } if "unread_count" in [x.name for x in Folder.FIELDS]: f_entry["unreadCount"] = f.unread_count return f_entry except AttributeError: if isinstance(f, dict): return { "name": f.get("name"), "totalCount": f.get("total_count"), "id": f.get("id"), "childrenFolderCount": f.get("child_folder_count"), "changeKey": f.get("changekey"), "unreadCount": f.get("unread_count"), } def mark_item_as_read( client: EWSClient, item_ids, operation="read", target_mailbox=None ): """ Marks item as read :param client: EWS Client :param item_ids: items ids to mark as read :param (Optional) operation: operation to execute :param (Optional) target_mailbox: target mailbox :return: Output tuple """ marked_items = [] item_ids = argToList(item_ids) items = client.get_items_from_mailbox(target_mailbox, item_ids) items = [x for x in items if isinstance(x, Message)] for item in items: item.is_read = operation == "read" item.save() marked_items.append( { ITEM_ID: item.id, MESSAGE_ID: item.message_id, ACTION: "marked-as-{}".format(operation), } ) readable_output = tableToMarkdown( f"Marked items ({operation} marked operation)", marked_items ) output = {CONTEXT_UPDATE_EWS_ITEM: marked_items} return readable_output, output, marked_items def random_word_generator(length): """Generate a random 
string of given length """ letters = string.ascii_lowercase return ''.join(random.choice(letters) for i in range(length)) def handle_html(html_body): """ Extract all data-url content from within the html and return as separate attachments. Due to security implications, we support only images here We might not have Beautiful Soup so just do regex search """ attachments = [] clean_body = '' last_index = 0 for i, m in enumerate( re.finditer(r'<img.+?src=\"(data:(image\/.+?);base64,([a-zA-Z0-9+/=\r\n]+?))\"', html_body, re.I)): attachment = { 'data': base64.b64decode(m.group(3)), 'name': f'image{i}' } attachment['cid'] = f'{attachment["name"]}@{random_word_generator(8)}.{random_word_generator(8)}' attachments.append(attachment) clean_body += html_body[last_index:m.start(1)] + 'cid:' + attachment['cid'] last_index = m.end() - 1 clean_body += html_body[last_index:] return clean_body, attachments def collect_manual_attachments(manualAttachObj): """Collect all manual attachments' data Args: manualAttachObj (str): String representation of the manually attached files list. Returns: List[Dict]. List of the files data. """ manually_attached_objects = argToList(manualAttachObj) attachments = [] for attachment in manually_attached_objects: file_res = demisto.getFilePath(os.path.basename(attachment['RealFileName'])) path = file_res['path'] with open(path, 'rb') as fp: data = fp.read() attachments.append({ 'name': attachment['FileName'], 'data': data, 'cid': '' }) return attachments def collect_attachments(attachments_ids, attachments_cids, attachments_names): """Collect all attachments' data Args: attachments_ids (str): String representation of the files ids list. attachments_cids (str): String representation of the files content ids list. attachments_names (str): String representation of the files names list. Returns: List[Dict]. List of the files data. """ attachments = [] files_ids = argToList(attachments_ids) files_cids = argToList(attachments_cids) files_names = argToList(attachments_names) for index, file_id in enumerate(files_ids): try: file_res = demisto.getFilePath(file_id) path = file_res['path'] if len(files_names) > index and files_names[index]: filename = files_names[index] else: filename = file_res['name'] if len(files_cids) > index and files_cids[index]: cid = files_cids[index] else: cid = '' with open(path, 'rb') as fp: data = fp.read() attachments.append({ 'name': filename, 'data': data, 'cid': cid }) except Exception as e: demisto.error(f'Invalid entry {file_id} with exception: {e}') return_error(f'Entry {file_id} is not valid or is not a file entry') return attachments def handle_transient_files(transient_files, transient_files_contents, transient_files_cids): """Creates the transient attachments data Args: transient_files (str): String representation of the transient files names list. transient_files_contents (str): String representation of the transient files content list. transient_files_cids (str): String representation of the transient files content ids list. Returns: List[Dict]. List of the transient files data. 
""" transient_attachments = [] files_names = argToList(transient_files) files_contents = argToList(transient_files_contents) files_cids = argToList(transient_files_cids) for index in range(len(files_names)): file_name = files_names[index] if index >= len(files_contents): break file_content = bytes(files_contents[index], UTF_8) if index >= len(files_cids): file_cid = '' else: file_cid = files_cids[index] transient_attachments.append({ 'name': file_name, 'data': file_content, 'cid': file_cid }) return transient_attachments def handle_template_params(template_params): """Translates the template params if they exist from the context Args: template_params (str): JSON string that represent the variables names to be replaced and the desired value. Value can be either real value or context key to fetch the value from. Returns: Dict. `variable_name: value_to_use` of the templated parameters. """ actual_params = {} if template_params: try: params = json.loads(template_params) for p in params: if params[p].get('value'): actual_params[p] = params[p]['value'] elif params[p].get('key'): actual_params[p] = demisto.dt(demisto.context(), params[p]['key']) except ValueError as e: return_error('Unable to parse template_params: %s' % (str(e))) return actual_params def create_message_object(to, cc, bcc, subject, body, additional_headers): """Creates the message object according to the existence of additional custom headers. """ if additional_headers: return Message( to_recipients=to, cc_recipients=cc, bcc_recipients=bcc, subject=subject, body=body, **additional_headers ) return Message( to_recipients=to, cc_recipients=cc, bcc_recipients=bcc, subject=subject, body=body ) def create_message(to, subject='', body='', bcc=None, cc=None, html_body=None, attachments=None, additional_headers=None): """Creates the Message object that will be sent. Args: to (list): Main recipients. cc (list): CC recipients. bcc (list): BCC recipients. subject (str): Email's subject. body (str): Email's simple text body. html_body (str): Email's html body. attachments (list): Files to be attached to the mail, both inline and as files. additional_headers (Dict): Custom headers to be added to the message. Returns: Message. Message object ready to be sent. """ if not html_body: # This is a simple text message - we cannot have CIDs here message = create_message_object(to, cc, bcc, subject, body, additional_headers) for attachment in attachments: if not attachment.get('cid'): new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data')) message.attach(new_attachment) else: html_body, html_attachments = handle_html(html_body) attachments += html_attachments message = create_message_object(to, cc, bcc, subject, HTMLBody(html_body), additional_headers) for attachment in attachments: if not attachment.get('cid'): new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data')) else: new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'), is_inline=True, content_id=attachment.get('cid')) message.attach(new_attachment) return message def add_additional_headers(additional_headers): """Adds custom headers to the Message object Args: additional_headers (str): Headers list as string. Example: headerName1=headerValue1,headerName2=headerValue2 Returns: Dict. 
Headers dictionary in the form of: `header_name: header value` """ headers = dict() for header in argToList(additional_headers): header_name, header_value = header.split('=', 1) class TempClass(ExtendedProperty): distinguished_property_set_id = 'InternetHeaders' property_name = header_name property_type = 'String' try: Message.register(header_name, TempClass) headers[header_name] = header_value except ValueError as e: demisto.debug('EWSO365 - Header ' + header_name + ' could not be registered. ' + str(e)) return headers def send_email(client: EWSClient, to, subject='', body="", bcc=None, cc=None, htmlBody=None, attachIDs="", attachCIDs="", attachNames="", manualAttachObj=None, transientFile=None, transientFileContent=None, transientFileCID=None, templateParams=None, additionalHeader=None, raw_message=None): to = argToList(to) cc = argToList(cc) bcc = argToList(bcc) # Basic validation - we allow pretty much everything but you have to have at least a recipient # We allow messages without subject and also without body if not to and not cc and not bcc: return_error('You must have at least one recipient') if raw_message: message = Message( to_recipients=to, cc_recipients=cc, bcc_recipients=bcc, body=raw_message ) else: if additionalHeader: additionalHeader = add_additional_headers(additionalHeader) # collect all types of attachments attachments = collect_attachments(attachIDs, attachCIDs, attachNames) attachments.extend(collect_manual_attachments(manualAttachObj)) attachments.extend(handle_transient_files(transientFile, transientFileContent, transientFileCID)) # update body and html_body with the templated params, if exists template_params = handle_template_params(templateParams) if template_params: body = body.format(**template_params) if htmlBody: htmlBody = htmlBody.format(**template_params) message = create_message(to, subject, body, bcc, cc, htmlBody, attachments, additionalHeader) client.send_email(message) return 'Mail sent successfully', {}, {} def get_item_as_eml(client: EWSClient, item_id, target_mailbox=None): """ Retrieve item as an eml :param client: EWS Client :param item_id: Item id to retrieve :param (Optional) target_mailbox: target mailbox :return: Output tuple """ account = client.get_account(target_mailbox) item = client.get_item_from_mailbox(account, item_id) if item.mime_content: mime_content = item.mime_content if isinstance(mime_content, bytes): email_content = email.message_from_bytes(mime_content) else: email_content = email.message_from_string(mime_content) if item.headers: attached_email_headers = [ (h, " ".join(map(str.strip, v.split("\r\n")))) for (h, v) in list(email_content.items()) ] for header in item.headers: if ( header.name, header.value, ) not in attached_email_headers and header.name != "Content-Type": email_content.add_header(header.name, header.value) eml_name = item.subject if item.subject else "demisto_untitled_eml" file_result = fileResult(eml_name + ".eml", email_content.as_string()) file_result = ( file_result if file_result else "Failed uploading eml file to war room" ) return file_result def parse_incident_from_item(item): """ Parses an incident from an item :param item: item to parse :return: Parsed item """ incident = {} labels = [] try: incident["details"] = item.text_body or item.body except AttributeError: incident["details"] = item.body incident["name"] = item.subject labels.append({"type": "Email/subject", "value": item.subject}) incident["occurred"] = item.datetime_created.ewsformat() # handle recipients if item.to_recipients: for 
recipient in item.to_recipients: labels.append({"type": "Email", "value": recipient.email_address}) # handle cc if item.cc_recipients: for recipient in item.cc_recipients: labels.append({"type": "Email/cc", "value": recipient.email_address}) # handle email from if item.sender: labels.append({"type": "Email/from", "value": item.sender.email_address}) # email format email_format = "" try: if item.text_body: labels.append({"type": "Email/text", "value": item.text_body}) email_format = "text" except AttributeError: pass if item.body: labels.append({"type": "Email/html", "value": item.body}) email_format = "HTML" labels.append({"type": "Email/format", "value": email_format}) # handle attachments if item.attachments: incident["attachment"] = [] for attachment in item.attachments: file_result = None label_attachment_type = None label_attachment_id_type = None if isinstance(attachment, FileAttachment): try: if attachment.content: # file attachment label_attachment_type = "attachments" label_attachment_id_type = "attachmentId" # save the attachment file_name = get_attachment_name(attachment.name) file_result = fileResult(file_name, attachment.content) # check for error if file_result["Type"] == entryTypes["error"]: demisto.error(file_result["Contents"]) raise Exception(file_result["Contents"]) # save attachment to incident incident["attachment"].append( { "path": file_result["FileID"], "name": get_attachment_name(attachment.name), } ) except TypeError as e: if str(e) != "must be string or buffer, not None": raise continue else: # other item attachment label_attachment_type = "attachmentItems" label_attachment_id_type = "attachmentItemsId" # save the attachment if attachment.item.mime_content: mime_content = attachment.item.mime_content attached_email = email.message_from_bytes(mime_content) if isinstance(mime_content, bytes) \ else email.message_from_string(mime_content) if attachment.item.headers: attached_email_headers = [ (h, " ".join(map(str.strip, v.split("\r\n")))) for (h, v) in list(attached_email.items()) ] for header in attachment.item.headers: if ( (header.name, header.value) not in attached_email_headers and header.name != "Content-Type" ): attached_email.add_header(header.name, header.value) file_result = fileResult( get_attachment_name(attachment.name) + ".eml", attached_email.as_string(), ) if file_result: # check for error if file_result["Type"] == entryTypes["error"]: demisto.error(file_result["Contents"]) raise Exception(file_result["Contents"]) # save attachment to incident incident["attachment"].append( { "path": file_result["FileID"], "name": get_attachment_name(attachment.name) + ".eml", } ) labels.append( { "type": label_attachment_type, "value": get_attachment_name(attachment.name), } ) labels.append( {"type": label_attachment_id_type, "value": attachment.attachment_id.id} ) # handle headers if item.headers: headers = [] for header in item.headers: labels.append( { "type": "Email/Header/{}".format(header.name), "value": str(header.value), } ) headers.append("{}: {}".format(header.name, header.value)) labels.append({"type": "Email/headers", "value": "\r\n".join(headers)}) # handle item id if item.message_id: labels.append({"type": "Email/MessageId", "value": str(item.message_id)}) if item.id: labels.append({"type": "Email/ID", "value": item.id}) labels.append({"type": "Email/itemId", "value": item.id}) # handle conversion id if item.conversation_id: labels.append({"type": "Email/ConversionID", "value": item.conversation_id.id}) incident["labels"] = labels incident["rawJSON"] = 
json.dumps(parse_item_as_dict(item, None), ensure_ascii=False) return incident def fetch_emails_as_incidents(client: EWSClient, last_run): """ Fetch incidents :param client: EWS Client :param last_run: last run dict :return: """ last_run = get_last_run(client, last_run) try: last_emails = fetch_last_emails( client, client.folder_name, last_run.get(LAST_RUN_TIME), last_run.get(LAST_RUN_IDS), ) ids = deque( last_run.get(LAST_RUN_IDS, []), maxlen=client.last_run_ids_queue_size ) incidents = [] incident: Dict[str, str] = {} for item in last_emails: if item.message_id: ids.append(item.message_id) incident = parse_incident_from_item(item) incidents.append(incident) if len(incidents) >= client.max_fetch: break last_run_time = incident.get("occurred", last_run.get(LAST_RUN_TIME)) if isinstance(last_run_time, EWSDateTime): last_run_time = last_run_time.ewsformat() new_last_run = { LAST_RUN_TIME: last_run_time, LAST_RUN_FOLDER: client.folder_name, LAST_RUN_IDS: list(ids), ERROR_COUNTER: 0, } demisto.setLastRun(new_last_run) return incidents except RateLimitError: if LAST_RUN_TIME in last_run: last_run[LAST_RUN_TIME] = last_run[LAST_RUN_TIME].ewsformat() if ERROR_COUNTER not in last_run: last_run[ERROR_COUNTER] = 0 last_run[ERROR_COUNTER] += 1 demisto.setLastRun(last_run) if last_run[ERROR_COUNTER] > 2: raise return [] def fetch_last_emails( client: EWSClient, folder_name="Inbox", since_datetime=None, exclude_ids=None ): """ Fetches last emails :param client: EWS client :param (Optional) folder_name: folder name to pull from :param (Optional) since_datetime: items will be searched after this datetime :param (Optional) exclude_ids: exclude ids from fetch :return: list of exchangelib.Items """ qs = client.get_folder_by_path(folder_name, is_public=client.is_public_folder) if since_datetime: qs = qs.filter(datetime_received__gte=since_datetime) else: last_10_min = EWSDateTime.now(tz=EWSTimeZone.timezone("UTC")) - timedelta( minutes=10 ) qs = qs.filter(last_modified_time__gte=last_10_min) qs = qs.filter().only(*[x.name for x in Message.FIELDS]) qs = qs.filter().order_by("datetime_received") result = qs.all() result = [x for x in result if isinstance(x, Message)] if exclude_ids and len(exclude_ids) > 0: exclude_ids = set(exclude_ids) result = [x for x in result if x.message_id not in exclude_ids] return result def test_module(client: EWSClient, max_fetch): """ test-module * Max incidents per fetch <= MAX_INCIDENTS_PER_FETCH * Account can be retrieved * Account has read rights * Test access to fetch folder :param client: EWS Client :param max_fetch: Max fetches per incident :return: "ok" """ try: if int(max_fetch) > MAX_INCIDENTS_PER_FETCH: return_error(f'Error - Max incidents per fetch cannot be greater than {MAX_INCIDENTS_PER_FETCH}. ' f'You provided: {max_fetch}') account = client.get_account() if not account.root.effective_rights.read: # pylint: disable=E1101 raise Exception( "Success to authenticate, but user has no permissions to read from the mailbox. " "Need to delegate the user permissions to the mailbox - " "please read integration documentation and follow the instructions" ) client.get_folder_by_path( client.folder_name, account, client.is_public_folder ).test_access() except ErrorFolderNotFound as e: if "Top of Information Store" in str(e): raise Exception( "Success to authenticate, but user probably has no permissions to read from the specific folder." "Check user permissions. 
You can try !ews-find-folders command to " "get all the folders structure that the user has permissions to" ) return "ok" def sub_main(): is_test_module = False params = demisto.params() args = prepare_args(demisto.args()) params['default_target_mailbox'] = args.get('target_mailbox', params['default_target_mailbox']) client = EWSClient(**params) start_logging() try: command = demisto.command() # commands that return a single note result normal_commands = { "ews-get-searchable-mailboxes": get_searchable_mailboxes, "ews-move-item-between-mailboxes": move_item_between_mailboxes, "ews-move-item": move_item, "ews-delete-items": delete_items, "ews-search-mailbox": search_items_in_mailbox, "ews-get-contacts": get_contacts, "ews-get-out-of-office": get_out_of_office_state, "ews-recover-messages": recover_soft_delete_item, "ews-create-folder": create_folder, "ews-mark-item-as-junk": mark_item_as_junk, "ews-find-folders": find_folders, "ews-get-items-from-folder": get_items_from_folder, "ews-get-items": get_items, "ews-get-folder": get_folder, "ews-expand-group": get_expanded_group, "ews-mark-items-as-read": mark_item_as_read, "send-mail": send_email, } # commands that may return multiple results or non-note result special_output_commands = { "ews-get-attachment": fetch_attachments_for_message, "ews-delete-attachment": delete_attachments_for_message, "ews-get-items-as-eml": get_item_as_eml, } # system commands: if command == "test-module": is_test_module = True demisto.results(test_module(client, params.get('max_fetch'))) elif command == "fetch-incidents": last_run = demisto.getLastRun() incidents = fetch_emails_as_incidents(client, last_run) demisto.incidents(incidents) # special outputs commands elif command in special_output_commands: demisto.results(special_output_commands[command](client, **args)) # type: ignore[operator] # normal commands else: output = normal_commands[command](client, **args) # type: ignore[operator] return_outputs(*output) except Exception as e: start_logging() debug_log = log_stream.getvalue() # type: ignore[union-attr] error_message_simple = "" # Office365 regular maintenance case if isinstance(e, ErrorMailboxStoreUnavailable) or isinstance( e, ErrorMailboxMoveInProgress ): log_message = ( "Office365 is undergoing load balancing operations. " "As a result, the service is temporarily unavailable." ) if demisto.command() == "fetch-incidents": demisto.info(log_message) demisto.incidents([]) sys.exit(0) if is_test_module: demisto.results( log_message + " Please retry the instance configuration test." ) sys.exit(0) error_message_simple = log_message + " Please retry your request." if isinstance(e, ConnectionError): error_message_simple = ( "Could not connect to the server.\n" f"Additional information: {str(e)}" ) else: if is_test_module and isinstance(e, MalformedResponseError): error_message_simple = ( "Got invalid response from the server.\n" ) # Legacy error handling if "Status code: 401" in debug_log: error_message_simple = ( "Got unauthorized from the server. " ) if "Status code: 503" in debug_log: error_message_simple = ( "Got timeout from the server. " "Probably the server is not reachable with the current settings. 
" ) if not error_message_simple: error_message = error_message_simple = str(e) else: error_message = error_message_simple + "\n" + str(e) stacktrace = traceback.format_exc() if stacktrace: error_message += "\nFull stacktrace:\n" + stacktrace if debug_log: error_message += "\nFull debug log:\n" + debug_log if demisto.command() == "fetch-incidents": raise if demisto.command() == "ews-search-mailbox" and isinstance(e, ValueError): return_error( message="Selected invalid field, please specify valid field name.", error=e, ) if is_test_module: demisto.results(error_message_simple) else: demisto.results( { "Type": entryTypes["error"], "ContentsFormat": formats["text"], "Contents": error_message_simple, } ) demisto.error(f"{e.__class__.__name__}: {error_message}") finally: exchangelib_cleanup() if log_stream: try: logging.getLogger().removeHandler(log_handler) # type: ignore log_stream.close() except Exception as ex: demisto.error( "EWS: unexpected exception when trying to remove log handler: {}".format( ex ) ) def process_main(): """setup stdin to fd=0 so we can read from the server""" sys.stdin = os.fdopen(0, "r") sub_main() def main(): # When running big queries, like 'ews-search-mailbox' the memory might not freed by the garbage # collector. `separate_process` flag will run the integration on a separate process that will prevent # memory leakage. separate_process = demisto.params().get("separate_process", False) demisto.debug("Running as separate_process: {}".format(separate_process)) if separate_process: try: p = Process(target=process_main) p.start() p.join() except Exception as ex: demisto.error("Failed starting Process: {}".format(ex)) else: sub_main() from MicrosoftApiModule import * # noqa: E402 if __name__ in ("__main__", "__builtin__", "builtins"): main()
test_lock.py
""" Copyright (c) 2008-2020, Jesus Cea Avion <jcea@jcea.es> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Jesus Cea Avion nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ """ TestCases for testing the locking sub-system. """ import time import unittest from .test_all import db, test_support, verbose, have_threads, \ get_new_environment_path, get_new_database_path if have_threads : from threading import Thread import sys if sys.version_info[0] < 3 : from threading import currentThread else : from threading import current_thread as currentThread #---------------------------------------------------------------------- class LockingTestCase(unittest.TestCase): def setUp(self): self.homeDir = get_new_environment_path() self.env = db.DBEnv() self.env.open(self.homeDir, db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_CREATE) def tearDown(self): self.env.close() test_support.rmtree(self.homeDir) def test01_simple(self): if verbose: print('\n', '-=' * 30) print("Running %s.test01_simple..." % self.__class__.__name__) anID = self.env.lock_id() if verbose: print("locker ID: %s" % anID) lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE) if verbose: print("Aquired lock: %s" % lock) self.env.lock_put(lock) if verbose: print("Released lock: %s" % lock) self.env.lock_id_free(anID) def test02_threaded(self): if verbose: print('\n', '-=' * 30) print("Running %s.test02_threaded..." 
% self.__class__.__name__) threads = [] threads.append(Thread(target = self.theThread, args=(db.DB_LOCK_WRITE,))) threads.append(Thread(target = self.theThread, args=(db.DB_LOCK_READ,))) threads.append(Thread(target = self.theThread, args=(db.DB_LOCK_READ,))) threads.append(Thread(target = self.theThread, args=(db.DB_LOCK_WRITE,))) threads.append(Thread(target = self.theThread, args=(db.DB_LOCK_READ,))) threads.append(Thread(target = self.theThread, args=(db.DB_LOCK_READ,))) threads.append(Thread(target = self.theThread, args=(db.DB_LOCK_WRITE,))) threads.append(Thread(target = self.theThread, args=(db.DB_LOCK_WRITE,))) threads.append(Thread(target = self.theThread, args=(db.DB_LOCK_WRITE,))) for t in threads: import sys if sys.version_info[0] < 3 : t.setDaemon(True) else : t.daemon = True t.start() for t in threads: t.join() def test03_lock_timeout(self): self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT) self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 0) self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT) self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 0) self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT) self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 123456) self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT) self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 7890123) def test04_lock_timeout2(self): self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT) self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT) self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT) self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT) def deadlock_detection() : while not deadlock_detection.end : deadlock_detection.count = \ self.env.lock_detect(db.DB_LOCK_EXPIRE) if deadlock_detection.count : while not deadlock_detection.end : pass break time.sleep(0.01) deadlock_detection.end=False deadlock_detection.count=0 t=Thread(target=deadlock_detection) import sys if sys.version_info[0] < 3 : t.setDaemon(True) else : t.daemon = True t.start() self.env.set_timeout(100000, db.DB_SET_LOCK_TIMEOUT) anID = self.env.lock_id() anID2 = self.env.lock_id() self.assertNotEqual(anID, anID2) lock = self.env.lock_get(anID, "shared lock", db.DB_LOCK_WRITE) start_time=time.time() self.assertRaises(db.DBLockNotGrantedError, self.env.lock_get,anID2, "shared lock", db.DB_LOCK_READ) end_time=time.time() deadlock_detection.end=True # Floating point rounding self.assertTrue((end_time-start_time) >= 0.0999) self.env.lock_put(lock) t.join() self.env.lock_id_free(anID) self.env.lock_id_free(anID2) self.assertTrue(deadlock_detection.count>0) def theThread(self, lockType): import sys if sys.version_info[0] < 3 : name = currentThread().getName() else : name = currentThread().name if lockType == db.DB_LOCK_WRITE: lt = "write" else: lt = "read" anID = self.env.lock_id() if verbose: print("%s: locker ID: %s" % (name, anID)) for i in range(1000) : lock = self.env.lock_get(anID, "some locked thing", lockType) if verbose: print("%s: Aquired %s lock: %s" % (name, lt, lock)) self.env.lock_put(lock) if verbose: print("%s: Released %s lock: %s" % (name, lt, lock)) self.env.lock_id_free(anID) #---------------------------------------------------------------------- def test_suite(): suite = unittest.TestSuite() if have_threads: suite.addTest(unittest.makeSuite(LockingTestCase)) else: suite.addTest(unittest.makeSuite(LockingTestCase, 'test01')) return suite if __name__ == '__main__': unittest.main(defaultTest='test_suite')
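

# A condensed sketch of the lock round trip the tests above exercise: one
# locker id, one write lock on a named resource, then release.  It reuses the
# `db` binding imported at the top of this module; `home_dir` is a placeholder
# for any writable environment directory.
def _lock_roundtrip_sketch(home_dir):
    env = db.DBEnv()
    env.open(home_dir,
             db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_CREATE)
    try:
        locker = env.lock_id()
        lock = env.lock_get(locker, "some locked thing", db.DB_LOCK_WRITE)
        env.lock_put(lock)
        env.lock_id_free(locker)
    finally:
        env.close()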
test_thread.py
from __future__ import print_function
import datetime
import threading
import time

import caffi.ca as ca


def setup_module(module):
    # create explicitly a preemptive enabled context
    # so that it can be attached in other threads
    status = ca.create_context(True)
    assert status == ca.ECA.NORMAL

    global ctx
    ctx = ca.current_context()

    # create channel
    global chid
    status, chid = ca.create_channel('catest')
    assert status == ca.ECA.NORMAL

    # wait for connections
    status = ca.pend_io(3)
    assert status == ca.ECA.NORMAL

    # put
    status = ca.put(chid, 0)
    assert status == ca.ECA.NORMAL
    ca.flush_io()

    def monitor(epics_arg):
        print(datetime.datetime.fromtimestamp(epics_arg['value']['stamp']['timestamp']),
              epics_arg['value']['value'])

    global evid
    status, evid = ca.create_subscription(chid, monitor, chtype=ca.DBR.TIME_DOUBLE)


def thread_even():
    status = ca.attach_context(ctx)
    assert status == ca.ECA.NORMAL
    for i in range(0, 200, 2):
        ca.put(chid, i)
    ca.flush_io()


def thread_odd():
    status = ca.attach_context(ctx)
    assert status == ca.ECA.NORMAL
    for i in range(1, 200, 2):
        ca.put(chid, i)
    ca.flush_io()


def test_thread():
    tid1 = threading.Thread(target=thread_odd)
    tid2 = threading.Thread(target=thread_even)

    tid1.start()
    tid2.start()

    tid1.join()
    tid2.join()

    # wait for the last monitor to arrive
    time.sleep(2)

    # the value should be either 199 or 198, depending on which thread ends last
    status, value = ca.get(chid)
    assert status == ca.ECA.NORMAL
    status = ca.pend_io(3)
    assert status == ca.ECA.NORMAL
    assert value.get() in [198, 199]


def teardown_module(module):
    # clear subscription
    ca.clear_subscription(evid)

    # clear channel
    ca.clear_channel(chid)
    ca.flush_io()

    # destroy context
    ca.destroy_context()
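

# A minimal sketch of the threading pattern used above, assuming `context` and
# `channel` were prepared the same way as in setup_module(): the preemptive
# context created in the main thread must be attached inside each worker
# thread before that thread makes any Channel Access call.
def _worker_put_sketch(context, channel, values):
    def worker():
        assert ca.attach_context(context) == ca.ECA.NORMAL
        for v in values:
            ca.put(channel, v)
        ca.flush_io()

    t = threading.Thread(target=worker)
    t.start()
    t.join()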
instance.py
import abc import asyncio import asyncio.subprocess import functools import logging import math import os import random import re import select import shutil import socket import string import subprocess import time from typing import Optional import yaml from threading import Thread __all__ = ( 'TarantoolInstanceProtocol', 'TarantoolInstance', 'TarantoolAsyncInstance', 'TarantoolSyncInstance', 'TarantoolSyncDockerInstance' ) from asynctnt.utils import get_running_loop VERSION_STRING_REGEX = re.compile(r'\s*([\d.]+).*') class TarantoolInstanceProtocol(asyncio.SubprocessProtocol): def __init__(self, tnt, on_exit): super().__init__() self._tnt = tnt self._on_exit = on_exit self._transport = None @property def logger(self): return self._tnt.logger @property def pid(self): return self._transport.get_pid() if self._transport else None def connection_made(self, transport): self.logger.info('Process started') self._transport = transport def pipe_data_received(self, fd, data): if not data: return line = data.decode() line = line.replace('\r', '') lines = line.split('\n') for line in lines: line = line.replace('\n', '') line = line.strip() if line: self.logger.info('=> %s', line) def process_exited(self): return_code = self._transport.get_returncode() if callable(self._on_exit): self._on_exit(return_code) @property def returncode(self): return self._transport.get_returncode() async def wait(self): """Wait until the process exit and return the process return code. This method is a coroutine.""" return await self._transport._wait() def send_signal(self, signal): self._transport.send_signal(signal) def terminate(self): self._transport.terminate() def kill(self): self._transport.kill() class TarantoolInstance(metaclass=abc.ABCMeta): def __init__(self, *, host='127.0.0.1', port=3301, console_host=None, console_port=3302, replication_source=None, title=None, logger=None, log_level=5, slab_alloc_arena=0.1, wal_mode='none', root=None, specify_work_dir=True, cleanup=True, initlua_template=None, applua='-- app.lua --', timeout=5., command_to_run='tarantool', command_args=None): """ :param host: The host which Tarantool instance is going to be listening on (default = 127.0.0.1) :param port: The port which Tarantool instance is going to be listening on (default = 3301) :param console_host: The host which Tarantool console is going to be listening on (to execute admin commands) (default = host) :param console_port: The port which Tarantool console is going to be listening on (to execute admin commands) (default = 3302) :param replication_source: The replication source string. If it's None, then replication_source=nil in box.cfg :param title: Tarantool instance title (substitutes into custom_proc_title). (default = "tnt[host:port]") :param logger: logger, where all messages are logged to. 
default logger name = Tarantool[host:port] :param log_level: Tarantool's log_level (default = 5) :param slab_alloc_arena: Tarantool's slab_alloc_arena (default = 0.1) :param wal_mode: Tarantool's wal_mode (default = 'none') :param root: Tarantool's work_dir location :param specify_work_dir: Specify or not the workdir of Tarantool :param cleanup: do cleanup or not :param initlua_template: The initial init.lua template (default can be found in _create_initlua_template function) :param applua: Any extra lua script (a string) (default = '-- app.lua --') :param timeout: Timeout in seconds - how much to wait for tarantool to become active :param command_to_run: command exe :param command_args: command args """ self._host = host self._port = port self._console_host = console_host or host self._console_port = console_port self._replication_source = replication_source self._title = title or self._generate_title() self._logger = logger or logging.getLogger(self.fingerprint) self._log_level = log_level self._slab_alloc_arena = slab_alloc_arena self._wal_mode = wal_mode self._root = root or self._generate_root_folder_name() self._specify_work_dir = specify_work_dir self._cleanup = cleanup self._initlua_template = initlua_template or \ self._create_initlua_template() self._applua = applua self._command_to_run = command_to_run self._command_args = command_args self._timeout = timeout self._is_running = False @property def replication_source(self): return self._replication_source @replication_source.setter def replication_source(self, value): self._replication_source = value def _random_string(self, length, *, source=string.ascii_uppercase + string.ascii_lowercase + string.digits): return ''.join(random.choice(source) for _ in range(length)) def _generate_title(self): return 'tnt[{}:{}]'.format(self._host, self._port) def _generate_root_folder_name(self): cwd = os.getcwd() path = None while path is None or os.path.isdir(path): folder_name = '__tnt__' + \ self._random_string(10) path = os.path.join(cwd, folder_name) return path @staticmethod def get_random_port(): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(('', 0)) port = sock.getsockname()[1] sock.close() return port def _create_initlua_template(self): return """ local function check_version(expected, version) -- from tarantool/queue compat.lua local fun = require 'fun' local iter, op = fun.iter, fun.operator local function split(self, sep) local sep, fields = sep or ":", {} local pattern = string.format("([^%s]+)", sep) self:gsub(pattern, function(c) table.insert(fields, c) end) return fields end local function reducer(res, l, r) if res ~= nil then return res end if tonumber(l) == tonumber(r) then return nil end return tonumber(l) > tonumber(r) end local function split_version(version_string) local vtable = split(version_string, '.') local vtable2 = split(vtable[3], '-') vtable[3], vtable[4] = vtable2[1], vtable2[2] return vtable end local function check_version_internal(expected, version) version = version or _TARANTOOL if type(version) == 'string' then version = split_version(version) end local res = iter(version):zip(expected) :reduce(reducer, nil) if res or res == nil then res = true end return res end return check_version_internal(expected, version) end local cfg = { listen = "${host}:${port}", wal_mode = "${wal_mode}", custom_proc_title = "${custom_proc_title}", slab_alloc_arena = ${slab_alloc_arena}, work_dir = ${work_dir}, log_level = ${log_level} } if check_version({1, 7}, _TARANTOOL) then cfg.replication = 
${replication_source} else local repl = ${replication_source} if type(repl) == 'table' then repl = table.concat(repl, ',') end cfg.replication_source = repl end require('console').listen("${console_host}:${console_port}") box.cfg(cfg) box.schema.user.grant("guest", "read,write,execute", "universe", nil, {if_not_exists = true}) ${applua} """ def _render_initlua(self): template = string.Template(self._initlua_template) if not self._replication_source: replication = 'nil' elif isinstance(self._replication_source, str): replication = '"{}"'.format(self._replication_source) elif isinstance(self._replication_source, (list, tuple)): replication = ['"{}"'.format(e) for e in self._replication_source] replication = ",".join(replication) replication = "{" + replication + "}" else: raise TypeError('replication is of unsupported type') work_dir = 'nil' if self._specify_work_dir: work_dir = '"' + self._root + '"' d = { 'host': self._host, 'port': self._port, 'console_host': self._console_host, 'console_port': self._console_port, 'wal_mode': self._wal_mode, 'custom_proc_title': self._title, 'slab_alloc_arena': self._slab_alloc_arena, 'replication_source': replication, 'work_dir': work_dir, 'log_level': self._log_level, 'applua': self._applua if self._applua else '' } return template.substitute(d) def _save_initlua(self, initlua): initlua = initlua.replace(' ' * 4, '') initlua_path = os.path.join(self._root, 'init.lua') with open(initlua_path, 'w') as f: f.write(initlua) return initlua_path @property def logger(self): return self._logger @property def fingerprint(self): return 'Tarantool[{}:{}]'.format(self._host, self._port) def prepare(self, recreate): if recreate and os.path.exists(self._root): shutil.rmtree(self._root, ignore_errors=True) if not os.path.exists(self._root): os.mkdir(self._root) initlua = self._render_initlua() initlua_path = self._save_initlua(initlua) return initlua_path @property def host(self): return self._host @property def port(self): return self._port @property def console_port(self): return self._console_port @property def is_running(self): return self._is_running @property @abc.abstractmethod def pid(self): raise NotImplementedError @abc.abstractmethod def command(self, cmd, print_greeting=True): raise NotImplementedError @abc.abstractmethod def start(self, *, wait=True, recreate=True): raise NotImplementedError @abc.abstractmethod def stop(self): raise NotImplementedError @abc.abstractmethod def terminate(self): raise NotImplementedError @abc.abstractmethod def kill(self): raise NotImplementedError def cleanup(self): self._is_running = False if self._cleanup: shutil.rmtree(self._root, ignore_errors=True) if self.host == 'unix/': shutil.rmtree(self.port, ignore_errors=True) self._logger.info('Destroyed Tarantool instance (%s)', self._title) class TcpSocket: BUFFER_SIZE = 1024 def __init__(self, host, port): self._host = host self._port = port self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(self): self._sock.connect((self._host, self._port)) def close(self): if self._sock is not None: self._sock.close() def __enter__(self): self.connect() return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() def write(self, data, flags=0): self._sock.sendall(data, flags) def read(self, n, flags=0): buf = bytearray() bytes_recd = 0 while bytes_recd < n: chunk = self._sock.recv(self.BUFFER_SIZE, flags) if chunk == b'': raise RuntimeError("socket connection broken") buf.extend(chunk) bytes_recd += len(chunk) return bytes(buf) def read_until(self, 
separator=b'', flags=0): buf = bytearray() search_start = 0 while True: chunk = self._sock.recv(self.BUFFER_SIZE, flags) if chunk == b'': raise RuntimeError("socket connection broken") buf.extend(chunk) pos = buf.find(separator, search_start) if pos != -1: return bytes(buf[:(pos + len(separator))]) search_start = len(buf) - len(separator) - 1 class TarantoolSyncInstance(TarantoolInstance): WAIT_TIMEOUT = 5 def __init__(self, **kwargs): super().__init__(**kwargs) self._process = None self._logger_thread = None def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.stop() @property def pid(self): return self._process.pid if self._process is not None else None def start(self, *, wait=True, recreate=True): self._logger.info('Starting Tarantool instance (%s)', self._title) initlua_path = self.prepare(recreate) self._logger.info('Launching process') if not self._command_args: args = [self._command_to_run, initlua_path] else: args = [self._command_to_run, *self._command_args] flags = 0 if os.name == 'nt': flags |= subprocess.CREATE_NEW_PROCESS_GROUP self._process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=flags) self._logger_thread = Thread(target=self._log_reader) self._logger_thread.start() if not wait: self._is_running = True return interval = 0.1 attempts = math.ceil(self._timeout / interval) while attempts > 0: try: status = self.command('box.info.status', print_greeting=True) if status: status = status[0] if status == 'running': self._logger.info('Moved to the running state') break except (OSError, RuntimeError): pass time.sleep(interval) attempts -= 1 else: raise TimeoutError( 'Timeout while waiting for Tarantool to move to running state') self._is_running = True def _log_reader(self): def check_io(): fds = [] for h in [self._process.stdout, self._process.stderr]: if h is not None and not h.closed: fds.append(h) if not fds: return False try: ready_to_read = select.select(fds, [], [], 10)[0] except (ValueError, OSError): # I/O operation on a closed socket return False for io in ready_to_read: if io.closed: continue try: line = io.readline() except ValueError: # assuming it's just an fd error, so skip # PyMemoryView_FromBuffer(): info->buf must not be NULL continue line = line.decode() if len(line) > 0: self._logger.info(line[:-1]) return True while self._is_running and self._process.poll() is None: if not check_io(): break check_io() def stop(self): if self._process is not None: self._process.terminate() self._logger.info('Waiting for process to complete') self._wait(self.WAIT_TIMEOUT, wait=True) self.cleanup() def terminate(self): if self._process is not None: self._process.terminate() self._wait(self.WAIT_TIMEOUT, wait=False) self.cleanup() def kill(self): if self._process is not None: self._process.kill() self.cleanup() def _wait(self, timeout, wait=True): if self._process: if wait: try: self._process.wait(timeout) except subprocess.TimeoutExpired: pass try: os.kill(self._process.pid, 0) self._process.kill() self.logger.warning('Force killed %s', self.fingerprint) except OSError: pass def cleanup(self): if self._process is not None: for h in [self._process.stdout, self._process.stderr, self._process.stdin]: if h is not None: h.close() self._is_running = False if self._logger_thread is not None: self._logger_thread.join() super().cleanup() def version(self) -> Optional[tuple]: res = self.command("box.info.version") if not res: return None res = res[0] m = VERSION_STRING_REGEX.match(res) if m is not None: ver = 
m.group(1) return tuple(map(int, ver.split('.'))) def command(self, cmd, print_greeting=True): s = TcpSocket(self._console_host, self._console_port) try: s.connect() greeting = s.read(128).decode() if print_greeting: self._logger.info(greeting) if isinstance(cmd, str): cmd = cmd.encode('utf-8') s.write(cmd + b'\n') data = s.read_until(b'...\n').decode() data = yaml.full_load(data) return data finally: s.close() class TarantoolAsyncInstance(TarantoolInstance): def __init__(self, **kwargs): super().__init__(**kwargs) self._loop = get_running_loop(kwargs.pop('loop', None)) self._is_stopping = False self._transport = None self._protocol = None self._last_return_code = None self._stop_event = asyncio.Event() @property def pid(self): return self._protocol.pid if self._protocol else None def prepare(self, recreate): self._last_return_code = None return super().prepare(recreate) def _on_process_exit(self, return_code): self._last_return_code = return_code if self._is_stopping: return self._stop_event.set() self.cleanup() async def wait_stopped(self): return await self._stop_event.wait() async def version(self): return await self.command("box.info.version") async def command(self, cmd, print_greeting=True): reader, writer = await asyncio.open_connection(self._console_host, self._console_port) greeting = (await reader.read(128)).decode() if print_greeting: self._logger.info(greeting) try: if isinstance(cmd, str): cmd = cmd.encode('utf-8') writer.write(cmd + b'\n') data = (await reader.readuntil(b'...\n')).decode() data = yaml.full_load(data) return data finally: writer.close() async def start(self, *, wait=True, recreate=True): self._logger.info('Starting Tarantool instance (%s)', self._title) self._stop_event.clear() initlua_path = self.prepare(recreate) self._logger.info('Launching process') factory = functools.partial( TarantoolInstanceProtocol, self, self._on_process_exit) if not self._command_args: args = [initlua_path] else: args = self._command_args self._transport, self._protocol = await self._loop.subprocess_exec( factory, self._command_to_run, *args, stdin=None, stderr=asyncio.subprocess.PIPE ) if not wait: self._is_running = True return interval = 0.1 attempts = math.ceil(self._timeout / interval) while attempts > 0: if self._protocol is None or self._protocol.returncode is not None: raise RuntimeError( '{} exited unexpectedly with exit code {}'.format( self.fingerprint, self._last_return_code) ) try: status = await self.command('box.info.status', print_greeting=False) if status: status = status[0] if status == 'running': self._logger.info('Moved to the running state') break except OSError: pass await asyncio.sleep(interval) attempts -= 1 else: raise asyncio.TimeoutError( 'Timeout while waiting for Tarantool to move to running state') self._is_running = True async def stop(self): if self._protocol is not None: self._is_stopping = True self._protocol.terminate() if not self._is_running: return self._logger.info('Waiting for process to complete') await self._protocol.wait() self.cleanup() def terminate(self): if self._protocol is not None: self._is_stopping = True self._protocol.terminate() self.cleanup() def kill(self): if self._protocol is not None: self._is_stopping = True self._protocol.kill() self.cleanup() def cleanup(self): return_code = self._protocol.returncode self._logger.info('Finished with return code %d', return_code) self._is_stopping = False if self._transport: self._transport.close() self._transport = None self._protocol = None self._stop_event.clear() super().cleanup() 
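

# A minimal usage sketch for TarantoolAsyncInstance, assuming a `tarantool`
# binary on PATH and free random ports: start the instance, run one console
# command, then stop it.
async def _async_instance_sketch():
    tnt = TarantoolAsyncInstance(
        port=TarantoolInstance.get_random_port(),
        console_port=TarantoolInstance.get_random_port(),
    )
    await tnt.start()
    try:
        print(await tnt.command('box.info.status'))   # e.g. ['running']
    finally:
        await tnt.stop()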
class TarantoolSyncDockerInstance(TarantoolSyncInstance): def __init__(self, *, docker_image=None, docker_tag=None, host='0.0.0.0', port=3301, console_host=None, console_port=3302, replication_source=None, title=None, logger=None, log_level=5, slab_alloc_arena=0.1, wal_mode='none', initlua_template=None, applua='-- app.lua --', timeout=10.): super().__init__(host=host, port=port, console_host=console_host, console_port=console_port, replication_source=replication_source, title=title, logger=logger, log_level=log_level, slab_alloc_arena=slab_alloc_arena, wal_mode=wal_mode, root=None, specify_work_dir=False, cleanup=True, initlua_template=initlua_template, applua=applua, timeout=timeout) self._docker_image = docker_image or 'tarantool/tarantool' self._docker_tag = docker_tag or '1' cmd = "docker run --rm " \ "-p {port}:{port} " \ "-p {console_port}:{console_port} " \ "-v {root}:/opt/tarantool " \ "{docker_image}:{docker_tag} " \ "tarantool /opt/tarantool/init.lua" cmd = cmd.format( port=self.port, console_port=self.console_port, root=self._root, docker_image=self._docker_image, docker_tag=self._docker_tag ) self.logger.debug(cmd) args = cmd.split(' ') self._command_to_run = args[0] self._command_args = args[1:]
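

# The synchronous variant follows the same pattern; a minimal sketch assuming
# a local `tarantool` binary (or, for TarantoolSyncDockerInstance, a working
# `docker` setup).  The context manager guarantees stop() and cleanup of the
# generated work directory.
def _sync_instance_sketch():
    with TarantoolSyncInstance(
        port=TarantoolInstance.get_random_port(),
        console_port=TarantoolInstance.get_random_port(),
    ) as tnt:
        tnt.start()
        print(tnt.version())                    # e.g. (2, 11, 0)
        print(tnt.command('box.info.status'))   # e.g. ['running']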
protocol.py
""" The RPyC protocol """ import sys import weakref import itertools import socket import time import gc from threading import Lock, RLock, Event, Thread from rpyc.lib.compat import pickle, next, is_py3k, maxint, select_error from rpyc.lib.colls import WeakValueDict, RefCountingColl from rpyc.core import consts, brine, vinegar, netref from rpyc.core.async import AsyncResult class PingError(Exception): """The exception raised should :func:`Connection.ping` fail""" pass DEFAULT_CONFIG = dict( # ATTRIBUTES allow_safe_attrs = True, allow_exposed_attrs = True, allow_public_attrs = False, allow_all_attrs = False, safe_attrs = set(['__abs__', '__add__', '__and__', '__bool__', '__cmp__', '__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__', '__doc__', '__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__', '__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__', '__ilshift__', '__imod__', '__imul__', '__index__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__', '__nonzero__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__', '__rand__', '__rdiv__', '__rdivmod__', '__repr__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__str__', '__sub__', '__truediv__', '__xor__', 'next', '__length_hint__', '__enter__', '__exit__', '__next__',]), exposed_prefix = "exposed_", allow_getattr = True, allow_setattr = False, allow_delattr = False, # EXCEPTIONS include_local_traceback = True, instantiate_custom_exceptions = False, import_custom_exceptions = False, instantiate_oldstyle_exceptions = False, # which don't derive from Exception propagate_SystemExit_locally = False, # whether to propagate SystemExit locally or to the other party propagate_KeyboardInterrupt_locally = True, # whether to propagate KeyboardInterrupt locally or to the other party log_exceptions = True, # MISC allow_pickle = False, connid = None, credentials = None, endpoints = None, logger = None, sync_request_timeout = 30, ) """ The default configuration dictionary of the protocol. You can override these parameters by passing a different configuration dict to the :class:`Connection` class. .. note:: You only need to override the parameters you want to change. There's no need to repeat parameters whose values remain unchanged. 
======================================= ================ ===================================================== Parameter Default value Description ======================================= ================ ===================================================== ``allow_safe_attrs`` ``True`` Whether to allow the use of *safe* attributes (only those listed as ``safe_attrs``) ``allow_exposed_attrs`` ``True`` Whether to allow exposed attributes (attributes that start with the ``exposed_prefix``) ``allow_public_attrs`` ``False`` Whether to allow public attributes (attributes that don't start with ``_``) ``allow_all_attrs`` ``False`` Whether to allow all attributes (including private) ``safe_attrs`` ``set([...])`` The set of attributes considered safe ``exposed_prefix`` ``"exposed_"`` The prefix of exposed attributes ``allow_getattr`` ``True`` Whether to allow getting of attributes (``getattr``) ``allow_setattr`` ``False`` Whether to allow setting of attributes (``setattr``) ``allow_delattr`` ``False`` Whether to allow deletion of attributes (``delattr``) ``allow_pickle`` ``False`` Whether to allow the use of ``pickle`` ``include_local_traceback`` ``True`` Whether to include the local traceback in the remote exception ``instantiate_custom_exceptions`` ``False`` Whether to allow instantiation of custom exceptions (not the built in ones) ``import_custom_exceptions`` ``False`` Whether to allow importing of exceptions from not-yet-imported modules ``instantiate_oldstyle_exceptions`` ``False`` Whether to allow instantiation of exceptions which don't derive from ``Exception``. This is not applicable for Python 3 and later. ``propagate_SystemExit_locally`` ``False`` Whether to propagate ``SystemExit`` locally (kill the server) or to the other party (kill the client) ``propagate_KeyboardInterrupt_locally`` ``False`` Whether to propagate ``KeyboardInterrupt`` locally (kill the server) or to the other party (kill the client) ``logger`` ``None`` The logger instance to use to log exceptions (before they are sent to the other party) and other events. If ``None``, no logging takes place. ``connid`` ``None`` **Runtime**: the RPyC connection ID (used mainly for debugging purposes) ``credentials`` ``None`` **Runtime**: the credentails object that was returned by the server's :ref:`authenticator <api-authenticators>` or ``None`` ``endpoints`` ``None`` **Runtime**: The connection's endpoints. This is a tuple made of the local socket endpoint (``getsockname``) and the remote one (``getpeername``). This is set by the server upon accepting a connection; client side connections do no have this configuration option set. ``sync_request_timeout`` ``30`` Default timeout for waiting results ======================================= ================ ===================================================== """ _connection_id_generator = itertools.count(1) class Connection(object): """The RPyC *connection* (AKA *protocol*). :param service: the :class:`Service <rpyc.core.service.Service>` to expose :param channel: the :class:`Channel <rpyc.core.channel.Channel>` over which messages are passed :param config: the connection's configuration dict (overriding parameters from the :data:`default configuration <DEFAULT_CONFIG>`) :param _lazy: whether or not to initialize the service with the creation of the connection. Default is True. 
If set to False, you will need to call :func:`_init_service` manually later """ def __init__(self, service, channel, config = {}, _lazy = False): self._closed = True self._config = DEFAULT_CONFIG.copy() self._config.update(config) if self._config["connid"] is None: self._config["connid"] = "conn%d" % (next(_connection_id_generator),) self._channel = channel self._seqcounter = itertools.count() self._recvlock = Lock() self._sendlock = Lock() self._sync_replies = {} self._sync_lock = RLock() self._sync_event = Event() self._async_callbacks = {} self._local_objects = RefCountingColl() self._last_traceback = None self._proxy_cache = WeakValueDict() self._netref_classes_cache = {} self._remote_root = None self._send_queue = [] self._local_root = service(weakref.proxy(self)) if not _lazy: self._init_service() self._closed = False def _init_service(self): self._local_root.on_connect() def __del__(self): self.close() def __enter__(self): return self def __exit__(self, t, v, tb): self.close() def __repr__(self): a, b = object.__repr__(self).split(" object ") return "%s %r object %s" % (a, self._config["connid"], b) # # IO # def _cleanup(self, _anyway = True): if self._closed and not _anyway: return self._closed = True self._channel.close() self._local_root.on_disconnect() self._sync_replies.clear() self._async_callbacks.clear() self._local_objects.clear() self._proxy_cache.clear() self._netref_classes_cache.clear() self._last_traceback = None self._remote_root = None self._local_root = None #self._seqcounter = None #self._config.clear() def close(self, _catchall = True): """closes the connection, releasing all held resources""" if self._closed: return self._closed = True try: self._async_request(consts.HANDLE_CLOSE) except EOFError: pass except Exception: if not _catchall: raise finally: self._cleanup(_anyway = True) @property def closed(self): """Indicates whether the connection has been closed or not""" return self._closed def fileno(self): """Returns the connectin's underlying file descriptor""" return self._channel.fileno() def ping(self, data = None, timeout = 3): """ Asserts that the other party is functioning properly, by making sure the *data* is echoed back before the *timeout* expires :param data: the data to send (leave ``None`` for the default buffer) :param timeout: the maximal time to wait for echo :raises: :class:`PingError` if the echoed data does not match """ if data is None: data = "abcdefghijklmnopqrstuvwxyz" * 20 res = self.async_request(consts.HANDLE_PING, data, timeout = timeout) if res.value != data: raise PingError("echo mismatches sent data") def _get_seq_id(self): return next(self._seqcounter) def _send(self, msg, seq, args): data = brine.dump((msg, seq, args)) # GC might run while sending data # if so, a BaseNetref.__del__ might be called # BaseNetref.__del__ must call asyncreq, # which will cause a deadlock # Solution: # Add the current request to a queue and let the thread that currently # holds the sendlock send it when it's done with its current job. # NOTE: Atomic list operations should be thread safe, # please call me out if they are not on all implementations! self._send_queue.append(data) # It is crucial to check the queue each time AFTER releasing the lock: while self._send_queue: if not self._sendlock.acquire(False): # Another thread holds the lock. It will send the data after # it's done with its current job. We can safely return. 
return try: # Can happen if another consumer was scheduled in between # `while` and `acquire`: if not self._send_queue: # Must `continue` to ensure that `send_queue` is checked # after releasing the lock! (in case another producer is # scheduled before `release`) continue data = self._send_queue.pop(0) self._channel.send(data) finally: self._sendlock.release() def _send_request(self, seq, handler, args): self._send(consts.MSG_REQUEST, seq, (handler, self._box(args))) def _send_reply(self, seq, obj): self._send(consts.MSG_REPLY, seq, self._box(obj)) def _send_exception(self, seq, exctype, excval, exctb): exc = vinegar.dump(exctype, excval, exctb, include_local_traceback = self._config["include_local_traceback"]) self._send(consts.MSG_EXCEPTION, seq, exc) # # boxing # def _box(self, obj): """store a local object in such a way that it could be recreated on the remote party either by-value or by-reference""" if brine.dumpable(obj): return consts.LABEL_VALUE, obj if type(obj) is tuple: return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj) elif isinstance(obj, netref.BaseNetref) and obj.____conn__() is self: return consts.LABEL_LOCAL_REF, obj.____oid__ else: self._local_objects.add(obj) try: cls = obj.__class__ except Exception: # see issue #16 cls = type(obj) if not isinstance(cls, type): cls = type(obj) return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__, cls.__module__) def _unbox(self, package): """recreate a local object representation of the remote object: if the object is passed by value, just return it; if the object is passed by reference, create a netref to it""" label, value = package if label == consts.LABEL_VALUE: return value if label == consts.LABEL_TUPLE: return tuple(self._unbox(item) for item in value) if label == consts.LABEL_LOCAL_REF: return self._local_objects[value] if label == consts.LABEL_REMOTE_REF: oid, clsname, modname = value if oid in self._proxy_cache: proxy = self._proxy_cache[oid] proxy.____refcount__ += 1 # other side increased refcount on boxing, # if I'm returning from cache instead of new object, # must increase refcount to match return proxy proxy = self._netref_factory(oid, clsname, modname) self._proxy_cache[oid] = proxy return proxy raise ValueError("invalid label %r" % (label,)) def _netref_factory(self, oid, clsname, modname): typeinfo = (clsname, modname) if typeinfo in self._netref_classes_cache: cls = self._netref_classes_cache[typeinfo] elif typeinfo in netref.builtin_classes_cache: cls = netref.builtin_classes_cache[typeinfo] else: info = self.sync_request(consts.HANDLE_INSPECT, oid) cls = netref.class_factory(clsname, modname, info) self._netref_classes_cache[typeinfo] = cls return cls(weakref.ref(self), oid) # # dispatching # def _dispatch_request(self, seq, raw_args): try: handler, args = raw_args args = self._unbox(args) res = self._HANDLERS[handler](self, *args) except: # need to catch old style exceptions too t, v, tb = sys.exc_info() self._last_traceback = tb logger = self._config["logger"] if logger and t is not StopIteration: logger.debug("Exception caught", exc_info=True) if t is SystemExit and self._config["propagate_SystemExit_locally"]: raise if t is KeyboardInterrupt and self._config["propagate_KeyboardInterrupt_locally"]: raise self._send_exception(seq, t, v, tb) else: self._send_reply(seq, res) def _dispatch_reply(self, seq, raw): obj = self._unbox(raw) if seq in self._async_callbacks: self._async_callbacks.pop(seq)(False, obj) else: self._sync_replies[seq] = (False, obj) def _dispatch_exception(self, seq, raw): obj = 
vinegar.load(raw, import_custom_exceptions = self._config["import_custom_exceptions"], instantiate_custom_exceptions = self._config["instantiate_custom_exceptions"], instantiate_oldstyle_exceptions = self._config["instantiate_oldstyle_exceptions"]) if seq in self._async_callbacks: self._async_callbacks.pop(seq)(True, obj) else: self._sync_replies[seq] = (True, obj) # # serving # def _recv(self, timeout, wait_for_lock): if not self._recvlock.acquire(wait_for_lock): return None try: if self._channel.poll(timeout): data = self._channel.recv() else: data = None except EOFError: self.close() raise finally: self._recvlock.release() return data def _dispatch(self, data): msg, seq, args = brine.load(data) if msg == consts.MSG_REQUEST: self._dispatch_request(seq, args) elif msg == consts.MSG_REPLY: self._dispatch_reply(seq, args) elif msg == consts.MSG_EXCEPTION: self._dispatch_exception(seq, args) else: raise ValueError("invalid message type: %r" % (msg,)) def sync_recv_and_dispatch(self, timeout, wait_for_lock): # lock or wait for signal if self._sync_lock.acquire(False): try: self._sync_event.clear() data = self._recv(timeout, wait_for_lock = False) if not data: return False self._dispatch(data) return True finally: self._sync_lock.release() self._sync_event.set() else: self._sync_event.wait() def poll(self, timeout = 0): """Serves a single transaction, should one arrives in the given interval. Note that handling a request/reply may trigger nested requests, which are all part of a single transaction. :returns: ``True`` if a transaction was served, ``False`` otherwise""" return self.sync_recv_and_dispatch(timeout, wait_for_lock=False) def serve(self, timeout = 1): """Serves a single request or reply that arrives within the given time frame (default is 1 sec). Note that the dispatching of a request might trigger multiple (nested) requests, thus this function may be reentrant. :returns: ``True`` if a request or reply were received, ``False`` otherwise. """ return self.sync_recv_and_dispatch(timeout, wait_for_lock=True) def serve_all(self): """Serves all requests and replies for as long as the connection is alive.""" try: while True: self.serve(None) except (socket.error, select_error, IOError): if not self.closed: raise except EOFError: pass finally: self.close() def serve_threaded(self, thread_count=10): def _thread_target(): try: while True: self.serve(None) except (socket.error, select_error, IOError): if not self.closed: raise except EOFError: pass threads = [] """Serves all requests and replies for as long as the connection is alive.""" try: for _ in range(thread_count): thread = Thread(target=_thread_target) thread.daemon = True thread.start() threads.append(thread) for thread in threads: thread.join() finally: self.close() def poll_all(self, timeout=0): """Serves all requests and replies that arrive within the given interval. 
:returns: ``True`` if at least a single transaction was served, ``False`` otherwise """ at_least_once = False t0 = time.time() duration = timeout try: while True: if self.poll(duration): at_least_once = True if timeout is not None: duration = t0 + timeout - time.time() if duration < 0: break except EOFError: pass return at_least_once # # requests # def sync_request(self, handler, *args): """Sends a synchronous request (waits for the reply to arrive) :raises: any exception that the requets may be generated :returns: the result of the request """ seq = self._get_seq_id() self._send_request(seq, handler, args) timeout = self._config["sync_request_timeout"] while seq not in self._sync_replies: self.sync_recv_and_dispatch(timeout, True) isexc, obj = self._sync_replies.pop(seq) if isexc: raise obj else: return obj def _async_request(self, handler, args = (), callback = (lambda a, b: None)): seq = self._get_seq_id() self._async_callbacks[seq] = callback try: self._send_request(seq, handler, args) except: if seq in self._async_callbacks: del self._async_callbacks[seq] raise def async_request(self, handler, *args, **kwargs): """Send an asynchronous request (does not wait for it to finish) :returns: an :class:`rpyc.core.async.AsyncResult` object, which will eventually hold the result (or exception) """ timeout = kwargs.pop("timeout", None) if kwargs: raise TypeError("got unexpected keyword argument(s) %s" % (list(kwargs.keys()),)) res = AsyncResult(weakref.proxy(self)) self._async_request(handler, args, res) if timeout is not None: res.set_expiry(timeout) return res @property def root(self): """Fetches the root object (service) of the other party""" if self._remote_root is None: self._remote_root = self.sync_request(consts.HANDLE_GETROOT) return self._remote_root # # attribute access # def _check_attr(self, obj, name): if self._config["allow_exposed_attrs"]: if name.startswith(self._config["exposed_prefix"]): name2 = name else: name2 = self._config["exposed_prefix"] + name if hasattr(obj, name2): return name2 if self._config["allow_all_attrs"]: return name if self._config["allow_safe_attrs"] and name in self._config["safe_attrs"]: return name if self._config["allow_public_attrs"] and not name.startswith("_"): return name return False def _access_attr(self, oid, name, args, overrider, param, default): if is_py3k: if type(name) is bytes: name = str(name, "utf8") elif type(name) is not str: raise TypeError("name must be a string") else: if type(name) not in (str, unicode): raise TypeError("name must be a string") name = str(name) # IronPython issue #10 + py3k issue obj = self._local_objects[oid] accessor = getattr(type(obj), overrider, None) if accessor is None: name2 = self._check_attr(obj, name) if not self._config[param] or not name2: raise AttributeError("cannot access %r" % (name,)) accessor = default name = name2 return accessor(obj, name, *args) # # request handlers # def _handle_ping(self, data): return data def _handle_close(self): self._cleanup() def _handle_getroot(self): return self._local_root def _handle_del(self, oid, count=1): self._local_objects.decref(oid) def _handle_repr(self, oid): return repr(self._local_objects[oid]) def _handle_str(self, oid): return str(self._local_objects[oid]) def _handle_cmp(self, oid, other): # cmp() might enter recursive resonance... 
yet another workaround #return cmp(self._local_objects[oid], other) obj = self._local_objects[oid] try: return type(obj).__cmp__(obj, other) except (AttributeError, TypeError): return NotImplemented def _handle_hash(self, oid): return hash(self._local_objects[oid]) def _handle_call(self, oid, args, kwargs=()): return self._local_objects[oid](*args, **dict(kwargs)) def _handle_dir(self, oid): return tuple(dir(self._local_objects[oid])) def _handle_inspect(self, oid): return tuple(netref.inspect_methods(self._local_objects[oid])) def _handle_getattr(self, oid, name): return self._access_attr(oid, name, (), "_rpyc_getattr", "allow_getattr", getattr) def _handle_delattr(self, oid, name): return self._access_attr(oid, name, (), "_rpyc_delattr", "allow_delattr", delattr) def _handle_setattr(self, oid, name, value): return self._access_attr(oid, name, (value,), "_rpyc_setattr", "allow_setattr", setattr) def _handle_callattr(self, oid, name, args, kwargs): return self._handle_getattr(oid, name)(*args, **dict(kwargs)) def _handle_pickle(self, oid, proto): if not self._config["allow_pickle"]: raise ValueError("pickling is disabled") return pickle.dumps(self._local_objects[oid], proto) def _handle_buffiter(self, oid, count): items = [] obj = self._local_objects[oid] i = 0 try: while i < count: items.append(next(obj)) i += 1 except StopIteration: pass return tuple(items) def _handle_oldslicing(self, oid, attempt, fallback, start, stop, args): try: # first try __xxxitem__ getitem = self._handle_getattr(oid, attempt) return getitem(slice(start, stop), *args) except Exception: # fallback to __xxxslice__. see issue #41 if stop is None: stop = maxint getslice = self._handle_getattr(oid, fallback) return getslice(start, stop, *args) # collect handlers _HANDLERS = {} for name, obj in dict(locals()).items(): if name.startswith("_handle_"): name2 = "HANDLE_" + name[8:].upper() if hasattr(consts, name2): _HANDLERS[getattr(consts, name2)] = obj else: raise NameError("no constant defined for %r", name) del name, name2, obj
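# --- Example (not part of protocol.py) ----------------------------------------
# The DEFAULT_CONFIG docstring above says individual parameters can be
# overridden by passing a partial configuration dict to Connection. A minimal
# sketch of how that usually looks from client code, assuming the standard
# rpyc.connect factory forwards its `config` keyword to the Connection it
# creates; the host and port below are hypothetical:
import rpyc

config = {
    # Only the keys we want to change are listed; everything else keeps its
    # value from DEFAULT_CONFIG (e.g. allow_safe_attrs stays True).
    "allow_public_attrs": True,       # also expose attributes without a leading "_"
    "sync_request_timeout": 60,       # wait up to 60s for synchronous replies
}

conn = rpyc.connect("localhost", 18861, config=config)
try:
    print(conn.root)                  # triggers HANDLE_GETROOT via sync_request
    conn.ping()                       # echo check; raises PingError on mismatch
finally:
    conn.close()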
plugin.py
# Copyright (c) 2015 SONATA-NFV, 2017 5GTANGO # ALL RIGHTS RESERVED. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Neither the name of the SONATA-NFV, 5GTANGO # nor the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # This work has been performed in the framework of the SONATA project, # funded by the European Commission under Grant number 671517 through # the Horizon 2020 and 5G-PPP programmes. The authors would like to # acknowledge the contributions of their colleagues of the SONATA # partner consortium (www.sonata-nfv.eu). # # This work has been performed in the framework of the 5GTANGO project, # funded by the European Commission under Grant number 761493 through # the Horizon 2020 and 5G-PPP programmes. The authors would like to # acknowledge the contributions of their colleagues of the 5GTANGO # partner consortium (www.5gtango.eu). import logging import json import time import os import threading from sonmanobase import messaging logging.basicConfig(level=logging.INFO) LOG = logging.getLogger("son-mano-base:plugin") LOG.setLevel(logging.DEBUG) class ManoBasePlugin(object): """ Abstract class that should be inherited by other MANO plugins. This class provides basic mechanisms to - connect to the broker - send/receive async/sync request/response calls - send/receive notifications - register / de-register plugin to plugin manager It also implements a automatic heartbeat mechanism that periodically sends heartbeat notifications. """ def __init__(self, name="son-plugin", version=None, description=None, auto_register=True, wait_for_registration=True, start_running=True, auto_heartbeat_rate=0.5): """ Performs plugin initialization steps, e.g., connection setup :param name: Plugin name prefix :param version: Plugin version :param description: A description string :param auto_register: Automatically register on init :param wait_for_registration: Wait for registration before returning from init :param auto_heartbeat_rate: rate of automatic heartbeat notifications 1/n seconds. 0=deactivated :return: """ self.name = "%s.%s" % (name, self.__class__.__name__) self.version = version self.description = description self.uuid = None # uuid given by plugin manager on registration self.state = None # the state of this plugin READY/RUNNING/PAUSED/FAILED LOG.info( "Starting MANO Plugin: %r ..." % self.name) # create and initialize broker connection while True: try: self.manoconn = messaging.ManoBrokerRequestResponseConnection(self.name) break except: time.sleep(1) # register subscriptions LOG.info("Plugin connected to broker.") self.declare_subscriptions() # register to plugin manager if auto_register: while self.uuid is None: self.register() if wait_for_registration: self._wait_for_registration() # kick-off automatic heartbeat mechanism self._auto_heartbeat(auto_heartbeat_rate) # jump to run if start_running: LOG.info("Plugin running...") self.run() def __del__(self): """ Actions done when plugin is destroyed. 
:return: """ # de-register this plugin self.deregister() self.manoconn.stop_connection() self.manoconn.stop_threads() del self.manoconn def _auto_heartbeat(self, rate): """ A simple periodic heartbeat mechanism. (much room for improvements here) :param rate: rate of heartbeat notifications :return: """ if rate <= 0: return def run(): while True: if self.uuid is not None: self._send_heartbeat() time.sleep(1/rate) # run heartbeats in separated thread t = threading.Thread(target=run) t.daemon = True t.start() def _send_heartbeat(self): self.manoconn.notify( "platform.management.plugin.%s.heartbeat" % str(self.uuid), json.dumps({"uuid": self.uuid, "state": str(self.state)})) def declare_subscriptions(self): """ Can be overwritten by subclass. But: The this superclass method should be called in any case. """ # plugin status update subscription self.manoconn.register_notification_endpoint( self.on_plugin_status_update, # call back method "platform.management.plugin.status") def run(self): """ To be overwritten by subclass """ # go into infinity loop (we could do anything here) while True: time.sleep(1) def on_lifecycle_start(self, ch, method, properties, message): """ To be overwritten by subclass """ LOG.debug("Received lifecycle.start event.") self.state = "RUNNING" def on_lifecycle_pause(self, ch, method, properties, message): """ To be overwritten by subclass """ LOG.debug("Received lifecycle.pause event.") self.state = "PAUSED" def on_lifecycle_stop(self, ch, method, properties, message): """ To be overwritten by subclass """ LOG.debug("Received lifecycle.stop event.") self.deregister() os._exit(0) def on_registration_ok(self): """ To be overwritten by subclass """ LOG.debug("Received registration ok event.") pass def on_plugin_status_update(self, ch, method, properties, message): """ To be overwritten by subclass. Called when a plugin list status update is received from the plugin manager. """ LOG.debug("Received plugin status update %r." % str(message)) def register(self): """ Send a register request to the plugin manager component to announce this plugin. """ message = {"name": self.name, "version": self.version, "description": self.description} self.manoconn.call_async(self._on_register_response, "platform.management.plugin.register", json.dumps(message)) def _on_register_response(self, ch, method, props, response): """ Event triggered when register response is received. :param props: response properties :param response: response body :return: None """ response = json.loads(str(response)) if response.get("status") != "OK": LOG.debug("Response %r" % response) LOG.error("Plugin registration failed. Exit.") exit(1) self.uuid = response.get("uuid") # mark this plugin to be ready to be started self.state = "READY" LOG.info("Plugin registered with UUID: %r" % response.get("uuid")) # jump to on_registration_ok() self.on_registration_ok() # subscribe to start topic self._register_lifecycle_endpoints() # start heartbeat mechanism self._send_heartbeat() def deregister(self): """ Send a deregister event to the plugin manager component. """ LOG.info("De-registering plugin...") message = {"uuid": self.uuid} self.manoconn.call_async(self._on_deregister_response, "platform.management.plugin.deregister", json.dumps(message)) def _on_deregister_response(self, ch, method, props, response): """ Event triggered when de-register response is received. 
:param props: response properties :param response: response body :return: None """ response = json.loads(str(response)) if response.get("status") != "OK": LOG.error("Plugin de-registration failed. Exit.") exit(1) LOG.info("Plugin de-registered.") def _wait_for_registration(self, timeout=5, sleep_interval=0.1): """ Method to do active waiting until the registration is completed. (not nice, but ok for now) :param timeout: max wait :param sleep_interval: sleep interval :return: None """ # FIXME: Use threading.Event() for this? c = 0 LOG.debug("Waiting for registration (timeout=%d) ..." % timeout) while self.uuid is None and c < timeout: time.sleep(sleep_interval) c += sleep_interval def _register_lifecycle_endpoints(self): if self.uuid is not None: # lifecycle.start self.manoconn.register_notification_endpoint( self.on_lifecycle_start, # call back method "platform.management.plugin.%s.lifecycle.start" % str(self.uuid)) # lifecycle.pause self.manoconn.register_notification_endpoint( self.on_lifecycle_pause, # call back method "platform.management.plugin.%s.lifecycle.pause" % str(self.uuid)) # lifecycle.stop self.manoconn.register_notification_endpoint( self.on_lifecycle_stop, # call back method "platform.management.plugin.%s.lifecycle.stop" % str(self.uuid))
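# --- Example (not part of plugin.py) -------------------------------------------
# The ManoBasePlugin docstring describes the intended usage: subclass it,
# extend declare_subscriptions() (still calling the superclass method), and
# override the lifecycle/registration hooks. A minimal sketch of such a
# subclass; the extra topic name and the counting logic are made up for
# illustration and assume a reachable broker:
class DemoPlugin(ManoBasePlugin):
    """Toy plugin that counts notifications on a hypothetical topic."""

    def __init__(self):
        self.message_count = 0
        # Defaults keep auto-registration and jump into run() afterwards.
        super(DemoPlugin, self).__init__(name="son-plugin",
                                         version="0.1",
                                         description="demo plugin")

    def declare_subscriptions(self):
        # Keep the plugin-status subscription registered by the superclass.
        super(DemoPlugin, self).declare_subscriptions()
        # Hypothetical topic, used only in this sketch.
        self.manoconn.register_notification_endpoint(
            self.on_demo_event, "platform.demo.event")

    def on_registration_ok(self):
        LOG.info("DemoPlugin registered with UUID %r", self.uuid)

    def on_demo_event(self, ch, method, properties, message):
        self.message_count += 1
        LOG.info("demo event #%d: %r", self.message_count, message)


if __name__ == '__main__':
    DemoPlugin()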
grid.py
import pygame import math, random, scipy, numpy, Queue, threading, logging, os from numpy import linalg from objects import map from decimal import * from Queue import Queue from threading import Thread module_logger = logging.getLogger('App.Grid') #Return grid with tiles class Grid(object): def __init__(self,surface,font,notifier,tile_size=50,tile_value=False,neighbor_rad=False,nt=1,lb=1,rb=10): self.logger = logging.getLogger('App.Grid.g') self.font = font self.notifier = notifier self.surface = surface self.s_rect = surface.get_rect() self.tile_s = tile_size self.nt = nt self.lb = lb self.rb = rb self.q = Queue() self.w = self.s_rect.w/tile_size #grid width self.h = self.s_rect.h/tile_size #grid height self.v = tile_value #tile value: [1] - color, [2] - type self.g = [] #grid self.tiles = pygame.sprite.Group() self.on_generate() if neighbor_rad != False: self.sfn(neighbor_rad) self.tiles.update() def on_generate(self): for x in range(self.w): self.g.append([]) # print '---------',x for y in range(self.h): # print self.a if self.v==False: newtile = Dummy_Tile(self.surface,self.font,self.tile_s,self.notifier,x,y) self.tiles.add(newtile) self.g[x].append(newtile) elif self.v == 'grass': newtile = Grass(self.surface,self.font,self.tile_s,self.notifier,x,y,lb=self.lb,rb=self.rb) self.tiles.add(newtile) self.g[x].append(newtile) # self.t = Thread(target=newtile.update) # self.t.daemon = True # self.t.start() # self.t.join() # for t in self.g: # t.color = 120,120,0 # pygame.display.update() # self.tiles.update() #search for neighbors algoritm #Radius for search: neighbor_rad = (x,y) def sfn(self,neighbor_rad): progr = 0 dx = neighbor_rad[0] dy = neighbor_rad[1] for dxx in range(dx): for dyy in range(dy): for x in range(self.w): for y in range(self.h): self.g[x][y].neighbor_rad = neighbor_rad d1 = (x+(dxx+1),y) d1d = dxx+1,0 d2 = (x+(dxx+1),y+(dyy+1)) d2d = dxx+1,dyy+1 d3 = (x,y+(dyy+1)) d3d = 0,dyy+1 d4 = (x-(dxx+1),y+(dyy+1)) d4d = -1*(dxx+1),dyy+1 d5 = (x-(dxx+1),y) d5d = -1*(dxx+1),0 d6 = (x-(dxx+1),y-(dyy+1)) d6d = -1*(dxx+1),-1*(dyy+1) d7 = (x,y-(dyy+1)) d7d = 0,-1*(dyy+1) d8 = (x+(dxx+1),y-(dyy+1)) d8d = dxx+1,-1*(dyy+1) if self.nt == 1: d = (d1,d1d),(d2,d2d),(d3,d3d),(d4,d4d),(d5,d5d),(d6,d6d),(d7,d7d),(d8,d8d) elif self.nt == 2: d = (d2,d2d),(d4,d4d),(d6,d6d),(d8,d8d) elif self.nt == 3: d = (d1,d1d),(d3,d3d),(d5,d5d),(d7,d7d) elif self.nt == 4: d = (d1,d1d),(d5,d5d) elif self.nt == 5: d = (d3,d3d),(d7,d7d) elif self.nt == 6: d = (d2,d2d),(d3,d3d),(d4,d4d) for dd in d: if 0 <= dd[0][0] <= self.w-1 and 0 <= dd[0][1] <= self.h-1: ul = self.unit_vector((dx,dy),(dd[1][0],dd[1][1])) app = (dd[0][0],dd[0][1]),((dxx+1),(dyy+1)),ul self.g[x][y].neighbors.append(app) progr += 1 self.logger.info(msg=str(progr)) # print progr def unit_vector(self,rad,tile): b = numpy.array(tile) a = numpy.array(rad) al = numpy.linalg.norm(a) u = b/al ul = numpy.linalg.norm(u) return ul class Dummy_Tile(pygame.sprite.Sprite,object): def __init__(self,surface,font,tile_size,notifier,x,y): pygame.sprite.Sprite.__init__(self) self.surface = surface self.font = font self.notifier = notifier self.rect = pygame.Rect ((x*tile_size,y*tile_size),(tile_size,tile_size)) self.me = x,y self.var_tuple = 'grid.g[',str(x),'][',str(y),']' self.var_string = ''.join(self.var_tuple) self.notifier.bind_on(x,y,'color',callback=self.callback) self.notifier.bind_on(x,y,'symbol',callback=self.callback) self.q = Queue() # print 'Create callback',self.var_string self.color = 0,15,0 self.symbol = 'O' def log_tile(self,**kwarg): name = 
('App.Grid.g',str(self.me)) logger = logging.getLogger(''.join(name)) keys = sorted(kwarg.keys()) for kw in keys: #kwarg: msg = (str(kw),':',str(kwarg[kw])) msg_s = ''.join(msg) logger.info(msg_s) def callback(self,tile,var_name,new_value): # print 'Ouch! My name is ',self.var_string self.update() #Update model def set_symbol(self): self.symbol = 'O' # if self.symbol == 'O': # self.symbol = 'H' # elif self.symbol == 'X': # self.symbol = 'V' # elif self.symbol == 'G': # self.symbol = 'K' # else: # self.symbol = 'O' def set_color(self,color): self.color = color # self.set_symbol() self.render(self.surface) # self.update() def update(self): # self.q.put(True) self.render(self.surface) # pass #Update view def render(self,surface): surface.fill(color=(0,0,0),rect=self.rect) self.text = self.font.render(self.symbol,True,self.color) self.text_x = self.rect.centerx-self.text.get_size()[0]*0.5 self.text_y = self.rect.centery-self.text.get_size()[1]*0.5 surface.blit(self.text,(self.text_x,self.text_y)) pygame.display.update(self.rect) class Grass(Dummy_Tile,object): def __init__(self,surface,font,tile_size,notifier,x,y,lb=1,rb=10,debug=False): super(Grass,self).__init__(surface,font,tile_size,notifier,x,y) self.neighbors = [] self.lb = lb self.rb = rb self.dbg = debug # print 'Init complete:',self.me self.log_tile(Init='complete') def neighborhood(self): pass def update(self): super(Grass,self).update() self.voayor() def voayor(self): for neighbor in self.neighbors: self.notifier.bind_on(neighbor[0][0],neighbor[0][1],'color',callback=self.spoocked) def spoocked(self,tile,var_name,new_value): # os.system('clear') # self.log_tile(Tile=self.me) # self.q.put(True) # msg = '----------','\n',str(self.me),'----------' # msg_s = ''.join(msg) # logging.info(msg) # logging.info(msg_s) # if self.dbg != False: # print '----------',self.me,'----------' grad = 0.8 if var_name == 'color': for ne in self.neighbors: # print ne if ne[0][0] == tile[0] and ne[0][1] == tile[1]: dist = ne[2] #self.logger.info(ne) #self.logger.info(round(ne[2],2)) # if self.dbg != False: # print ne # print round(ne[2],2) nR = new_value[0]-new_value[0]*dist nG = new_value[1]-new_value[1]*dist nB = new_value[2]-new_value[2]*dist oR = self.color[0] oG = self.color[1] oB = self.color[2] # R = (nR+oR)/2 # G = (nG+oG)/2 # B = (nB+oB)/2 wR = (nR/255)*10+self.lb,(oR/255)*10+self.rb #self.logger.info(Red_w = str(wR)) # if self.dbg != False: # print 'Red weight: ',wR wG = (nG/255)*10+self.lb,(oG/255)*10+self.rb # if self.dbg != False: # print 'Green weight: ',wG wB = (nB/255)*10+self.lb,(oB/255)*10+self.rb # if self.dbg != False: # print 'Blue weight: ',wB R = numpy.average((nR,oR),weights=wR) G = numpy.average((nG,oG),weights=wG) B = numpy.average((nB,oB),weights=wB) self.set_color((R,G,B)) # os.system('clear') self.log_tile(RedW=wR,GreenW=wG,BlueW=wB,Red=R,Green=G,Blue=B) # self.log_tile(Red=self.color[0],Green=self.color[1],Blue=self.color[2]) # if self.dbg != False: # print 'My color: ',R,'',G,'',B # if self.dbg != False: # print '**********************' # print 'Boo, Scarry Terry' pass
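# --- Worked example (not part of grid.py) --------------------------------------
# Grass.spoocked() spreads a neighbour's new colour onto the tile: the
# neighbour colour is attenuated by the distance ratio computed in
# Grid.unit_vector (norm(offset) / norm(search radius)) and then merged with
# the tile's own colour via a weighted average. A small standalone sketch of
# that arithmetic with made-up colours and offsets, assuming defaults lb=1,
# rb=10:
import numpy as np

def falloff(radius, offset):
    # Same computation as Grid.unit_vector: norm(offset) / norm(radius).
    return np.linalg.norm(np.array(offset) / np.linalg.norm(np.array(radius)))

def blend(own_color, neighbor_color, dist, lb=1, rb=10):
    # Mirrors the per-channel weighting in Grass.spoocked().
    blended = []
    for own, new in zip(own_color, neighbor_color):
        attenuated = new - new * dist                      # nR = R - R*dist
        weights = ((attenuated / 255.0) * 10 + lb,         # weight of the neighbour colour
                   (own / 255.0) * 10 + rb)                # weight of the tile's own colour
        blended.append(np.average((attenuated, own), weights=weights))
    return tuple(blended)

dist = falloff(radius=(2, 2), offset=(1, 1))               # 0.5 for this offset
print(blend((0, 15, 0), (200, 120, 40), dist))             # nudged toward the neighbour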
compare_num_layer_haar_multiprocessing_sgd.py
import qiskit
import numpy as np
import sys
import multiprocessing
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.ansatz, qtm.fubini_study, qtm.encoding, qtm.loss

def run_haar(num_layers, num_qubits):
    psi = 2*np.random.rand(2**num_qubits)-1
    # Haar
    thetas = np.ones(num_qubits*num_layers*5)
    psi = psi / np.linalg.norm(psi)
    encoder = qtm.encoding.Encoding(psi, 'amplitude_encoding')
    loss_values_haar = []
    thetass_haar = []
    for i in range(0, 400):
        if i % 20 == 0:
            print('Haar (' + str(num_layers) + ' layer): ', i)
        qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
        # G = qtm.fubini_study.calculate_linear_state(qc.copy(), thetas, num_layers)
        qc = encoder.qcircuit
        grad_loss = qtm.base.grad_loss(
            qc,
            qtm.ansatz.create_haarchecker_linear,
            thetas, num_layers = num_layers, encoder = encoder)
        # grad1 = np.real(np.linalg.inv(G) @ grad_loss)
        thetas -= qtm.constant.learning_rate*grad_loss
        qc_copy = qtm.ansatz.create_haarchecker_linear(qc.copy(), thetas, num_layers, encoder)
        loss = qtm.loss.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
        loss_values_haar.append(loss)
        thetass_haar.append(thetas.copy())
    traces_haar, fidelities_haar = [], []
    for thetas in thetass_haar:
        # Get |psi> = U_gen|000...>
        qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
        qc = qtm.ansatz.create_linear_state(qc, thetas, num_layers = num_layers)
        psi, rho_psi = qtm.base.extract_state(qc)
        # Get |psi~> = U_target|000...>
        qc1 = encoder.qcircuit
        psi_hat, rho_psi_hat = qtm.base.extract_state(qc1)
        # Calculate the metrics
        trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
        traces_haar.append(trace)
        fidelities_haar.append(fidelity)
    print('Writing ... ' + str(num_layers))
    np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/loss_values_haar.csv", loss_values_haar, delimiter=",")
    np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/thetass_haar.csv", thetass_haar, delimiter=",")
    np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/traces_haar.csv", traces_haar, delimiter=",")
    np.savetxt("../../experiments/linear_ansatz_15layer_sgd/" + str(num_layers) + "/fidelities_haar.csv", fidelities_haar, delimiter=",")

if __name__ == "__main__":
    # create one worker process per layer count (these are processes, not threads)
    num_qubits = 5
    num_layers = [1, 2, 3, 4, 5]
    t_haar = []
    for i in num_layers:
        t_haar.append(multiprocessing.Process(target = run_haar, args=(i, num_qubits)))
    for i in range(0, len(num_layers)):
        t_haar[i].start()
    for i in range(0, len(num_layers)):
        t_haar[i].join()
    print("Done!")
papersearch.py
# imports - core import os, sys import logging import ast import cPickle as pickle from random import random as rand log = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) import time from numpy import array as mat, pi, cos, sin, \ arctan2 as atan2, linspace, meshgrid as mesh from numpy.linalg import norm from numpy.random import uniform, seed import itertools as it # imports - anoto filePath, fileName=os.path.split(__file__) #you are here sys.path.append(os.path.normpath(os.path.join(filePath, '../int'))) import robot_com from robot_com.ftp.robotFTP import robotFileTransfer from robot_com.serial.arap import robotSerial # imports - master thesis sys.path.append(os.path.normpath(os.path.join(filePath, '../int/master_thesis_code/source'))) from denavit_hartenberg140 import forward_kinematics, DH_TABLE as irb140_params from helperfunctions_math import rotation_matrix_rot_tilt_skew as R_rts, matmul from standardplot import StPlot from threading import Thread, Lock, RLock # imports - newer thesis files from ed_probe import PenED, rx_ry_rz as rot_xyz class Robot: # inits def _upload_files(self): ftp = robotFileTransfer(debug_state = debug) ftp.connect() ftp.upload('ARAPMOD.prg', robot_com.paths['serial']) ftp.upload('hptr_control.mod', robot_com.paths['server']) ftp.upload('common.mod', robot_com.paths['server']) ftp.upload('calibrationdata.mod', robot_com.paths['server']) ftp.disconnect() self._ftp = ftp def _init_serial(self): pass arap = robotSerial(debug_state = debug) self._arap = arap arap.load('hptr_control.mod') arap.load('hptr_control.mod') # perform 2 times if pen is inited before robot arap.load('common.mod') arap.load('calibrationdata.mod') arap.command('initHaptor') self.move_to_door() # constructor def __init__(self,lock = None, angle=45, pen_interface=None, num_data=16): self._upload_files() self._init_serial() self.lock = lock self.all_data = [] self.start_time = time.strftime('%H%M') self.alive = False self._finished = False self.num_data_points = num_data if pen_interface: self.pen = pen_interface self.pen_hit_thread = Thread(target=pen.check_hit) self.pen_hit_thread.start() time.sleep(1) with self.lock: self.alive = self.pen.alive if pen.alive: self.move_to_ready(angle) self.save_tool_pos() # destructor def __del__(self): print('Robot shutdown...') if self.all_data: print('Saving data...') if self._finished: filename = "measurement_%d%m%Y_{}_%H%M.pickle".format(self.start_time) else: filename = "measurement_%d%m%Y_{}_%H%M_abort.pickle".format(self.start_time) # remove the first coordinate, # since it will become dupliate coordinate # during multiple runs, pen angles are also ill-conditioned for rot/skew self.all_data.pop(0) with open(time.strftime(filename),'wb+') as fp: pickle.dump(self.all_data, fp) else: print('No data to save!') # prepare shutdown self.move_to_door() self.pen_hit_thread.join() # call dels del self._arap self._arap = None del self._ftp self._tp = None print('Shutdown complete!') return # properties @property def arap(self): return self._arap # functions def __abort(self): if not self.pen: return pen._abort = True def move_to_door(self): self.arap.command('changePen') self.save_tool_pos() def set_vel_molusk(self): if not self.alive: return self.set_vel(0.01) self.set_ang_vel(30) def set_vel_parrot(self): if not self.alive: return self.set_vel(60) self.set_ang_vel(30) def set_vel(self, vel): if not self.alive: return self.arap.command('setVel', vel) def set_ang_vel(self, vel): if not self.alive: return self.arap.command('setAngVel', vel) 
def set_rel_tcp_z(self, z): if not self.alive: return self.arap.command('SetRelTCPz', '{:0.3f}'.format(z)) def save_tool_pos(self): self.arap.command('SavePos') def move_to_saved_pos(self): if not self.alive: return self.arap.command('MoveSavedPos') def move_to_saved_pos_ori(self): if not self.alive: return self.arap.command('MoveSavedPosOri') def move_to_saved_ori(self): if not self.alive: return self.arap.command('MoveSavedOri') def move_tcp(self, x,y,z, speed): if not self.alive: return if speed == 'normal': self.arap.command('MoveTCPNormal', '[{0:0.3f},{1:0.3f},{2:0.3f}]'.format(x,y,z)) elif speed == 'slow': self.arap.command('MoveTCPSlow', '[{0:0.3f},{1:0.3f},{2:0.3f}]'.format(x,y,z)) def move_wobj_tcp(self, x,y,z): if not self.alive: return self.arap.command('MoveWObjTCP', '[{0:0.3f},{1:0.3f},{2:0.3f}]'.format(x,y,z)) def move_flange(self, x,y,z): if not self.alive: return self.arap.command('MoveFlange', '[{0:0.3f},{1:0.3f},{2:0.3f}]'.format(x,y,z)) def rel_tool_ori(self, rx, ry, rz): if not self.alive: return self.arap.command('RelToolOri', '[{0:0.2f},{1:0.2f},{2:0.2f}]'.format(rx, ry, rz)) def rel_tool_dir(self, x,y,z): if not self.alive: return self.arap.command('RelToolPos', '[{0:0.3f},{1:0.3f},{2:0.3f}]'.format(x,y,z)) def rel_tool_z(self, z): if not self.alive: return self.arap.command('RelZPos', '{:0.3f}'.format(z)) def abs_tool_dir(self, x,y,z): if not self.alive: return self.arap.command('RelBasePos', '[{0:0.3f},{1:0.3f},{2:0.3f}]'.format(x,y,z)) def move_to_ready(self, angle): if not self.alive: return self.arap.command('SetJ123', [0,0,0]) self.arap.command('SetJzero') self.arap.command('SetJ5', angle) print self.get_tcp() def get_flange(self): while True: try: res = self.arap.command('getFlange') i0 = res.find('[') i1 = res.find(']')+1 res = res[i0 : i1] res = ast.literal_eval(res) if not(len(res) == 3): raise Exception('Not valid FLANGE format (length: {}'.format(len(res))) return mat(res) except Exception as e: print 'Failed to obtain Flange-values!' print str(e) def get_tcp(self): while True: try: res = self.arap.command('getTCP') i0 = res.find('[') i1 = res.find(']')+1 res = res[i0 : i1] res = ast.literal_eval(res) if not(len(res) == 3): raise Exception('Not valid TCP format (length: {}'.format(len(res))) return mat(res) except Exception as e: print 'Failed to obtain TCP-values!' print str(e) def get_joints(self): while True: try: res = self.arap.command('getJ') i0 = res.find('[') i1 = res.find(']')+1 res = res[i0 : i1] res = ast.literal_eval(res) if not(len(res) == 6): raise Exception('Not valid joints format (length: {}'.format(len(res))) return mat(res) except Exception as e: print 'Failed to obtain joint-values!' 
print str(e) def __grab_data(self): with self.lock: #these might need a few tries to get a valid result J = self.get_joints() tcp = self.get_tcp() flange = self.get_flange() pos = self.pen.position if pos is None: return None pos = mat(pos) ori = self.pen.orientation if ori is None: return None ori = mat(ori) fsr = self.pen.fsr if fsr is None: return None fsr_adc = self.pen.fsr_adc if fsr_adc is None: return None perspective = self.pen.perspective if perspective is None: return None data = { 'pen_pos': pos, 'pen_ori': ori, 'pen_fsr': fsr, 'pen_fsr_adc': fsr_adc, 'pen_perspective': perspective, 'robot_joints': J, 'robot_tcp': tcp, 'robot_flange': flange } return data def __find_start_pos(self, amount, height): if (not self.pen) or (not self.arap): return elif self.pen: if not self.pen.alive: return log.info('preparing SEARCH MODE.') self.save_tool_pos() while True: if not self.pen.alive: log.error('LOST CONNCTION TO PEN, ABORTING.') break if self.pen.hit: self.tool_height = height self.rel_tool_dir(0, 0, height) # move back 10mm self.save_tool_pos() log.info('Found starting point!') return else: self.arap.command('DispDistSaved') self.set_vel(10) self.rel_tool_dir(0,0, -amount) # move forward by 'amount' return def __search_tool_z(self, amount, _grid, _angles, _start_index): if (not self.pen) or (not self.arap): return elif self.pen: if not self.pen.alive: return if (_start_index is None): index = 0 else: index = _start_index if (_grid is None): disp_pos = mat([[10,0],[0,10],[-10,0],[0,-10]]) else: disp_pos = _grid if (_angles is None): angles = [[uniform(-20,20), uniform(-20,20), uniform(-180, 180)]for _ in xrange(len(disp_pos))] else: angles = _angles log.info('SEARCH MODE.') print 'Number of angles: {}'.format(len(angles)) self.save_tool_pos() self._finished = True # This remains if everything works assert(len(angles) == len(disp_pos)) while not (len(self.all_data) == len(angles)-_start_index): if not self.pen.alive: print 'CURRENT INDEX: {}'.format(index) log.error('LOST CONNCTION TO PEN, ABORTING.') self._finished = False break if self.pen.hit: data = self.__grab_data() if data is None: print 'FAILED MEASUREMENT - TRYING AGAIN!' 
continue self.all_data.append(data) print 'HIT!\a' print 'CURRENT INDEX: {}'.format(index) for key in data: print '\n{}: {}'.format(key, data[key]) self.set_vel(60) self.move_to_saved_pos() self.move_to_saved_ori() pos = disp_pos[index % len(disp_pos)] ang = angles[index % len(angles)] if (_grid is None): if (index+1) % 4 == 0: perturb = mat((rand()*10-5, rand()*10-5)) pos = pos + perturb if (_grid is None): self.abs_tool_dir(pos[0],pos[1], 0) else: self.move_wobj_tcp(pos[0], pos[1], self.tool_height) self.save_tool_pos() self.rel_tool_ori(*ang) index = index+1 else: self.arap.command('DispDistSaved') self.set_vel(10) self.rel_tool_dir(0,0, -amount) self.move_to_saved_pos() self.__abort() return def __rel_toolz_ori(self, rel_z, rx,ry,rz): self.set_rel_tcp_z(rel_z) self.rel_tool_ori(rx, ry, rz) self.set_rel_tcp_z(-rel_z) def paper_verify(self): self.__find_start_pos(0.1, height=30) self.__rel_toolz_ori(30, 0, -20, 0) self.rel_tool_z(10) self.rel_tool_z(10) self.move_to_saved_pos_ori() self.__abort() def paper_search(self, grid=None, angles=None, start_index=None, start_height=None): self.__find_start_pos(0.1, height=start_height) self.__search_tool_z(0.1, _grid = grid, _angles = angles, _start_index = start_index) return def parrot(self, prints=False): if (not self.pen) or not(self.arap): return elif self.pen: if not self.pen.alive: return log.info('PARROT MODE.') while True: if self.pen.hit: with self.lock: mxy = mat((353455349.28125, 363.38671875)) pos = (self.pen.pos-mxy)*0.3 L = norm(pos) x,y = pos if y < -74: self.abs_tool_dir(0, 0, -x) else: self.abs_tool_dir(x, -y, 0) if prints: J = self.get_joints() tcp = self.get_tcp() flange = self.get_flange() print 'Joints: {}'.format(J) print 'TCP: {}'.format(tcp) print 'Flange: {}'.format(flange) else: pass return def position_grid(xmin, xmax, ymin, ymax, num): x = linspace(xmin,xmax,num) y = linspace(ymin,ymax,num) xy = mat(mesh(x,y)).T #shape: x_grid, y_grid, grid_index/layer return xy def convert_grid(grid): a,b,c = grid.shape xy = grid.reshape(a*b,c) return xy def define_angles(tilt, skew): return [ [tilt,0,90+skew], [-tilt,0,90+skew], [0,tilt,90+skew], [0,-tilt,90+skew], [tilt,0,-90-skew],[-tilt,0,-90-skew],[0,tilt,-90-skew], [0,-tilt,-90-skew], [tilt,0,45+skew], [-tilt,0,45+skew], [0,tilt,45+skew], [0,-tilt,45+skew], [tilt,0,45-180+skew], [-tilt,0,45-180+skew], [0,tilt,45-180+skew], [0,-tilt,45-180+skew], [tilt,0,-45+180-skew],[-tilt,0,-45+180-skew],[0,tilt,-45+180-skew], [0,-tilt,-45+180-skew] ] if __name__ == '__main__': seed(123456789*3) debug = False shared_lock = RLock() pen = PenED(hover_mode=True,lock = shared_lock) #Put after robot inits, or perform LOAD 2 times the first time robot = Robot(angle=90, pen_interface=pen, lock = shared_lock) ## generate grids f = 7.0 *(25.4/600.0) gridmm = position_grid(0, f*(4572.2 - 3619.8), #282.22786666666656 0, f*(3917.4 - 3262.7), #194.00943333333342 11) gridad = position_grid(3619.8, 4572.2, 3262.7, 3917.4, 11) ## extract subgrids and convert from (a,b,c) shape to (a*b,c) shape # gridmm = gridmm[1:-1, 2:-3, :] # gridad = gridad[1:-1, 2:-3, :] gridmm = convert_grid(gridmm) gridad = convert_grid(gridad) # gridmm = gridmm[1::2] # gridad = gridad[1::2] ## comment/uncomment these lines ## if we want distorted grids # gridmm_pert = mat([uniform(-5,5) for _ in xrange(len(gridmm)*2)]).reshape(len(gridmm),2) # gridad_pert = gridmm_pert/f # gridmm = gridmm + gridmm_pert # gridad = gridad + gridad_pert angles = None ## comment/uncomment these lines ## if we want symmtrical angles # num_chunks = 
len(define_angles(0,0)) # num_angle_chunks = int(len(gridmm) / num_chunks) # angles = mat([define_angles(uniform(-20,20), uniform(-90,90)) for _ in xrange(num_angle_chunks+1)]) # a,b,c = angles.shape # angles = angles.reshape(a*b,c) # angles = angles[:len(gridmm)] # print '\nAngles:\n{}'.format(angles) ## uncommnt this for ## checking calibration ## These were used for measuring accuracy of wobj calibration # robot.rel_tool_ori(20,0,-45) #xtilt # robot.rel_tool_ori(0,20,0) #ytilt # robot.rel_tool_ori(20,20,-45) #xytilt robot.move_wobj_tcp(*(list(gridmm[-1])+[-0.6])) while True: pass ## uncommnt this for ## checking grid placement # for p in gridmm: # pos = list(p) + [5]; # robot.move_wobj_tcp(*pos) # 1/0 # choose starting index index=0 print 'pos mm: {}'.format(gridmm[index]) print 'pos ad: {}'.format(gridad[index]) # start measuring robot.move_wobj_tcp(*(list(gridmm[index])+[3])) robot.paper_search(start_height = 3, start_index = index, grid = gridmm, angles = angles)
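# --- Shape sketch (not part of papersearch.py) ----------------------------------
# position_grid() builds a num x num mesh of (x, y) way-points between the
# given bounds and convert_grid() flattens it into the list of coordinates that
# paper_search() walks through. A small standalone sketch, reusing the same
# Anoto-to-millimetre scale factor as __main__ (f = 7.0 * 25.4 / 600):
import numpy as np

def position_grid(xmin, xmax, ymin, ymax, num):
    # Same construction as above: stack the meshgrid and transpose so the
    # result has shape (num, num, 2) -> x-grid, y-grid, coordinate.
    x = np.linspace(xmin, xmax, num)
    y = np.linspace(ymin, ymax, num)
    return np.array(np.meshgrid(x, y)).T

def convert_grid(grid):
    a, b, c = grid.shape
    return grid.reshape(a * b, c)

f = 7.0 * (25.4 / 600.0)                       # Anoto dot units -> millimetres
gridmm = position_grid(0, f * (4572.2 - 3619.8),
                       0, f * (3917.4 - 3262.7), 11)
print(gridmm.shape)                            # (11, 11, 2)
print(convert_grid(gridmm).shape)              # (121, 2): one (x, y) row per way-point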
task_bulter.py
#!/usr/bin/env python #coding:utf-8 """ Author: --<v1ll4n> Purpose: Manage Task Created: 2016/12/13 """ import time from time import sleep import unittest from multiprocessing import Pipe from threading import Thread from pprint import pprint from inspect import getmembers from . import exceptions from .process_task import ProcessTask from .utils_class import Singleton from .process_task import testfun UPDATE_TASK_STATUS_DAEMON = 'update_task_status_daemon' ######################################################################## class TaskBulter(Singleton): """""" _tasks_table = {} _tasks_status = {} _daemon_threads = {} _result_tables = {} #---------------------------------------------------------------------- def __init__(self, threads_update_interval=0): """Constructor""" self._threads_update_interval = threads_update_interval self._closed = False #self._initial_deamon_threads() #---------------------------------------------------------------------- def _daemon_start(self, name, func): """""" if name in self._daemon_threads: pass else: ret = Thread(name=name, target=func) ret.daemon = True ret.start() self._daemon_threads[name] = ret ##---------------------------------------------------------------------- #def _initial_deamon_threads(self): #"""""" ##raise NotImplemented #self._daemon_start(name=UPDATE_TASK_STATUS_DAEMON, #func=self._update_tasks_status) #---------------------------------------------------------------------- def _upload_result_table(self): """""" taskslist = self._result_tables.keys() for task_id in taskslist: self.get_result(task_id) #---------------------------------------------------------------------- def _update_tasks_status(self): """""" re_update = False for i in self._tasks_table.items(): pipe = i[1]['status_monitor_pipe'] #if self._tasks_table[i[0]]['process_instance'].exitcode == None: last = {} ret = {} while pipe.poll(): ret = pipe.recv() #ret['timestamp'] = time.time() if ret != {}: pass else: ret = {} if not self._tasks_status[i[0]]: self._tasks_status[i[0]] = {} # # Clean last buffer record # _ = self._tasks_status[i[0]].copy() try: del _['last'] except KeyError: pass last = _ # # record # self._tasks_status[i[0]]['now'] = ret if isinstance(self._tasks_status[i[0]], dict): self._tasks_status[i[0]]['last'] = last #else: # pass # # Check Re-update? 
# try: lasttimestamp = int(self._tasks_status[i[0]]['now']['timestamp']) except KeyError: lasttimestamp = int(time.time()) if int(time.time()) - \ lasttimestamp \ <= 3: re_update = False else: re_update = True if re_update: self._update_tasks_status() #---------------------------------------------------------------------- def start_task(cls, id, target, args=tuple(), kwargs={}, result_callback=None): """Start A task(Process) Params: id: the ID of task (identify the process_task) :type: str target: the task function :type: function args: the vargs of target :type: tuple kwargs: the keywords of target :type: dict""" if not callable(target): exceptions.TaskCannotBeCalled if id in cls._tasks_table: raise exceptions.ExistedTaskId # # create pipe # control_pipe, child_pipe = Pipe(duplex=False) result_recv_pipe, result_send_pipe = Pipe(duplex=False) # # init tables # cls._tasks_table[id] = {} cls._result_tables[id] = {} cls._tasks_status[id] = {} cls._tasks_table[id]['status_monitor_pipe'] = control_pipe cls._result_tables[id]['result_pipe'] = result_recv_pipe cls._result_tables[id]['result'] = [] # # Build process and run # task_process = ProcessTask(id, target, args=args, kwargs=kwargs, status_monitor_pipe=child_pipe, threads_update_interval=cls._threads_update_interval, result_pipe=result_send_pipe, result_hook_function=result_callback) cls._tasks_table[id]['process_instance'] = task_process task_process.daemon = True task_process.start() #---------------------------------------------------------------------- def get_tasks_status(self): """""" self._update_tasks_status() return self._tasks_status.copy() #---------------------------------------------------------------------- def get_tasks(self): """""" return self._tasks_table #---------------------------------------------------------------------- def get_task_by_id(self, id): """""" if id in self._tasks_table: return self._tasks_table[id] else: return None #---------------------------------------------------------------------- def get_result(self, task_id): """""" resultset = self._result_tables.get(task_id) if resultset: pipe = resultset.get('result_pipe') if pipe: while pipe.poll(): resultset['result'].append(pipe.recv()) return resultset.get('result') #---------------------------------------------------------------------- def destory_task(self, id_or_taskinstance): """""" if isinstance(id_or_taskinstance, ProcessTask): id_or_taskinstance.terminate() elif isinstance(id_or_taskinstance, str): _ = self.get_task_by_id(id_or_taskinstance)['process_instance'] assert isinstance(_, ProcessTask) _.terminate() #---------------------------------------------------------------------- def get_result_pipe_table(self): """""" return self._result_tables #---------------------------------------------------------------------- def close(self): """""" self._closed = True #---------------------------------------------------------------------- def destory_and_clean_task(self, id): """""" tasklist = [] if id: if id in self._tasks_table.keys(): tasklist.append(id) else: return False else: tasklist = self._tasks_table.keys() for i in tasklist: self.destory_task(tasklist) del self._tasks_status[i] del self._tasks_table[i] del self._result_tables[i] return True #---------------------------------------------------------------------- def reset(self): """""" return self.destory_and_clean_task(None) ######################################################################## class TaskBulterTest(unittest.case.TestCase): """""" 
#---------------------------------------------------------------------- def test_get_result(self): """""" TaskBulter().start_task('testresultrecv', result_t) sleep(1) pprint(TaskBulter().get_result('testresultrecv')) #---------------------------------------------------------------------- def result_t(): """""" for i in range(6): yield i if __name__ == '__main__': unittest.main()
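# --- Usage sketch (not part of task_bulter.py) ----------------------------------
# The start_task() docstring lists the parameters, but the only usage shown is
# the unittest above. A slightly fuller sketch; it assumes (as that test
# suggests) that ProcessTask drains a generator target into the result pipe.
# The task id and target below are made up for illustration:
from time import sleep

def squares():
    # Generator target: each yielded value is expected to show up in
    # get_result() for this task id.
    for i in range(5):
        yield i * i

def demo():
    bulter = TaskBulter(threads_update_interval=1)
    bulter.start_task('squares-task', squares)

    sleep(1)                                       # give the worker process time to run
    print(bulter.get_tasks_status())               # per-task status snapshots
    print(bulter.get_result('squares-task'))       # e.g. [0, 1, 4, 9, 16]

    bulter.destory_and_clean_task('squares-task')  # spelling follows the module's API
    bulter.close()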
GUI.py
#Author:Huangliang
#Time:2018/5/25
import tkinter as tk
from tkinter.filedialog import *
import tkinter.messagebox
import Final_Fantasy_Driver as FFD
import demo_training as DT
import threading

window = tk.Tk()
window.title('Pedestrians Detector')
window.geometry('300x210')

def tick():
    import winsound
    winsound.PlaySound('./prompt_tone/3.wav', winsound.SND_ASYNC)

def tick1():
    import winsound
    winsound.PlaySound('./prompt_tone/1.wav', winsound.SND_ASYNC)

def Choose():
    tick()
    choose = var.get()
    if choose == 1:
        DEMOTRAINING()
    elif choose == 2:
        VIDEODETECTION()

def DEMOTRAINING():
    window1 = tk.Toplevel()
    window1.title('设置路径')
    window1.geometry('400x150')
    path1 = tk.StringVar()
    path2 = tk.StringVar()
    def selectPath1():
        tick1()
        fd = LoadFileDialog(window1)  # create the open-file dialog
        filename = fd.go()  # show the dialog and get the selected file name
        print(filename)
        path1.set(filename)  # shared StringVar
        print(path1.get())
    def selectPath2():
        tick1()
        fd = LoadFileDialog(window1)  # create the open-file dialog
        filename = fd.go()  # show the dialog and get the selected file name
        print(filename)
        path2.set(filename)  # shared StringVar
        print(path2.get())
    def training():
        tick()
        newfilename1 = path1.get()
        newfilename2 = path2.get()
        newfilename1 = newfilename1.replace('\\','/')
        newfilename2 = newfilename2.replace('\\','/')
        print(newfilename1)
        print(newfilename2)
        window1.destroy()
        # show the training log window
        window2 = tk.Toplevel()
        window2.title('训练中...')
        window2.geometry('400x600')
        try:
            # run training in a child thread so the main thread keeps refreshing the UI
            new_thread_1 = threading.Thread(target=DT.main, args=(newfilename1, newfilename2, window2,))
            new_thread_1.setDaemon(True)
            new_thread_1.start()
        except:
            print("Error: unable to start training")
        window2.mainloop()
    l0 = Label(window1, text="请保证样本集文件夹中存在.lst图片名列表文件\n").grid(row=0, column=1)
    l1 = Label(window1, text="POS目标路径:").grid(row=1, column=0)
    et1 = Entry(window1, textvariable=path1).grid(row=1, column=1)
    bt1 = Button(window1, text="路径选择", command=selectPath1).grid(row=1, column=2)
    l1 = Label(window1, text="NEG目标路径:").grid(row=2, column=0)
    et2 = Entry(window1, textvariable = path2).grid(row = 2, column = 1)
    bt2 = Button(window1, text="路径选择", command=selectPath2).grid(row=2, column=2)
    bt3 = Button(window1, text=" 确定 ", command=training).grid(row=3, column=2)
    window1.mainloop()

def VIDEODETECTION():
    window2 = tk.Toplevel()
    window2.title('设置路径')
    window2.geometry('290x105')
    path3 = tk.StringVar()
    def selectPath1():
        tick1()
        fd = LoadFileDialog(window2)  # create the open-file dialog
        filename = fd.go()  # show the dialog and get the selected file name
        path3.set(filename)  # shared StringVar
    def detection():
        tick()
        filename = path3.get()
        newfilename = filename.replace('\\', '/')
        print(newfilename)
        FFD.main(newfilename)
        tick()
        tk.messagebox.showinfo('提示', '视频处理完成')
    l0 = Label(window2, text="请选择视频文件的路径\n").grid(row=0, column=1)
    l1 = Label(window2, text="原始视频路径:").grid(row=1, column=0)
    et1 = Entry(window2, textvariable=path3).grid(row=1, column=1)
    bt1 = Button(window2, text="路径选择", command=selectPath1).grid(row=1, column=2)
    bt3 = Button(window2, text=" 确定 ", command=detection).grid(row=2, column=2)
    window2.mainloop()

var = tk.IntVar()
l1 = Label(window,text='\n欢迎使用视频行人检测系统\n\n请选择您需要的功能\n').pack()
r1 = tk.Radiobutton(window, text='训练样本',variable=var, value=1)
r1.pack()
r2 = tk.Radiobutton(window, text='检测视频',variable=var, value=2)
r2.pack()
bt = tk.Button(window, text='确定', width=15, height=2, command=Choose)
bt.pack()
window.mainloop()
domino_puzzle.py
import math import re import typing from operator import itemgetter from collections import defaultdict, deque from concurrent.futures import Future from concurrent.futures.process import ProcessPoolExecutor from dataclasses import dataclass from datetime import datetime, timedelta from functools import partial from itertools import chain from multiprocessing import Pool, Manager from queue import Empty from random import Random from sys import maxsize from threading import Thread from deap import base, creator, tools from deap.algorithms import eaSimple from deap.tools.support import Statistics import matplotlib from networkx.classes.digraph import DiGraph from networkx.algorithms.shortest_paths.generic import shortest_path import numpy as np import hall_of_fame # Avoid loading Tkinter back end when we won't use it. from priority import PriorityQueue matplotlib.use('Agg') import matplotlib.pyplot as plt # @IgnorePep8 class Cell(object): def __init__(self, pips): self.pips = pips self.domino = None self.board = None self.x = None self.y = None self.visited = False def __repr__(self): return 'Cell({})'.format(self.pips) def find_neighbour_cells(self, dx, dy, exclude_sibling=True): x = self.x + dx y = self.y + dy board = self.board if 0 <= x < board.width and 0 <= y < board.height: neighbour = board[x][y] if (exclude_sibling and neighbour is not None and neighbour.domino is self.domino): pass elif neighbour is not None: yield neighbour def find_neighbours(self, exclude_sibling=True): return chain( self.find_neighbour_cells(0, 1, exclude_sibling=exclude_sibling), self.find_neighbour_cells(1, 0, exclude_sibling=exclude_sibling), self.find_neighbour_cells(0, -1, exclude_sibling=exclude_sibling), self.find_neighbour_cells(-1, 0, exclude_sibling=exclude_sibling)) @property def partner(self): domino = self.domino if domino.head is self: return domino.tail return domino.head def dominates_neighbours(self): return all(self.pips >= neighbour.pips for neighbour in self.find_neighbours()) class BoardError(Exception): pass class BadPositionError(BoardError): pass class MarkerSet: def __init__(self, marker_text: str, border: int): self.border = border self.marker_locations = {} # {(x, y): marker} if not marker_text.startswith('('): markers = iter(marker_text.rstrip()) self.marker_pips = dict(zip(markers, markers)) # {marker: pips} else: self.marker_pips = {} for match in re.finditer(r'\((\d+),(\d+)\)(.)', marker_text): row = int(match.group(1)) column = int(match.group(2)) marker = match.group(3) self.marker_locations[row+border, column+border] = marker def check_markers(self, pips_or_marker: str, x: int, y: int) -> str: new_pips = self.marker_pips.get(pips_or_marker) if new_pips is None: return pips_or_marker self.marker_locations[(x+self.border, y+self.border)] = pips_or_marker return new_pips class DiceSet: def __init__(self, dice_text: str = '', border: int = 0): self.dice = {} # {(x, y): pips} for match in re.finditer(r'\((\d+),(\d+)\)(\d+)', dice_text): row = int(match.group(1)) column = int(match.group(2)) die_pips = int(match.group(3)) self.dice[row+border, column+border] = die_pips def __repr__(self): return f'DiceSet({self.text!r})' @property def text(self): return self.crop_text(0, 0) def crop_text(self, left_border: int, top_border: int): sorted_dice = sorted(self.dice.items(), key=itemgetter(1)) return ','.join(f'({x-left_border},{y-top_border}){die_pips}' for (x, y), die_pips in sorted_dice) def items(self): return self.dice.items() def move(self, *positions, show_length=True) -> str: """ 
Move a die through a list of positions. :param positions: ((x, y), ...) the starting position of the die, followed by one or more positions for it to turn on or stop at. :param show_length: True if move lengths are included in text :return: a description of the move described by the positions """ move_parts = [] move_count = len(positions) pips = prev_x = prev_y = 0 for i, (x, y) in enumerate(positions): if i == 0: pips = self.dice.pop((x, y)) else: dx = x - prev_x dy = y - prev_y if 0 < dx: move_part = f'R{dx}' elif dx < 0: move_part = f'L{-dx}' elif 0 < dy: move_part = f'U{dy}' else: move_part = f'D{-dy}' if not show_length: move_part = move_part[0] if i == 1: move_part = f'{pips}{move_part}' if i == move_count - 1: self.dice[x, y] = pips move_parts.append(move_part) prev_x = x prev_y = y return ''.join(move_parts) def __getitem__(self, coordinates): x, y = coordinates return self.dice.get((x, y)) def __contains__(self, coordinates): return coordinates in self.dice class ArrowSet: directions = dict(r=(1, 0), u=(0, 1), l=(-1, 0), d=(0, -1)) def __init__(self, text: str): self.text = text self.positions = [] # [[(x1, y1), (x2, y2), ...], ...] for match in re.finditer(r'\((\d+),(\d+)\)(([UDLR]\d+)+)', text): x = int(match.group(1)) y = int(match.group(2)) position = [(x, y)] moves = match.group(3) for move_match in re.finditer(r'([UDLR])(\d+)', moves): direction = move_match.group(1) distance = int(move_match.group(2)) dx, dy = self.directions[direction.lower()] x += dx*distance y += dy*distance position.append((x, y)) self.positions.append(position) def __repr__(self): return f'ArrowSet({self.text!r})' class Board(object): @classmethod def create(cls, state, border=0, max_pips=None): sections = state.split('\n---\n') dice_set = arrows = None marker_text = '' if len(sections) > 1: for line in sections[1].splitlines(): if line.startswith('dice:'): dice_text = line[5:].rstrip() dice_set = DiceSet(dice_text, border) elif line.startswith('arrows:'): arrows_text = line[7:].rstrip() arrows = ArrowSet(arrows_text) else: marker_text = line marker_set = MarkerSet(marker_text, border) lines = sections[0].splitlines(False) lines.reverse() height = (len(lines)+1) // 2 line_length = height and max(map(len, lines)) width = height and (line_length+1) // 2 lines = [line + ((line_length-len(line)) * ' ') for line in lines] board = cls(width + 2*border, height + 2*border, max_pips=max_pips, dice_set=dice_set, arrows=arrows) degrees = None for x in range(width): for y in range(height): head = lines[y*2][x*2] head = marker_set.check_markers(head, x, y) if head not in ' x#': right_joint = board.get_joint(x, y, x+1, y, border, lines) upper_joint = board.get_joint(x, y, x, y+1, border, lines) if right_joint != ' ': tail = lines[y*2][x*2+2] tail = marker_set.check_markers(tail, x+1, y) degrees = 0 elif upper_joint != ' ': tail = lines[y*2+2][x*2] tail = marker_set.check_markers(tail, x, y+1) degrees = 90 else: tail = None if tail: domino = Domino(int(head), int(tail)) domino.rotate(degrees) board.add(domino, x+border, y+border) elif board[x+border][y+border] is None: cell = Cell(int(head)) board.add(cell, x+border, y+border) board.place_markers(line_length, lines, marker_set) return board def __init__(self, width: int, height: int, max_pips: int = None, dice_set: DiceSet = None, arrows: ArrowSet = None): self.width = width self.height = height self.dominoes = [] self.max_pips = max_pips self.dice_set = dice_set self.arrows = arrows self.add_count = 0 if max_pips is None: self.extra_dominoes = [] else: 
self.extra_dominoes = Domino.create(max_pips) self.cells: typing.List[typing.List[typing.Optional[Cell]]] = [] for _ in range(width): self.cells.append([None] * height) # Track dominoes that aren't on the regular grid. self.offset_dominoes = [] # [(domino, x, y)] self.markers = {} self.cycles_remaining = 0 def __eq__(self, other): for x in range(self.width): for y in range(self.height): cell1 = self[x][y] cell2 = other[x][y] if cell1 is None or cell2 is None: if cell1 is not cell2: return False elif cell1.pips != cell2.pips or cell1.domino != cell2.domino: return False return True def __ne__(self, other): return not (self == other) def add(self, item: typing.Union['Domino', Cell], x: int, y: int): try: dx, dy = item.direction self.add(item.head, x, y) try: self.add(item.tail, x+dx, y+dy) except BadPositionError: # noinspection PyTypeChecker self.remove(item.head) raise self.dominoes.append(item) if self.extra_dominoes: self.extra_dominoes.remove(item) except AttributeError: if item.x is not None: # noinspection PyTypeChecker self.cells[item.x][item.y] = None if not (0 <= x < self.width and 0 <= y < self.height): raise BadPositionError( 'Position {}, {} is off the board.'.format(x, y)) if self.cells[x][y] is not None: raise BadPositionError( 'Position {}, {} is occupied.'.format(x, y)) self.cells[x][y] = item item.board = self item.x = x item.y = y def get_joint(self, x1: int, y1: int, x2: int, y2: int, border: int, lines: typing.List[str]) -> str: if y1 == y2: return ((0 < x2 < self.width-2*border) and lines[y1 * 2][x1 * 2 + 1] or ' ') return ((0 < y2 < self.height-2*border) and lines[y1 * 2 + 1][x1 * 2] or ' ') def place_markers(self, line_length, lines, marker_set): self.markers = marker_set.marker_locations for i, line in enumerate(lines): for j, c in enumerate(line): if c != '#': if not ('0' <= c <= '9'): continue if i % 2 == 0 and j % 2 == 0: continue head = c if c == '#' else int(c) right_joint = j + 1 < line_length and lines[i][j + 1] or ' ' upper_joint = i + 1 < len(lines) and lines[i + 1][j] or ' ' neighbours = [] # [(tail, degrees)] if right_joint != ' ': tail = lines[i][j + 2] degrees = 0 neighbours.append((tail, degrees)) if upper_joint != ' ': tail = lines[i + 2][j] degrees = 90 neighbours.append((tail, degrees)) for tail, degrees in neighbours: tail = tail if tail == '#' else int(tail) domino = Domino(head, tail) domino.rotate(degrees) self.offset_dominoes.append((domino, j / 2, i / 2)) # noinspection PyUnusedLocal @staticmethod def add_joint(joint: str, x1: int, y1: int, x2: int, y2: int) -> str: """ Record the joint character between a pair of cells. :return: an adjusted joint character, in '|- '. """ return joint def remove(self, item): try: self.remove(item.head) self.remove(item.tail) self.dominoes.remove(item) self.extra_dominoes.append(item) except AttributeError: # noinspection PyTypeChecker self.cells[item.x][item.y] = None item.x = item.y = item.board = None def join(self, cell1, cell2): domino = Domino(cell1, cell2) self.dominoes.append(domino) return domino def split(self, domino): self.dominoes.remove(domino) if self.max_pips is not None: self.extra_dominoes.append(domino) domino.head.domino = None domino.tail.domino = None def split_all(self): """ Split all dominoes into separate cells. Useful for Dominosa. 
""" for domino in self.dominoes[:]: self.split(domino) def mutate(self, random, board_type=None, matches_allowed=True): # Choose number of mutations: 1 is most common, n is least common max_mutations = len(self.dominoes) mutation_count = self.pick_mutation_count(max_mutations, random) is_successful = False new_board = None while not is_successful: # mutation_count = min(mutation_count, 3) board_type = board_type or Board neighbours = [] removed = set() for _ in range(mutation_count): if neighbours: domino = random.choice(neighbours) else: domino = random.choice(self.dominoes) removed.add(domino) neighbours = list(domino.find_neighbours()) new_board = board_type(self.width, self.height, max_pips=self.max_pips) old_dominoes = set(self.dominoes) old_dominoes.update(self.extra_dominoes) new_board.extra_dominoes = [domino for domino in new_board.extra_dominoes if domino in old_dominoes] for domino in self.dominoes: if domino not in removed: i = new_board.extra_dominoes.index(domino) new_domino = new_board.extra_dominoes[i] new_domino.rotate_to(domino.degrees) new_board.add(new_domino, domino.head.x, domino.head.y) is_successful = new_board.fill(random, matches_allowed=matches_allowed) return new_board @staticmethod def pick_mutation_count(max_mutations, random): n = random.randint(1, (max_mutations + 1) * max_mutations / 2) mutation_count = 0 dn = max_mutations while n > 0: mutation_count += 1 n -= dn dn -= 1 return mutation_count def __getitem__(self, x): return self.cells[x] def __repr__(self): return f'{self.__class__.__name__}({self.width}, {self.height})' def display(self, cropped=False, cropping_bounds=None): """ Build a display string for the board's current state. @param cropped: True if blank rows and columns around the outside should be cropped out of the display. @param cropping_bounds: a list that will be cleared and then have [xmin, ymin, xmax, ymax] appended to it. Ignored if it is None. 
""" xmin, xmax, ymin, ymax = self.get_bounds(cropped) if cropping_bounds is not None: cropping_bounds[:] = [xmin, ymin, xmax, ymax] width = xmax-xmin+1 height = ymax-ymin+1 marker_display = self.display_markers() are_markers_unique = not marker_display.startswith('(') display = [[' '] * (width*2-1) for _ in range(height*2-1)] for y in range(height): for x in range(width): row = (height - y - 1)*2 col = x*2 cell = self[x+xmin][y+ymin] cell_display = self.display_cell(cell, x+xmin, y+ymin, are_markers_unique) display[row][col] = cell_display if (cell is not None and cell.domino is not None and cell.domino.head == cell): dx, dy = cell.domino.direction divider = '|' if dx else '-' display[row-dy][col+dx] = divider self.adjust_display(display) main_display = ''.join(''.join(row).rstrip() + '\n' for row in display) if marker_display: main_display = f'{main_display}---\n{marker_display}\n' if self.dice_set: dice_text = self.dice_set.crop_text(xmin, ymin) main_display = f'{main_display}---\ndice:{dice_text}\n' return main_display def display_markers(self): if not self.markers: return '' marker_values = set(self.markers.values()) if len(marker_values) != len(self.markers): sorted_markers = sorted(self.markers.items(), key=itemgetter(1, 0)) marker_display = ','.join(f'({x},{y}){die_pips}' for (x, y), die_pips in sorted_markers) else: marker_items = sorted((y, x, name) for (x, y), name in self.markers.items()) marker_display = '' for y, x, name in marker_items: cell = self[x][y] pips = cell.pips if cell else 'x' marker_display += f'{name}{pips}' return marker_display def display_cell(self, cell, x, y, are_markers_unique): if cell is None: display = 'x' else: display = str(cell.pips) if are_markers_unique: return self.markers.get((x, y), display) return display def adjust_display(self, display: typing.List[typing.List[str]]): """ Adjust the display grid before it gets assembled. """ def get_bounds(self, cropped): if not cropped: xmin = ymin = 0 xmax, ymax = self.width - 1, self.height - 1 else: xmin = self.width + 1 ymin = self.height + 1 xmax = ymax = 0 positions = chain(((cell.x, cell.y) for domino in self.dominoes for cell in (domino.head, domino.tail)), self.markers) for x, y in positions: xmin = min(xmin, x) xmax = max(xmax, x) ymin = min(ymin, y) ymax = max(ymax, y) return xmin, xmax, ymin, ymax def choose_extra_dominoes(self, random): """ Iterate through self.extra_dominoes, start at random position. @return a generator of dominoes. """ dominoes = self.extra_dominoes[:] count = len(dominoes) start = random.randrange(count) for i in range(count): yield dominoes[(i + start) % count] def choose_and_flip_extra_dominoes(self, random): """ Iterate through self.extra_dominoes, start at random position. @return a generator of (domino, is_flipped) pairs. Each domino is returned twice, with True or False in random order. """ for domino in self.choose_extra_dominoes(random): if domino.head.pips == domino.tail.pips: yield domino, False else: flip_first = random.randint(0, 1) for j in range(2): yield domino, flip_first + j == 1 def fill(self, random, matches_allowed=True, reset_cycles=True): """ Fill any remaining holes in a board with random dominoes. @param random: random number generator for choosing dominoes @param matches_allowed: True if neighbouring dominoes can match @param reset_cycles: True if the infinite loop detection should start again. @return: True if the board is now filled. 
""" if reset_cycles: self.cycles_remaining = 10000 for y in range(self.height): for x in range(self.width): if self[x][y] is None: return self.fill_space(x, y, random, matches_allowed) return True def fill_space(self, x, y, random, matches_allowed): """ Try all possible dominoes and positions starting at x, y. """ rotation = random.randint(0, 3) * 90 for _ in range(4): try: choices = self.choose_and_flip_extra_dominoes( random) for domino, is_flipped in choices: if self.cycles_remaining <= 0: return False self.cycles_remaining -= 1 domino.rotate_to(rotation) self.add(domino, x, y) self.add_count += 1 has_even_gaps = self.hasEvenGaps() if not has_even_gaps: self.remove(domino) break else: if is_flipped: domino.flip() if not matches_allowed and domino.hasMatch(): pass else: if self.fill(random, matches_allowed, reset_cycles=False): return True self.remove(domino) except BadPositionError: pass rotation = (rotation + 90) % 360 return False def visit_connected(self, cell): cell.visited = True for dx, dy in Domino.directions: x = cell.x + dx y = cell.y + dy if 0 <= x < self.width and 0 <= y < self.height: neighbour = self[x][y] if neighbour is not None and not neighbour.visited: self.visit_connected(neighbour) def is_connected(self): domino = None for domino in self.dominoes: domino.head.visited = False domino.tail.visited = False if domino is None: return True self.visit_connected(domino.head) return all(domino.head.visited and domino.tail.visited for domino in self.dominoes) @property def are_markers_connected(self): unvisited_markers = set(self.markers) if not unvisited_markers: return False x, y = unvisited_markers.pop() self.visit_connected_markers(x, y, unvisited_markers) return not unvisited_markers def visit_connected_markers(self, x: int, y: int, unvisited_markers: set): for dx, dy in Domino.directions: x2 = x + dx y2 = y + dy try: unvisited_markers.remove((x2, y2)) self.visit_connected_markers(x2, y2, unvisited_markers) except KeyError: pass @property def marker_area(self): if not self.markers: return 0 min_x = min_y = max_x = max_y = None for x, y in self.markers: if min_x is None: min_x = max_x = x min_y = max_y = y else: min_x = min(x, min_x) max_x = max(x, max_x) min_y = min(y, min_y) max_y = max(y, max_y) return (max_x - min_x + 1) * (max_y - min_y + 1) def has_loner(self): for domino in self.dominoes: neighbours = domino.find_neighbours() has_matching_neighbour = any(domino.isMatch(neighbour) for neighbour in neighbours) if not has_matching_neighbour: return True return False def hasMatch(self): for domino in self.dominoes: for cell in (domino.head, domino.tail): for neighbour in cell.find_neighbours(): if neighbour.pips == cell.pips: return True return False def findMatches(self): matches = {} for domino in self.dominoes: for match in domino.findMatches(): matches[(match.x, match.y)] = match match_coordinates = sorted(matches.keys()) return [matches[coord] for coord in match_coordinates] def hasEvenGaps(self): empty_spaces = set() to_visit = set() for y in range(self.height): for x in range(self.width): if self[x][y] is None: empty_spaces.add((x, y)) while empty_spaces: if len(empty_spaces) % 2 != 0: return False to_visit.add(empty_spaces.pop()) while to_visit: x, y = to_visit.pop() for dx, dy in Domino.directions: neighbour = (x+dx, y+dy) try: empty_spaces.remove(neighbour) to_visit.add(neighbour) except KeyError: pass return True class Domino(object): directions = [(1, 0), (0, 1), (-1, 0), (0, -1)] direction_names = 'ruld' alignment_names = 'hvhv' @classmethod def create(cls, 
max_pips): dominoes = [] for head_pips in range(max_pips+1): for tail_pips in range(head_pips, max_pips+1): dominoes.append(Domino(head_pips, tail_pips)) return dominoes @classmethod def get_direction(self, name): """ Get a direction by name. @return: dx, dy """ index = Domino.direction_names.find(name) return Domino.directions[index] def __init__(self, head, tail): if hasattr(head, 'domino'): self.check_available(head) self.check_available(tail) self.head = head self.tail = tail self.direction = (tail.x - head.x, tail.y - head.y) try: direction_index = self.directions.index(self.direction) except ValueError: msg = (f'Cells are not neighbours: {head.x},{head.y} and ' f'{tail.x},{tail.y}.') raise ValueError(msg) from None self.degrees = direction_index*90 else: self.head = Cell(head) self.tail = Cell(tail) self.degrees = 0 # 0, 90, 180, or 270 self.direction = None self.calculateDirection() self.head.domino = self self.tail.domino = self @staticmethod def check_available(cell): if cell.domino is not None: raise ValueError(f'Cell is not available: {cell.x},{cell.y}.') def __repr__(self): return f"Domino({self.head.pips!r}, {self.tail.pips!r})" def __eq__(self, other): if not isinstance(other, Domino): return False return ((self.head.pips == other.head.pips and self.tail.pips == other.tail.pips) or (self.head.pips == other.tail.pips and self.tail.pips == other.head.pips)) def __ne__(self, other): return not (self == other) def __hash__(self): return hash(self.head.pips) ^ hash(self.tail.pips) def display(self): return '{}|{}'.format(self.head.pips, self.tail.pips) def rotate(self, degrees): self.rotate_to((self.degrees + degrees) % 360) def rotate_to(self, degrees): self.degrees = degrees self.calculateDirection() if self.head.board: dx, dy = self.direction self.head.board.add(self.tail, self.head.x+dx, self.head.y+dy) def move(self, dx, dy): x = self.head.x y = self.head.y board = self.head.board board.remove(self) try: board.add(self, x+dx, y+dy) except Exception: board.add(self, x, y) raise def describe_move(self, dx, dy): direction_name = Domino.describe_direction(dx, dy) return self.get_name() + direction_name @staticmethod def describe_direction(dx, dy): direction_index = Domino.directions.index((dx, dy)) direction_name = Domino.direction_names[direction_index] return direction_name def describe_add(self, x, y): head, tail = self.head, self.tail if self.direction[0]: direction_name = 'h' if self.direction[0] < 0: head, tail = tail, head x -= 1 else: direction_name = 'v' if self.direction[1] > 0: head, tail = tail, head y += 1 return f'{head.pips}{tail.pips}{direction_name}{x+1}{y+1}' def describe_remove(self): dx, dy = self.direction direction_index = Domino.directions.index((dx, dy)) alignment_name = Domino.alignment_names[direction_index] return self.get_name() + alignment_name def get_name(self): name = '{}{}'.format(self.head.pips, self.tail.pips) if 90 <= self.degrees <= 180: name = name[::-1] # reverse return name def dominates_neighbours(self): return (self.head.dominates_neighbours() and self.tail.dominates_neighbours()) def flip(self): board = self.tail.board x, y = self.tail.x, self.tail.y board.remove(self) self.rotate(180) board.add(self, x, y) pass def calculateDirection(self): self.direction = Domino.directions[self.degrees//90] def find_neighbours(self): neighbour_cells = self.find_neighbour_cells() neighbour_dominoes = set(cell.domino for cell in neighbour_cells) return neighbour_dominoes def find_neighbour_cells(self): return chain(self.head.find_neighbours(), 
self.tail.find_neighbours()) def isMatch(self, other): return (self.head.pips == other.head.pips or self.tail.pips == other.tail.pips or self.head.pips == other.tail.pips or self.tail.pips == other.head.pips) def hasMatch(self): """ True if either cell matches one of its neighbours. Slightly different type of matching from isMatch(). """ for cell in (self.head, self.tail): for neighbour in cell.find_neighbours(): if neighbour.pips == cell.pips: return True return False def findMatches(self): matches = [] for cell in (self.head, self.tail): is_match = False for neighbour in cell.find_neighbours(): if neighbour.pips == cell.pips: is_match = True matches.append(neighbour) if is_match: matches.append(cell) return matches class GraphLimitExceeded(RuntimeError): def __init__(self, limit): super(GraphLimitExceeded, self).__init__( 'Graph size limit of {} exceeded.'.format(limit)) self.limit = limit @dataclass(frozen=True) class MoveDescription: move: str new_state: str edge_attrs: dict = None heuristic: float = 0 # Drive search using A*, leave at zero for Dyjkstra. # Similar to heuristic, but doesn't control search. Zero iff new_state is solved. remaining: float = 0 @dataclass class MoveRequest: start_state: str future: Future class BoardGraph(object): def __init__(self, board_class=Board, process_count: int = 0): self.graph = self.start = self.last = self.closest = None self.min_remaining = None # Minimum steps remaining to find a solution. self.board_class = board_class self.process_count = process_count if process_count > 0: self.executor = ProcessPoolExecutor(process_count) else: self.executor: typing.Optional[ProcessPoolExecutor] = None self.closest = None self.is_debugging = False def walk(self, board, size_limit=maxsize) -> typing.Set[str]: self.graph = DiGraph() self.start = board.display(cropped=True) self.graph.add_node(self.start) if self.executor is not None: walker = self.clone() else: walker = None # len of shortest path known from start to a state. g_score = defaultdict(lambda: math.inf) max_pips = board.max_pips start_h = self.calculate_heuristic(board) g_score[self.start] = 0 pending_nodes = PriorityQueue() pending_nodes.add(self.start, start_h) requests: typing.Deque[MoveRequest] = deque() while pending_nodes: if size_limit is not None and len(self.graph) >= size_limit: raise GraphLimitExceeded(size_limit) state = pending_nodes.pop() if not self.executor: moves = self.find_moves(state, max_pips) self.add_moves(state, moves, pending_nodes, g_score) else: request = MoveRequest( state, self.executor.submit(walker.find_moves, state, max_pips)) requests.append(request) while ((not pending_nodes and requests) or len(requests) > 2*self.process_count): request = requests.popleft() state = request.start_state moves = request.future.result() self.add_moves(state, moves, pending_nodes, g_score) return set(self.graph.nodes()) def clone(self) -> 'BoardGraph': """ Create a smaller copy of this object to pass to worker process. 
""" return self.__class__(self.board_class) def find_moves(self, state, max_pips): board = self.board_class.create(state, border=1, max_pips=max_pips) moves = list(self.generate_moves(board)) return moves def add_moves(self, start_state: str, moves: typing.Iterable[MoveDescription], pending_nodes: PriorityQueue, g_score: typing.Dict[str, float]): state_g_score = g_score[start_state] for description in moves: edge_attrs = description.edge_attrs or {} new_g_score = state_g_score + 1 new_state = description.new_state known_g_score = g_score[new_state] if not self.graph.has_node(new_state): # new node self.graph.add_node(new_state) is_improved = True if self.is_debugging: if description.heuristic == 0: print(new_state) else: is_improved = new_g_score < known_g_score if is_improved: g_score[new_state] = new_g_score f = new_g_score + description.heuristic pending_nodes.add(new_state, f) self.graph.add_edge(start_state, new_state, move=description.move, **edge_attrs) self.check_remaining(description.remaining, new_state) def check_remaining(self, remaining: float, new_state: str): if self.min_remaining is None or remaining < self.min_remaining: self.min_remaining = remaining self.closest = new_state if remaining == 0 and self.last is None: self.last = new_state def calculate_heuristic(self, board: Board) -> float: return 0 def generate_moves(self, board: Board) -> typing.Iterator[MoveDescription]: """ Generate all moves from the board's current state. """ dominoes = set(board.dominoes) for domino in dominoes: dx, dy = domino.direction yield from self.try_move(domino, dx, dy) yield from self.try_move(domino, -dx, -dy) def check_progress(self, board: Board) -> int: """ Check how close a board state is to a solution. """ dominoes = set(board.dominoes) domino_count = len(dominoes) return domino_count def try_move(self, domino, dx, dy): try: new_state, remaining = self.move(domino, dx, dy) move = domino.describe_move(dx, dy) yield MoveDescription(move, new_state, remaining=remaining) except BadPositionError: pass def move(self, domino, dx, dy) -> typing.Tuple[str, int]: """ Move a domino and calculate the new board state. Afterward, put the board back in its original state. @return: the new board state and remaining progress needed @raise BadPositionError: if the move is illegal """ domino.move(dx, dy) try: board = domino.head.board if not board.is_connected(): raise BadPositionError('Board is not connected.') if board.has_loner(): raise BadPositionError('Board has a lonely domino.') remaining = self.check_progress(board) return board.display(cropped=True), remaining finally: domino.move(-dx, -dy) def get_solution(self, return_partial=False, solution_nodes=None): """ Find a solution from the graph of moves. @param return_partial: If True, a partial solution will be returned if no solution exists. @param solution_nodes: Returned from get_solution_nodes(). @return: a list of strings describing each move. Each string is two digits describing the domino that moved plus a letter to show the direction. 
""" solution = [] if solution_nodes is None: solution_nodes = self.get_solution_nodes(return_partial) for i in range(len(solution_nodes)-1): source, target = solution_nodes[i:i+2] solution.append(self.graph[source][target]['move']) return solution def get_solution_nodes(self, return_partial=False): goal = self.closest if return_partial else self.last or '' solution_nodes = shortest_path(self.graph, self.start, goal) return solution_nodes def get_choice_counts(self, solution_nodes=None): if solution_nodes is None: solution_nodes = self.get_solution_nodes() return [len(self.graph[node]) for node in solution_nodes[:-1]] def get_average_choices(self, solution_nodes=None): choices = self.get_choice_counts(solution_nodes) return sum(choices) / float(len(choices)) if choices else maxsize def get_max_choices(self, solution_nodes=None): choices = self.get_choice_counts(solution_nodes) return max(choices) if choices else maxsize class CaptureBoardGraph(BoardGraph): def move(self, domino, dx, dy, offset=None): """ Move a domino and calculate the new board state. Afterward, put the board back in its original state. @param domino: the domino to move @param dx: the direction to move horizontally @param dy: the direction to move vertically @param offset: [x, y] position to update after the move, or None. The input position is updated to show where that position would be on the new board. The numbers are reduced if the border gets cropped away. @return: the new board state and remaining dominoes @raise BadPositionError: if the move is illegal """ matching_dominoes = set() complement_found = False domino.move(dx, dy) board = domino.head.board try: if not board.is_connected(): raise BadPositionError('Board is not connected after move.') for cell in (domino.head, domino.tail): for neighbour in cell.find_neighbours(): if neighbour.pips == cell.pips: matching_dominoes.add((neighbour.domino, neighbour.domino.head.x, neighbour.domino.head.y)) complement_found = (complement_found or neighbour.pips + cell.pips == 6) if matching_dominoes: matching_dominoes.add((domino, domino.head.x, domino.head.y)) elif not complement_found: raise BadPositionError( 'A legal move must have captures or complements.') for matching_domino, _, _ in matching_dominoes: board.remove(matching_domino) if not board.is_connected(): raise BadPositionError('Board is not connected after capture.') cropping_bounds = [] if offset is not None else None new_state = board.display(cropped=True, cropping_bounds=cropping_bounds) remaining = self.check_progress(board) if offset is not None: offset[0] -= cropping_bounds[0] offset[1] -= cropping_bounds[1] return new_state, remaining finally: for matching_domino, x, y in matching_dominoes: board.add(matching_domino, x, y) domino.move(-dx, -dy) def check_progress(self, board: Board) -> int: return len(board.display(cropped=True)) class BoardAnalysis(object): WEIGHTS = (-1, -1, 1, -1, -1) @classmethod def calculate_score(cls, values): (min_dominoes, max_choices, solution_length, avg_choices, _graph_size) = values if min_dominoes is not None and min_dominoes > 0: return -min_dominoes*100 return max_choices * 100 - solution_length + avg_choices * 0.1 @classmethod def best_score(cls, population): scores = [cls.calculate_score(ind.fitness.values) for ind in population] positives = [score for score in scores if score > 0] if positives: return min(positives) return max(score for score in scores if score <= 0) def get_values(self): soln_len = len(self.solution) return (self.min_dominoes, 2*self.max_choices + 
abs(soln_len - OPTIMUM_SOLUTION_LENGTH), soln_len, self.average_choices, self.graph_size) def __init__(self, board, graph, size_limit=maxsize): self.board = board self.start = board.display() try: graph.walk(board, size_limit) except GraphLimitExceeded: raise except Exception: raise self.graph_size = len(graph.graph) self.start = graph.start if graph.last is None: self.min_dominoes = graph.min_remaining self.solution = self.choice_counts = [] self.average_choices = self.max_choices = 0 else: self.min_dominoes = 0 self.solution = graph.get_solution() self.average_choices = graph.get_average_choices() self.max_choices = graph.get_max_choices() self.choice_counts = graph.get_choice_counts() board.solution_length = len(self.solution) def display(self): score = BoardAnalysis.calculate_score(self.get_values()) return ('{} score, {} nodes{}{}, ' 'avg {} and max {} choices {}').format( score, self.graph_size, 200 * ' ', ', '.join(self.solution), self.average_choices, self.max_choices, self.choice_counts) SLOW_BOARD_SIZE = 2000 MAX_BOARD_SIZE = 10000 # 140000 Bad, 70000 Mostly Good class SearchManager(object): def __init__(self, graph_class, max_pips=6): self.graph_class = graph_class self.scores = [] self.graph_sizes = [] self.max_pips = max_pips def create_random_board(self, board_type, random, width, height): while True: board = board_type(width, height, max_pips=self.max_pips) if board.fill(random, matches_allowed=False): return board @staticmethod def mutate_board(board_type, random, board): return board.mutate(random, board_type=board_type, matches_allowed=False), def evaluate_board(self, slow_queue, individual): try: analysis = BoardAnalysis(individual, self.graph_class(), size_limit=SLOW_BOARD_SIZE) values = analysis.get_values() return values except GraphLimitExceeded: slow_queue.put(individual.display()) return (len(individual.dominoes) + 1), 0, 0, 0, 0 def evaluate_slow_boards(self, slow_queue, results_queue): while True: start = slow_queue.get() board = Board.create(start, max_pips=6) try: analysis = BoardAnalysis(board, self.graph_class, size_limit=MAX_BOARD_SIZE) results_queue.put((start, analysis.get_values())) except GraphLimitExceeded: pass def loggedMap(self, pool, function, *args): results = pool.map(function, *args) if function.func is self.evaluate_board: for fitness_values in results: graph_size = fitness_values[-1] score = BoardAnalysis.calculate_score(fitness_values) self.graph_sizes.append(graph_size) self.scores.append(score) iterations = len(self.scores) plt.title('Score vs. 
Graph Size (n={})'.format(iterations)) plt.plot(self.graph_sizes, self.scores, 'o', alpha=0.3) plt.ylabel("score") plt.xlabel("graph size") plt.savefig('scores.png') plt.close() return results def selectBoards(self, selector, results_queue, hall_of_fame, boardType, population, count): try: while True: start, fitness_values = results_queue.get_nowait() board = boardType.create(start, max_pips=6) board.fitness.values = fitness_values graph_size = fitness_values[-1] score = BoardAnalysis.calculate_score(fitness_values) self.scores.append(score) self.graph_sizes.append(graph_size) hall_of_fame.update([board]) population.append(board) except Empty: pass return selector(population, count) def monitor(hall_of_fame, graph_class): while True: cmd = input("Enter 'p' to print report.\n") if cmd == 'p': hall_of_fame.display(graph_class) CXPB, MUTPB, NPOP, NGEN, WIDTH, HEIGHT = 0.0, 0.5, 1000, 300, 4, 3 MAX_PIPS = 2 OPTIMUM_SOLUTION_LENGTH = WIDTH*HEIGHT def find_boards_with_deap(graph_class=CaptureBoardGraph, board_class=Board): print('Starting.') random = Random() manager = Manager() search_manager = SearchManager(graph_class, max_pips=MAX_PIPS) slow_queue = manager.Queue() results_queue = manager.Queue() creator.create("FitnessMax", base.Fitness, weights=BoardAnalysis.WEIGHTS) # noinspection PyUnresolvedReferences creator.create("Individual", board_class, fitness=creator.FitnessMax) toolbox = base.Toolbox() pool = Pool() halloffame = hall_of_fame.MappedHallOfFame(10, solution_length_index=2) pool.apply_async(search_manager.evaluate_slow_boards, [slow_queue, results_queue]) toolbox.register("map", search_manager.loggedMap, pool) # noinspection PyUnresolvedReferences toolbox.register("individual", search_manager.create_random_board, creator.Individual, random, WIDTH, HEIGHT) # noinspection PyUnresolvedReferences toolbox.register("population", tools.initRepeat, list, toolbox.individual) # toolbox.register("mate", tools.cxTwoPoint) # noinspection PyUnresolvedReferences toolbox.register("mutate", search_manager.mutate_board, creator.Individual, random) # noinspection PyUnresolvedReferences toolbox.register("select", search_manager.selectBoards, partial(tools.selTournament, tournsize=3), results_queue, halloffame, creator.Individual) toolbox.register("evaluate", search_manager.evaluate_board, slow_queue) # noinspection PyUnresolvedReferences pop = toolbox.population(n=NPOP) stats = Statistics() stats.register("best", BoardAnalysis.best_score) verbose = True bg = Thread(target=monitor, args=(halloffame, graph_class)) bg.daemon = True bg.start() eaSimple(pop, toolbox, CXPB, MUTPB, NGEN, stats, halloffame, verbose) halloffame.display(graph_class) def measure_performance(): state = """\ 1|2 4|5 1|1 1|4 5|3 1|0 4 0 2|0 6|2 - - 2 5 4 6|6 2 - - 6 4 4 0|4 2 - - 0 3 4|6 2|5 """ board = Board.create(state, max_pips=6) analysis = BoardAnalysis(board) print(analysis.display()) def analyseRandomBoard(random): # start_time = datetime.now() board = Board(6, 5, max_pips=6) board.fill(random) analysis = BoardAnalysis(board) # duration = (datetime.now() - start_time).total_seconds() return analysis.graph_size, analysis.score def plotPerformance(): iterations = 20 random = Random() end_time = datetime.now() + timedelta(minutes=10) sizes = [] scores = [] while datetime.now() < end_time: size, score = analyseRandomBoard(random) sizes.append(size) scores.append(score) plt.title('Score vs. 
Graph Size (n={})'.format(iterations))
    plt.plot(sizes, scores, 'o', alpha=0.5)
    plt.ylabel("score")
    plt.xlabel("graph size")
    plt.savefig('times.png')
    print('Done.')


def live_main():
    max_choices, soln_lens = np.meshgrid(
        np.arange(0, 15),
        np.arange(0, 100))
    scores = soln_lens - 5*max_choices
    plt.figure()
    contour = plt.contour(max_choices, soln_lens, scores)
    plt.clabel(contour, inline=True)
    plt.xlabel('max choices')
    plt.ylabel('solution lengths')
    plt.savefig('scores.png')
    print('Done.')


if __name__ == '__main__':
    # plotPerformance()
    find_boards_with_deap()
    # measure_performance()
elif __name__ == '__live_coding__':
    live_main()
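# A hedged usage sketch (not part of the original module): the text format
# accepted by Board.create() is the same one produced by Board.display() --
# digits are cell pips, '|' and '-' join the two halves of a domino, and 'x'
# marks an empty space. The 2x2 layout below is illustrative only, not a
# puzzle from this project.
#
# start = """\
# 1|2
#
# 3|4
# """
# board = Board.create(start, max_pips=6)
# print(board.display(cropped=True))   # round-trips back to the text form
#
# graph = CaptureBoardGraph()
# reachable = graph.walk(board)        # every board state reachable by legal moves
# print(len(reachable))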
port_forward.py
import socket
import select
import sys
from threading import Thread

import paramiko


def handler(chan, host, port):
    sock = socket.socket()
    try:
        sock.connect((host, port))
    except Exception as e:
        print("Forwarding request to %s:%d failed: %r" % (host, port, e))
        return

    print(
        "Connected! Tunnel open %r -> %r -> %r"
        % (chan.origin_addr, chan.getpeername(), (host, port))
    )
    while True:
        r, w, x = select.select([sock, chan], [], [])
        if sock in r:
            data = sock.recv(1024)
            if len(data) == 0:
                break
            chan.send(data)
        if chan in r:
            data = chan.recv(1024)
            if len(data) == 0:
                break
            sock.send(data)
    chan.close()
    sock.close()


def reverse_forward_tunnel(server_port, remote_host, remote_port, transport):
    transport.request_port_forward("", server_port)
    while True:
        chan = transport.accept(1000)
        if chan is None:
            continue
        thr = Thread(
            target=handler, args=(chan, remote_host, remote_port)
        )
        thr.daemon = True
        thr.start()


def ssh_tunnel(port=7000, port_remote=4005):
    """
    ssh -R 4000:internal.example.com:80 public.example.com
    """
    ssh_host = 'kaxtus.com'
    ssh_port = 22
    ssh_user = 'portforward'
    ssh_pass = 'p0rt!@#'

    remote_bind_port = port_remote  # port on server to forward
    forward_host = '127.0.0.1'  # dest host to forward to
    forward_port = port  # dest port to forward to

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.WarningPolicy())

    try:
        client.connect(
            ssh_host,
            ssh_port,
            timeout=36,
            username=ssh_user,
            password=ssh_pass,
        )
    except Exception as e:
        print("*** Failed to connect to %s:%d: %r" % (ssh_host, ssh_port, e))
        sys.exit(1)

    print(
        "Now forwarding remote port %d to %s:%d ..."
        % (remote_bind_port, forward_host, forward_port)
    )

    try:
        reverse_forward_tunnel(
            remote_bind_port, forward_host, forward_port, client.get_transport()
        )
    except KeyboardInterrupt:
        print("C-c: Port forwarding stopped.")
        sys.exit(0)


if __name__ == "__main__":
    port = 7000
    port_remote = 4005
    if len(sys.argv) > 1:
        port = 6969
        port_remote = 4004
    ssh_tunnel(port, port_remote)
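# A hedged usage sketch (not part of the original script): ssh_tunnel(7000, 4005)
# asks the SSH server at kaxtus.com to listen on its port 4005 and relay every
# incoming connection back to 127.0.0.1:7000 on this machine -- the paramiko
# equivalent of `ssh -R 4005:127.0.0.1:7000 portforward@kaxtus.com`. Running it
# on a background thread (ports below are illustrative) keeps the caller free:
#
# import threading
#
# tunnel = threading.Thread(target=ssh_tunnel, args=(7000, 4005), daemon=True)
# tunnel.start()
# # Anything that now connects to port 4005 *on the SSH server* is relayed to
# # whatever is listening on 127.0.0.1:7000 here, until this process exits.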
web_sockets.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/server/web_sockets.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import collections import logging import queue import threading import weakref from king_phisher import ipaddress from king_phisher import serializers from king_phisher import utilities from king_phisher.server import signals from king_phisher.server.database import models as db_models import advancedhttpserver EventSubscription = collections.namedtuple('EventSubscription', ('attributes', 'event_types')) class Event(object): """ An object representing an event which occurred on the server in a way that is ready to be published to client subscribers. """ __slots__ = ('event_id', 'event_type', 'sources') def __init__(self, event_id, event_type, sources): self.event_id = event_id """The unique string identifier of this event.""" self.event_type = event_type """The unique string identifier of the type of this event.""" self.sources = sources """The source objects which are associated with this event.""" class EventSocket(advancedhttpserver.WebSocketHandler): """ A socket through which server events are published to subscribers. This socket will automatically add and remove itself from the manager that is initialized with. """ logger = logging.getLogger('KingPhisher.Server.WebSocket.EventPublisher') def __init__(self, handler, manager): """ :param handler: The request handler that should be used by this socket. :type handler: :py:class:`advancedhttpserver.RequestHandler` :param manager: The manager that this event socket should register with. :type manager: :py:class:`.WebSocketsManager` """ handler.connection.settimeout(None) self._subscriptions = {} self.rpc_session = handler.rpc_session if self.rpc_session.event_socket is not None: self.rpc_session.event_socket.close() self.rpc_session.event_socket = self manager.add(self) self._manager_ref = weakref.ref(manager) super(EventSocket, self).__init__(handler) def is_subscribed(self, event_id, event_type): """ Check if the client is currently subscribed to the specified server event. 
:param str event_id: The identifier of the event to subscribe to. :param str event_type: A sub-type for the corresponding event. :return: Whether or not the client is subscribed to the event. :rtype: bool """ if event_id not in self._subscriptions: return False return event_type in self._subscriptions[event_id].event_types def on_closed(self): manager = self._manager_ref() if manager is not None: manager.remove(self) return def publish(self, event): """ Publish the event by sending the relevant information to the client. If the client has not requested to receive the information through a subscription, then no data will be sent. :param event: The object representing the data to be published. :type event: :py:class:`.Event` """ subscription = self._subscriptions.get(event.event_id) if subscription is None: return if event.event_type not in subscription.event_types: return summaries = [] for source in event.sources: if isinstance(source, db_models.Base) and not source.session_has_permissions('r', self.rpc_session): continue summary = dict((attribute, getattr(source, attribute, None)) for attribute in subscription.attributes) summaries.append(summary) if not summaries: return msg = { 'event': { 'id': event.event_id, 'type': event.event_type, 'objects': summaries } } self.logger.debug("publishing event {0} (type: {1}) with {2} objects".format(event.event_id, event.event_type, len(summaries))) self.send_message_text(serializers.JSON.dumps(msg, pretty=False)) def subscribe(self, event_id, event_types=None, attributes=None): """ Subscribe the client to the specified event published by the server. When the event is published the specified *attributes* of it and it's corresponding id and type information will be sent to the client. :param str event_id: The identifier of the event to subscribe to. :param list event_types: A list of sub-types for the corresponding event. :param list attributes: A list of attributes of the event object to be sent to the client. """ utilities.assert_arg_type(event_id, str, arg_pos=1) utilities.assert_arg_type(event_types, (type(None), list, set, tuple), arg_pos=2) utilities.assert_arg_type(event_types, (type(None), list, set, tuple), arg_pos=3) subscription = self._subscriptions.get(event_id) if subscription is None: subscription = EventSubscription(attributes=set(), event_types=set()) if event_types is not None: subscription.event_types.update(event_types) if attributes is not None: subscription.attributes.update(attributes) self._subscriptions[event_id] = subscription def unsubscribe(self, event_id, event_types=None, attributes=None): """ Unsubscribe from an event published by the server that the client previously subscribed to. :param str event_id: The identifier of the event to subscribe to. :param list event_types: A list of sub-types for the corresponding event. :param list attributes: A list of attributes of the event object to be sent to the client. 
""" utilities.assert_arg_type(event_id, str, arg_pos=1) utilities.assert_arg_type(event_types, (type(None), list, set, tuple), arg_pos=2) utilities.assert_arg_type(event_types, (type(None), list, set, tuple), arg_pos=3) subscription = self._subscriptions.get(event_id) if subscription is None: return if event_types is not None: for event_type in event_types: subscription.event_types.discard(event_type) if attributes is not None: for attribute in attributes: subscription.attributes.discard(attribute) if not subscription.event_types and not subscription.attributes: del self._subscriptions[event_id] class WebSocketsManager(object): """ An object used to manage connected web sockets. """ logger = logging.getLogger('KingPhisher.Server.WebSocketManager') def __init__(self, config, job_manager): """ :param config: Configuration to retrieve settings from. :type config: :py:class:`smoke_zephyr.configuration.Configuration` :param job_manager: A job manager instance that can be used to schedule tasks. :type job_manager: :py:class:`smoke_zephyr.job.JobManager` """ self.config = config self.web_sockets = [] self.job_manager = job_manager self._ping_job = job_manager.job_add(self.ping_all, seconds=30) self._work_queue = queue.Queue() self._worker_thread = threading.Thread(target=self._worker_routine) self._worker_thread.start() signals.db_session_deleted.connect(self._sig_db_deleted) signals.db_session_inserted.connect(self._sig_db_inserted) signals.db_session_updated.connect(self._sig_db_updated) def _sig_db(self, event_id, event_type, targets): event = Event( event_id='db-' + event_id.replace('_', '-'), event_type=event_type, sources=targets ) self._work_queue.put((self._worker_publish_event, (event,))) def _sig_db_deleted(self, event_id, targets, session=None): return self._sig_db(event_id, 'deleted', targets) def _sig_db_inserted(self, event_id, targets, session=None): return self._sig_db(event_id, 'inserted', targets) def _sig_db_updated(self, event_id, targets, session=None): return self._sig_db(event_id, 'updated', targets) def _worker_publish_event(self, event): for web_socket in self.web_sockets: if not isinstance(web_socket, EventSocket): continue web_socket.publish(event) def _worker_routine(self): self.logger.debug("web socket manager worker running in tid: 0x{0:x}".format(threading.current_thread().ident)) while True: job = self._work_queue.get() if job is None: break func, args = job try: func(*args) except Exception: self.logger.error('web socket manager worker thread encountered an exception while processing a job', exc_info=True) def __iter__(self): for web_socket in self.web_sockets: yield web_socket def __len__(self): return len(self.web_sockets) def add(self, web_socket): """ Add a connected web socket to the manager. :param web_socket: The connected web socket. :type web_socket: :py:class:`advancedhttpserver.WebSocketHandler` """ utilities.assert_arg_type(web_socket, advancedhttpserver.WebSocketHandler) self.web_sockets.append(web_socket) def dispatch(self, handler): """ A method that is suitable for use as a :py:attr:`~advancedhttpserver.RequestHandler.web_socket_handler`. :param handler: The current request handler instance. 
:type handler: :py:class:`~king_phisher.server.server.KingPhisherRequestHandler` """ if not ipaddress.ip_address(handler.client_address[0]).is_loopback: return prefix = '/' if self.config.get('server.vhost_directories'): prefix += handler.vhost + '/' request_path = handler.path if request_path.startswith(prefix): request_path = request_path[len(prefix):] if request_path == '_/ws/events/json': EventSocket(handler, self) return handler.respond_not_found() return def ping_all(self): """ Ping all of the connected web sockets to ensure they stay alive. This method is automatically executed periodically through a job added when the manager is initialized. """ disconnected = collections.deque() for web_socket in self.web_sockets: if web_socket.connected: try: web_socket.ping() except Exception: self.logger.error('error occurred while pinging the web socket, closing it', exc_info=True) web_socket.close() else: continue disconnected.append(web_socket) for web_socket in disconnected: self.logger.debug('closing a disconnected web socket') self.web_sockets.remove(web_socket) def remove(self, web_socket): """ Remove a connected web socket from those that are currently being managed. If the web socket is not currently being managed, no changes are made. :param web_socket: The connected web socket. :type web_socket: :py:class:`advancedhttpserver.WebSocketHandler` """ if web_socket in self.web_sockets: self.web_sockets.remove(web_socket) def stop(self): """ Shutdown the manager and clean up the resources it has allocated. """ self.job_manager.job_delete(self._ping_job) for web_socket in self.web_sockets: if web_socket.connected: web_socket.close() self.web_sockets = [] self._work_queue.put(None) self._worker_thread.join()
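# A hedged illustration (not part of the original module): EventSocket.publish()
# sends each event to the client as a JSON text frame shaped like the literal
# below. 'objects' holds one summary dict per visible source, limited to the
# attributes the client subscribed to; the id, type, and attribute names shown
# here are placeholders, not a schema from this project.
#
# {
#     "event": {
#         "id": "db-messages",
#         "type": "inserted",
#         "objects": [
#             {"id": 1, "campaign_id": 2}
#         ]
#     }
# }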
twisterlib.py
#!/usr/bin/env python3 # vim: set syntax=python ts=4 : # # Copyright (c) 2018 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os import contextlib import string import mmap import sys import re import subprocess import select import shutil import shlex import signal import threading import concurrent.futures from collections import OrderedDict import queue import time import csv import glob import concurrent import xml.etree.ElementTree as ET import logging from pathlib import Path from distutils.spawn import find_executable from colorama import Fore import pickle import platform import yaml import json from multiprocessing import Lock, Process, Value from typing import List try: # Use the C LibYAML parser if available, rather than the Python parser. # It's much faster. from yaml import CSafeLoader as SafeLoader from yaml import CDumper as Dumper except ImportError: from yaml import SafeLoader, Dumper try: import serial except ImportError: print("Install pyserial python module with pip to use --device-testing option.") try: from tabulate import tabulate except ImportError: print("Install tabulate python module with pip to use --device-testing option.") try: import psutil except ImportError: print("Install psutil python module with pip to run in Qemu.") try: import pty except ImportError as capture_error: if os.name == "nt": # "nt" means that program is running on Windows OS pass # "--device-serial-pty" option is not supported on Windows OS else: raise capture_error ZEPHYR_BASE = os.getenv("ZEPHYR_BASE") if not ZEPHYR_BASE: sys.exit("$ZEPHYR_BASE environment variable undefined") # This is needed to load edt.pickle files. sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts", "python-devicetree", "src")) from devicetree import edtlib # pylint: disable=unused-import # Use this for internal comparisons; that's what canonicalization is # for. Don't use it when invoking other components of the build system # to avoid confusing and hard to trace inconsistencies in error messages # and logs, generated Makefiles, etc. compared to when users invoke these # components directly. # Note "normalization" is different from canonicalization, see os.path. 
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE) sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/")) import scl import expr_parser logger = logging.getLogger('twister') logger.setLevel(logging.DEBUG) class ExecutionCounter(object): def __init__(self, total=0): self._done = Value('i', 0) self._passed = Value('i', 0) self._skipped_configs = Value('i', 0) self._skipped_runtime = Value('i', 0) self._skipped_cases = Value('i', 0) self._error = Value('i', 0) self._failed = Value('i', 0) self._total = Value('i', total) self._cases = Value('i', 0) self.lock = Lock() @property def cases(self): with self._cases.get_lock(): return self._cases.value @cases.setter def cases(self, value): with self._cases.get_lock(): self._cases.value = value @property def skipped_cases(self): with self._skipped_cases.get_lock(): return self._skipped_cases.value @skipped_cases.setter def skipped_cases(self, value): with self._skipped_cases.get_lock(): self._skipped_cases.value = value @property def error(self): with self._error.get_lock(): return self._error.value @error.setter def error(self, value): with self._error.get_lock(): self._error.value = value @property def done(self): with self._done.get_lock(): return self._done.value @done.setter def done(self, value): with self._done.get_lock(): self._done.value = value @property def passed(self): with self._passed.get_lock(): return self._passed.value @passed.setter def passed(self, value): with self._passed.get_lock(): self._passed.value = value @property def skipped_configs(self): with self._skipped_configs.get_lock(): return self._skipped_configs.value @skipped_configs.setter def skipped_configs(self, value): with self._skipped_configs.get_lock(): self._skipped_configs.value = value @property def skipped_runtime(self): with self._skipped_runtime.get_lock(): return self._skipped_runtime.value @skipped_runtime.setter def skipped_runtime(self, value): with self._skipped_runtime.get_lock(): self._skipped_runtime.value = value @property def failed(self): with self._failed.get_lock(): return self._failed.value @failed.setter def failed(self, value): with self._failed.get_lock(): self._failed.value = value @property def total(self): with self._total.get_lock(): return self._total.value class CMakeCacheEntry: '''Represents a CMake cache entry. This class understands the type system in a CMakeCache.txt, and converts the following cache types to Python types: Cache Type Python type ---------- ------------------------------------------- FILEPATH str PATH str STRING str OR list of str (if ';' is in the value) BOOL bool INTERNAL str OR list of str (if ';' is in the value) ---------- ------------------------------------------- ''' # Regular expression for a cache entry. # # CMake variable names can include escape characters, allowing a # wider set of names than is easy to match with a regular # expression. To be permissive here, use a non-greedy match up to # the first colon (':'). This breaks if the variable name has a # colon inside, but it's good enough. CACHE_ENTRY = re.compile( r'''(?P<name>.*?) # name :(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type =(?P<value>.*) # value ''', re.X) @classmethod def _to_bool(cls, val): # Convert a CMake BOOL string into a Python bool. # # "True if the constant is 1, ON, YES, TRUE, Y, or a # non-zero number. False if the constant is 0, OFF, NO, # FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in # the suffix -NOTFOUND. Named boolean constants are # case-insensitive. 
If the argument is not one of these # constants, it is treated as a variable." # # https://cmake.org/cmake/help/v3.0/command/if.html val = val.upper() if val in ('ON', 'YES', 'TRUE', 'Y'): return 1 elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''): return 0 elif val.endswith('-NOTFOUND'): return 0 else: try: v = int(val) return v != 0 except ValueError as exc: raise ValueError('invalid bool {}'.format(val)) from exc @classmethod def from_line(cls, line, line_no): # Comments can only occur at the beginning of a line. # (The value of an entry could contain a comment character). if line.startswith('//') or line.startswith('#'): return None # Whitespace-only lines do not contain cache entries. if not line.strip(): return None m = cls.CACHE_ENTRY.match(line) if not m: return None name, type_, value = (m.group(g) for g in ('name', 'type', 'value')) if type_ == 'BOOL': try: value = cls._to_bool(value) except ValueError as exc: args = exc.args + ('on line {}: {}'.format(line_no, line),) raise ValueError(args) from exc elif type_ in ['STRING', 'INTERNAL']: # If the value is a CMake list (i.e. is a string which # contains a ';'), convert to a Python list. if ';' in value: value = value.split(';') return CMakeCacheEntry(name, value) def __init__(self, name, value): self.name = name self.value = value def __str__(self): fmt = 'CMakeCacheEntry(name={}, value={})' return fmt.format(self.name, self.value) class CMakeCache: '''Parses and represents a CMake cache file.''' @staticmethod def from_file(cache_file): return CMakeCache(cache_file) def __init__(self, cache_file): self.cache_file = cache_file self.load(cache_file) def load(self, cache_file): entries = [] with open(cache_file, 'r') as cache: for line_no, line in enumerate(cache): entry = CMakeCacheEntry.from_line(line, line_no) if entry: entries.append(entry) self._entries = OrderedDict((e.name, e) for e in entries) def get(self, name, default=None): entry = self._entries.get(name) if entry is not None: return entry.value else: return default def get_list(self, name, default=None): if default is None: default = [] entry = self._entries.get(name) if entry is not None: value = entry.value if isinstance(value, list): return value elif isinstance(value, str): return [value] if value else [] else: msg = 'invalid value {} type {}' raise RuntimeError(msg.format(value, type(value))) else: return default def __contains__(self, name): return name in self._entries def __getitem__(self, name): return self._entries[name].value def __setitem__(self, name, entry): if not isinstance(entry, CMakeCacheEntry): msg = 'improper type {} for value {}, expecting CMakeCacheEntry' raise TypeError(msg.format(type(entry), entry)) self._entries[name] = entry def __delitem__(self, name): del self._entries[name] def __iter__(self): return iter(self._entries.values()) class TwisterException(Exception): pass class TwisterRuntimeError(TwisterException): pass class ConfigurationError(TwisterException): def __init__(self, cfile, message): TwisterException.__init__(self, cfile + ": " + message) class BuildError(TwisterException): pass class ExecutionError(TwisterException): pass class HarnessImporter: def __init__(self, name): sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister")) module = __import__("harness") if name: my_class = getattr(module, name) else: my_class = getattr(module, "Test") self.instance = my_class() class Handler: def __init__(self, instance, type_str="build"): """Constructor """ self.state = "waiting" self.run = False self.duration 
= 0 self.type_str = type_str self.binary = None self.pid_fn = None self.call_make_run = False self.name = instance.name self.instance = instance self.timeout = instance.testcase.timeout self.sourcedir = instance.testcase.source_dir self.build_dir = instance.build_dir self.log = os.path.join(self.build_dir, "handler.log") self.returncode = 0 self.set_state("running", self.duration) self.generator = None self.generator_cmd = None self.args = [] self.terminated = False def set_state(self, state, duration): self.state = state self.duration = duration def get_state(self): ret = (self.state, self.duration) return ret def record(self, harness): if harness.recording: filename = os.path.join(self.build_dir, "recording.csv") with open(filename, "at") as csvfile: cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep) cw.writerow(harness.fieldnames) for instance in harness.recording: cw.writerow(instance) def terminate(self, proc): # encapsulate terminate functionality so we do it consistently where ever # we might want to terminate the proc. We need try_kill_process_by_pid # because of both how newer ninja (1.6.0 or greater) and .NET / renode # work. Newer ninja's don't seem to pass SIGTERM down to the children # so we need to use try_kill_process_by_pid. for child in psutil.Process(proc.pid).children(recursive=True): try: os.kill(child.pid, signal.SIGTERM) except ProcessLookupError: pass proc.terminate() # sleep for a while before attempting to kill time.sleep(0.5) proc.kill() self.terminated = True def add_missing_testscases(self, harness): """ If testsuite was broken by some error (e.g. timeout) it is necessary to add information about next testcases, which were not be performed due to this error. """ for c in self.instance.testcase.cases: if c not in harness.tests: harness.tests[c] = "BLOCK" class BinaryHandler(Handler): def __init__(self, instance, type_str): """Constructor @param instance Test Instance """ super().__init__(instance, type_str) self.call_west_flash = False # Tool options self.valgrind = False self.lsan = False self.asan = False self.ubsan = False self.coverage = False def try_kill_process_by_pid(self): if self.pid_fn: pid = int(open(self.pid_fn).read()) os.unlink(self.pid_fn) self.pid_fn = None # clear so we don't try to kill the binary twice try: os.kill(pid, signal.SIGTERM) except ProcessLookupError: pass def _output_reader(self, proc): self.line = proc.stdout.readline() def _output_handler(self, proc, harness): if harness.is_pytest: harness.handle(None) return log_out_fp = open(self.log, "wt") timeout_extended = False timeout_time = time.time() + self.timeout while True: this_timeout = timeout_time - time.time() if this_timeout < 0: break reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True) reader_t.start() reader_t.join(this_timeout) if not reader_t.is_alive(): line = self.line logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip())) log_out_fp.write(line.decode('utf-8')) log_out_fp.flush() harness.handle(line.decode('utf-8').rstrip()) if harness.state: if not timeout_extended or harness.capture_coverage: timeout_extended = True if harness.capture_coverage: timeout_time = time.time() + 30 else: timeout_time = time.time() + 2 else: reader_t.join(0) break try: # POSIX arch based ztests end on their own, # so let's give it up to 100ms to do so proc.wait(0.1) except subprocess.TimeoutExpired: self.terminate(proc) log_out_fp.close() def handle(self): harness_name = self.instance.testcase.harness.capitalize() harness_import = 
HarnessImporter(harness_name) harness = harness_import.instance harness.configure(self.instance) if self.call_make_run: command = [self.generator_cmd, "run"] elif self.call_west_flash: command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir] else: command = [self.binary] run_valgrind = False if self.valgrind and shutil.which("valgrind"): command = ["valgrind", "--error-exitcode=2", "--leak-check=full", "--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp", "--log-file=" + self.build_dir + "/valgrind.log" ] + command run_valgrind = True logger.debug("Spawning process: " + " ".join(shlex.quote(word) for word in command) + os.linesep + "in directory: " + self.build_dir) start_time = time.time() env = os.environ.copy() if self.asan: env["ASAN_OPTIONS"] = "log_path=stdout:" + \ env.get("ASAN_OPTIONS", "") if not self.lsan: env["ASAN_OPTIONS"] += "detect_leaks=0" if self.ubsan: env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \ env.get("UBSAN_OPTIONS", "") with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc: logger.debug("Spawning BinaryHandler Thread for %s" % self.name) t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True) t.start() t.join() if t.is_alive(): self.terminate(proc) t.join() proc.wait() self.returncode = proc.returncode self.try_kill_process_by_pid() handler_time = time.time() - start_time if self.coverage: subprocess.call(["GCOV_PREFIX=" + self.build_dir, "gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True) # FIXME: This is needed when killing the simulator, the console is # garbled and needs to be reset. Did not find a better way to do that. if sys.stdout.isatty(): subprocess.call(["stty", "sane"]) if harness.is_pytest: harness.pytest_run(self.log) self.instance.results = harness.tests if not self.terminated and self.returncode != 0: # When a process is killed, the default handler returns 128 + SIGTERM # so in that case the return code itself is not meaningful self.set_state("failed", handler_time) self.instance.reason = "Failed" elif run_valgrind and self.returncode == 2: self.set_state("failed", handler_time) self.instance.reason = "Valgrind error" elif harness.state: self.set_state(harness.state, handler_time) if harness.state == "failed": self.instance.reason = "Failed" else: self.set_state("timeout", handler_time) self.instance.reason = "Timeout" self.add_missing_testscases(harness) self.record(harness) class DeviceHandler(Handler): def __init__(self, instance, type_str): """Constructor @param instance Test Instance """ super().__init__(instance, type_str) self.suite = None def monitor_serial(self, ser, halt_fileno, harness): if harness.is_pytest: harness.handle(None) return log_out_fp = open(self.log, "wt") ser_fileno = ser.fileno() readlist = [halt_fileno, ser_fileno] if self.coverage: # Set capture_coverage to True to indicate that right after # test results we should get coverage data, otherwise we exit # from the test. harness.capture_coverage = True ser.flush() while ser.isOpen(): readable, _, _ = select.select(readlist, [], [], self.timeout) if halt_fileno in readable: logger.debug('halted') ser.close() break if ser_fileno not in readable: continue # Timeout. serial_line = None try: serial_line = ser.readline() except TypeError: pass except serial.SerialException: ser.close() break # Just because ser_fileno has data doesn't mean an entire line # is available yet. 
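# Illustrative sketch (not part of the original twister source): the
# halt-pipe pattern used by DeviceHandler.monitor_serial() above. The
# reader select()s on a data fd plus a pipe fd; writing a byte to the
# pipe wakes it so it can stop cleanly. All names below are invented.
import os
import select

def _sketch_read_until_halted(data_fd, halt_fd, timeout=1.0):
    """Read from data_fd until a byte arrives on halt_fd or EOF is hit."""
    chunks = []
    while True:
        readable, _, _ = select.select([data_fd, halt_fd], [], [], timeout)
        if halt_fd in readable:   # another thread asked us to stop
            break
        if data_fd not in readable:
            continue              # select() timed out; poll again
        data = os.read(data_fd, 4096)
        if not data:              # EOF on the data side
            break
        chunks.append(data)
    return b"".join(chunks)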
if serial_line: sl = serial_line.decode('utf-8', 'ignore').lstrip() logger.debug("DEVICE: {0}".format(sl.rstrip())) log_out_fp.write(sl) log_out_fp.flush() harness.handle(sl.rstrip()) if harness.state: if not harness.capture_coverage: ser.close() break log_out_fp.close() def device_is_available(self, instance): device = instance.platform.name fixture = instance.testcase.harness_config.get("fixture") for d in self.suite.duts: if fixture and fixture not in d.fixtures: continue if d.platform != device or not (d.serial or d.serial_pty): continue d.lock.acquire() avail = False if d.available: d.available = 0 d.counter += 1 avail = True d.lock.release() if avail: return d return None def make_device_available(self, serial): for d in self.suite.duts: if d.serial == serial or d.serial_pty: d.available = 1 @staticmethod def run_custom_script(script, timeout): with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc: try: stdout, _ = proc.communicate(timeout=timeout) logger.debug(stdout.decode()) except subprocess.TimeoutExpired: proc.kill() proc.communicate() logger.error("{} timed out".format(script)) def handle(self): out_state = "failed" runner = None hardware = self.device_is_available(self.instance) while not hardware: logger.debug("Waiting for device {} to become available".format(self.instance.platform.name)) time.sleep(1) hardware = self.device_is_available(self.instance) runner = hardware.runner or self.suite.west_runner serial_pty = hardware.serial_pty ser_pty_process = None if serial_pty: master, slave = pty.openpty() try: ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master) except subprocess.CalledProcessError as error: logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output)) return serial_device = os.ttyname(slave) else: serial_device = hardware.serial logger.debug("Using serial device {} @ {} baud".format(serial_device, hardware.serial_baud)) if (self.suite.west_flash is not None) or runner: command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir] command_extra_args = [] # There are three ways this option is used. 
# 1) bare: --west-flash # This results in options.west_flash == [] # 2) with a value: --west-flash="--board-id=42" # This results in options.west_flash == "--board-id=42" # 3) Multiple values: --west-flash="--board-id=42,--erase" # This results in options.west_flash == "--board-id=42 --erase" if self.suite.west_flash and self.suite.west_flash != []: command_extra_args.extend(self.suite.west_flash.split(',')) if runner: command.append("--runner") command.append(runner) board_id = hardware.probe_id or hardware.id product = hardware.product if board_id is not None: if runner == "pyocd": command_extra_args.append("--board-id") command_extra_args.append(board_id) elif runner == "nrfjprog": command_extra_args.append("--snr") command_extra_args.append(board_id) elif runner == "openocd" and product == "STM32 STLink": command_extra_args.append("--cmd-pre-init") command_extra_args.append("hla_serial %s" % (board_id)) elif runner == "openocd" and product == "STLINK-V3": command_extra_args.append("--cmd-pre-init") command_extra_args.append("hla_serial %s" % (board_id)) elif runner == "openocd" and product == "EDBG CMSIS-DAP": command_extra_args.append("--cmd-pre-init") command_extra_args.append("cmsis_dap_serial %s" % (board_id)) elif runner == "jlink": command.append("--tool-opt=-SelectEmuBySN %s" % (board_id)) if command_extra_args != []: command.append('--') command.extend(command_extra_args) else: command = [self.generator_cmd, "-C", self.build_dir, "flash"] pre_script = hardware.pre_script post_flash_script = hardware.post_flash_script post_script = hardware.post_script if pre_script: self.run_custom_script(pre_script, 30) try: ser = serial.Serial( serial_device, baudrate=hardware.serial_baud, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=self.timeout ) except serial.SerialException as e: self.set_state("failed", 0) self.instance.reason = "Failed" logger.error("Serial device error: %s" % (str(e))) if serial_pty and ser_pty_process: ser_pty_process.terminate() outs, errs = ser_pty_process.communicate() logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs)) self.make_device_available(serial_device) return ser.flush() harness_name = self.instance.testcase.harness.capitalize() harness_import = HarnessImporter(harness_name) harness = harness_import.instance harness.configure(self.instance) read_pipe, write_pipe = os.pipe() start_time = time.time() t = threading.Thread(target=self.monitor_serial, daemon=True, args=(ser, read_pipe, harness)) t.start() d_log = "{}/device.log".format(self.instance.build_dir) logger.debug('Flash command: %s', command) try: stdout = stderr = None with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc: try: (stdout, stderr) = proc.communicate(timeout=30) logger.debug(stdout.decode()) if proc.returncode != 0: self.instance.reason = "Device issue (Flash?)" with open(d_log, "w") as dlog_fp: dlog_fp.write(stderr.decode()) os.write(write_pipe, b'x') # halt the thread out_state = "flash_error" except subprocess.TimeoutExpired: proc.kill() (stdout, stderr) = proc.communicate() self.instance.reason = "Device issue (Timeout)" with open(d_log, "w") as dlog_fp: dlog_fp.write(stderr.decode()) except subprocess.CalledProcessError: os.write(write_pipe, b'x') # halt the thread if post_flash_script: self.run_custom_script(post_flash_script, 30) t.join(self.timeout) if t.is_alive(): logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name)) 
out_state = "timeout" if ser.isOpen(): ser.close() if serial_pty: ser_pty_process.terminate() outs, errs = ser_pty_process.communicate() logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs)) os.close(write_pipe) os.close(read_pipe) handler_time = time.time() - start_time if out_state in ["timeout", "flash_error"]: self.add_missing_testscases(harness) if out_state == "timeout": self.instance.reason = "Timeout" elif out_state == "flash_error": self.instance.reason = "Flash error" if harness.is_pytest: harness.pytest_run(self.log) self.instance.results = harness.tests # sometimes a test instance hasn't been executed successfully with an # empty dictionary results, in order to include it into final report, # so fill the results as BLOCK if self.instance.results == {}: for k in self.instance.testcase.cases: self.instance.results[k] = 'BLOCK' if harness.state: self.set_state(harness.state, handler_time) if harness.state == "failed": self.instance.reason = "Failed" else: self.set_state(out_state, handler_time) if post_script: self.run_custom_script(post_script, 30) self.make_device_available(serial_device) self.record(harness) class QEMUHandler(Handler): """Spawns a thread to monitor QEMU output from pipes We pass QEMU_PIPE to 'make run' and monitor the pipes for output. We need to do this as once qemu starts, it runs forever until killed. Test cases emit special messages to the console as they run, we check for these to collect whether the test passed or failed. """ def __init__(self, instance, type_str): """Constructor @param instance Test instance """ super().__init__(instance, type_str) self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo") self.pid_fn = os.path.join(instance.build_dir, "qemu.pid") if "ignore_qemu_crash" in instance.testcase.tags: self.ignore_qemu_crash = True self.ignore_unexpected_eof = True else: self.ignore_qemu_crash = False self.ignore_unexpected_eof = False @staticmethod def _get_cpu_time(pid): """get process CPU time. The guest virtual time in QEMU icount mode isn't host time and it's maintained by counting guest instructions, so we use QEMU process exection time to mostly simulate the time of guest OS. 
""" proc = psutil.Process(pid) cpu_time = proc.cpu_times() return cpu_time.user + cpu_time.system @staticmethod def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness, ignore_unexpected_eof=False): fifo_in = fifo_fn + ".in" fifo_out = fifo_fn + ".out" # These in/out nodes are named from QEMU's perspective, not ours if os.path.exists(fifo_in): os.unlink(fifo_in) os.mkfifo(fifo_in) if os.path.exists(fifo_out): os.unlink(fifo_out) os.mkfifo(fifo_out) # We don't do anything with out_fp but we need to open it for # writing so that QEMU doesn't block, due to the way pipes work out_fp = open(fifo_in, "wb") # Disable internal buffering, we don't # want read() or poll() to ever block if there is data in there in_fp = open(fifo_out, "rb", buffering=0) log_out_fp = open(logfile, "wt") start_time = time.time() timeout_time = start_time + timeout p = select.poll() p.register(in_fp, select.POLLIN) out_state = None line = "" timeout_extended = False pid = 0 if os.path.exists(pid_fn): pid = int(open(pid_fn).read()) while True: this_timeout = int((timeout_time - time.time()) * 1000) if this_timeout < 0 or not p.poll(this_timeout): try: if pid and this_timeout > 0: #there's possibility we polled nothing because #of not enough CPU time scheduled by host for #QEMU process during p.poll(this_timeout) cpu_time = QEMUHandler._get_cpu_time(pid) if cpu_time < timeout and not out_state: timeout_time = time.time() + (timeout - cpu_time) continue except ProcessLookupError: out_state = "failed" break if not out_state: out_state = "timeout" break if pid == 0 and os.path.exists(pid_fn): pid = int(open(pid_fn).read()) if harness.is_pytest: harness.handle(None) out_state = harness.state break try: c = in_fp.read(1).decode("utf-8") except UnicodeDecodeError: # Test is writing something weird, fail out_state = "unexpected byte" break if c == "": # EOF, this shouldn't happen unless QEMU crashes if not ignore_unexpected_eof: out_state = "unexpected eof" break line = line + c if c != "\n": continue # line contains a full line of data output from QEMU log_out_fp.write(line) log_out_fp.flush() line = line.strip() logger.debug(f"QEMU ({pid}): {line}") harness.handle(line) if harness.state: # if we have registered a fail make sure the state is not # overridden by a false success message coming from the # testsuite if out_state not in ['failed', 'unexpected eof', 'unexpected byte']: out_state = harness.state # if we get some state, that means test is doing well, we reset # the timeout and wait for 2 more seconds to catch anything # printed late. We wait much longer if code # coverage is enabled since dumping this information can # take some time. 
if not timeout_extended or harness.capture_coverage: timeout_extended = True if harness.capture_coverage: timeout_time = time.time() + 30 else: timeout_time = time.time() + 2 line = "" if harness.is_pytest: harness.pytest_run(logfile) out_state = harness.state handler.record(harness) handler_time = time.time() - start_time logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds") if out_state == "timeout": handler.instance.reason = "Timeout" handler.set_state("failed", handler_time) elif out_state == "failed": handler.instance.reason = "Failed" handler.set_state("failed", handler_time) elif out_state in ['unexpected eof', 'unexpected byte']: handler.instance.reason = out_state handler.set_state("failed", handler_time) else: handler.set_state(out_state, handler_time) log_out_fp.close() out_fp.close() in_fp.close() if pid: try: if pid: os.kill(pid, signal.SIGTERM) except ProcessLookupError: # Oh well, as long as it's dead! User probably sent Ctrl-C pass os.unlink(fifo_in) os.unlink(fifo_out) def handle(self): self.results = {} self.run = True # We pass this to QEMU which looks for fifos with .in and .out # suffixes. self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo") self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid") if os.path.exists(self.pid_fn): os.unlink(self.pid_fn) self.log_fn = self.log harness_import = HarnessImporter(self.instance.testcase.harness.capitalize()) harness = harness_import.instance harness.configure(self.instance) self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread, args=(self, self.timeout, self.build_dir, self.log_fn, self.fifo_fn, self.pid_fn, self.results, harness, self.ignore_unexpected_eof)) self.instance.results = harness.tests self.thread.daemon = True logger.debug("Spawning QEMUHandler Thread for %s" % self.name) self.thread.start() if sys.stdout.isatty(): subprocess.call(["stty", "sane"]) logger.debug("Running %s (%s)" % (self.name, self.type_str)) command = [self.generator_cmd] command += ["-C", self.build_dir, "run"] is_timeout = False qemu_pid = None with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc: logger.debug("Spawning QEMUHandler Thread for %s" % self.name) try: proc.wait(self.timeout) except subprocess.TimeoutExpired: # sometimes QEMU can't handle SIGTERM signal correctly # in that case kill -9 QEMU process directly and leave # twister to judge testing result by console output is_timeout = True self.terminate(proc) if harness.state == "passed": self.returncode = 0 else: self.returncode = proc.returncode else: if os.path.exists(self.pid_fn): qemu_pid = int(open(self.pid_fn).read()) logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}") self.returncode = proc.returncode # Need to wait for harness to finish processing # output from QEMU. Otherwise it might miss some # error messages. 
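# Illustrative sketch (not part of the original twister source): how the
# CPU-time check from QEMUHandler._get_cpu_time() above can extend a
# deadline. If the emulator was starved by the host scheduler, its consumed
# CPU time lags wall-clock time, so the remaining budget is re-granted in
# CPU-time terms. The function name is invented.
import time
import psutil

def _sketch_extend_deadline(pid, timeout, deadline):
    """Return a possibly extended deadline for process `pid`."""
    cpu = psutil.Process(pid).cpu_times()
    used = cpu.user + cpu.system
    if used < timeout:
        # the process has had less than `timeout` seconds of actual CPU,
        # so push the wall-clock deadline out by the unused portion
        return time.time() + (timeout - used)
    return deadline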
self.thread.join(0) if self.thread.is_alive(): logger.debug("Timed out while monitoring QEMU output") if os.path.exists(self.pid_fn): qemu_pid = int(open(self.pid_fn).read()) os.unlink(self.pid_fn) logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}") if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state: self.set_state("failed", 0) if is_timeout: self.instance.reason = "Timeout" else: self.instance.reason = "Exited with {}".format(self.returncode) self.add_missing_testscases(harness) def get_fifo(self): return self.fifo_fn class SizeCalculator: alloc_sections = [ "bss", "noinit", "app_bss", "app_noinit", "ccm_bss", "ccm_noinit" ] rw_sections = [ "datas", "initlevel", "exceptions", "initshell", "_static_thread_data_area", "k_timer_area", "k_mem_slab_area", "k_mem_pool_area", "sw_isr_table", "k_sem_area", "k_mutex_area", "app_shmem_regions", "_k_fifo_area", "_k_lifo_area", "k_stack_area", "k_msgq_area", "k_mbox_area", "k_pipe_area", "net_if_area", "net_if_dev_area", "net_l2_area", "net_l2_data", "k_queue_area", "_net_buf_pool_area", "app_datas", "kobject_data", "mmu_tables", "app_pad", "priv_stacks", "ccm_data", "usb_descriptor", "usb_data", "usb_bos_desc", "uart_mux", 'log_backends_sections', 'log_dynamic_sections', 'log_const_sections', "app_smem", 'shell_root_cmds_sections', 'log_const_sections', "font_entry_sections", "priv_stacks_noinit", "_GCOV_BSS_SECTION_NAME", "gcov", "nocache", "devices", "k_heap_area", ] # These get copied into RAM only on non-XIP ro_sections = [ "rom_start", "text", "ctors", "init_array", "reset", "z_object_assignment_area", "rodata", "net_l2", "vector", "sw_isr_table", "settings_handler_static_area", "bt_l2cap_fixed_chan_area", "bt_l2cap_br_fixed_chan_area", "bt_gatt_service_static_area", "vectors", "net_socket_register_area", "net_ppp_proto", "shell_area", "tracing_backend_area", "ppp_protocol_handler_area", ] def __init__(self, filename, extra_sections): """Constructor @param filename Path to the output binary The <filename> is parsed by objdump to determine section sizes """ # Make sure this is an ELF binary with open(filename, "rb") as f: magic = f.read(4) try: if magic != b'\x7fELF': raise TwisterRuntimeError("%s is not an ELF binary" % filename) except Exception as e: print(str(e)) sys.exit(2) # Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK. # GREP can not be used as it returns an error if the symbol is not # found. 
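# Illustrative sketch (not part of the original twister source): the ELF
# magic check performed at the start of SizeCalculator.__init__() above.
# Every ELF file begins with the four bytes 0x7f 'E' 'L' 'F'. The helper
# name and path argument are invented.
def _sketch_is_elf(path):
    with open(path, "rb") as f:
        return f.read(4) == b"\x7fELF"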
is_xip_command = "nm " + filename + \ " | awk '/CONFIG_XIP/ { print $3 }'" is_xip_output = subprocess.check_output( is_xip_command, shell=True, stderr=subprocess.STDOUT).decode( "utf-8").strip() try: if is_xip_output.endswith("no symbols"): raise TwisterRuntimeError("%s has no symbol information" % filename) except Exception as e: print(str(e)) sys.exit(2) self.is_xip = (len(is_xip_output) != 0) self.filename = filename self.sections = [] self.rom_size = 0 self.ram_size = 0 self.extra_sections = extra_sections self._calculate_sizes() def get_ram_size(self): """Get the amount of RAM the application will use up on the device @return amount of RAM, in bytes """ return self.ram_size def get_rom_size(self): """Get the size of the data that this application uses on device's flash @return amount of ROM, in bytes """ return self.rom_size def unrecognized_sections(self): """Get a list of sections inside the binary that weren't recognized @return list of unrecognized section names """ slist = [] for v in self.sections: if not v["recognized"]: slist.append(v["name"]) return slist def _calculate_sizes(self): """ Calculate RAM and ROM usage by section """ objdump_command = "objdump -h " + self.filename objdump_output = subprocess.check_output( objdump_command, shell=True).decode("utf-8").splitlines() for line in objdump_output: words = line.split() if not words: # Skip lines that are too short continue index = words[0] if not index[0].isdigit(): # Skip lines that do not start continue # with a digit name = words[1] # Skip lines with section names if name[0] == '.': # starting with '.' continue # TODO this doesn't actually reflect the size in flash or RAM as # it doesn't include linker-imposed padding between sections. # It is close though. size = int(words[2], 16) if size == 0: continue load_addr = int(words[4], 16) virt_addr = int(words[3], 16) # Add section to memory use totals (for both non-XIP and XIP scenarios) # Unrecognized section names are not included in the calculations. 
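# Illustrative sketch (not part of the original twister source): parsing a
# single data row of `objdump -h` output the way _calculate_sizes() above
# does. Fields are Idx, Name, Size, VMA, LMA, ... with hex values; the
# helper name and the sample line in the comment are invented.
def _sketch_parse_objdump_row(line):
    words = line.split()
    if not words or not words[0].isdigit() or words[1].startswith('.'):
        return None  # header, blank line, or dotted section name: skip
    return {
        "name": words[1],
        "size": int(words[2], 16),
        "virt_addr": int(words[3], 16),
        "load_addr": int(words[4], 16),
    }

# e.g. _sketch_parse_objdump_row("  1 rodata 00001a20 00004000 00004000 ...")
#      -> {'name': 'rodata', 'size': 6688, 'virt_addr': 16384, 'load_addr': 16384}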
recognized = True if name in SizeCalculator.alloc_sections: self.ram_size += size stype = "alloc" elif name in SizeCalculator.rw_sections: self.ram_size += size self.rom_size += size stype = "rw" elif name in SizeCalculator.ro_sections: self.rom_size += size if not self.is_xip: self.ram_size += size stype = "ro" else: stype = "unknown" if name not in self.extra_sections: recognized = False self.sections.append({"name": name, "load_addr": load_addr, "size": size, "virt_addr": virt_addr, "type": stype, "recognized": recognized}) class TwisterConfigParser: """Class to read test case files with semantic checking """ def __init__(self, filename, schema): """Instantiate a new TwisterConfigParser object @param filename Source .yaml file to read """ self.data = {} self.schema = schema self.filename = filename self.tests = {} self.common = {} def load(self): self.data = scl.yaml_load_verify(self.filename, self.schema) if 'tests' in self.data: self.tests = self.data['tests'] if 'common' in self.data: self.common = self.data['common'] def _cast_value(self, value, typestr): if isinstance(value, str): v = value.strip() if typestr == "str": return v elif typestr == "float": return float(value) elif typestr == "int": return int(value) elif typestr == "bool": return value elif typestr.startswith("list") and isinstance(value, list): return value elif typestr.startswith("list") and isinstance(value, str): vs = v.split() if len(typestr) > 4 and typestr[4] == ":": return [self._cast_value(vsi, typestr[5:]) for vsi in vs] else: return vs elif typestr.startswith("set"): vs = v.split() if len(typestr) > 3 and typestr[3] == ":": return {self._cast_value(vsi, typestr[4:]) for vsi in vs} else: return set(vs) elif typestr.startswith("map"): return value else: raise ConfigurationError( self.filename, "unknown type '%s'" % value) def get_test(self, name, valid_keys): """Get a dictionary representing the keys/values within a test @param name The test in the .yaml file to retrieve data from @param valid_keys A dictionary representing the intended semantics for this test. Each key in this dictionary is a key that could be specified, if a key is given in the .yaml file which isn't in here, it will generate an error. Each value in this dictionary is another dictionary containing metadata: "default" - Default value if not given "type" - Data type to convert the text value to. Simple types supported are "str", "float", "int", "bool" which will get converted to respective Python data types. "set" and "list" may also be specified which will split the value by whitespace (but keep the elements as strings). finally, "list:<type>" and "set:<type>" may be given which will perform a type conversion after splitting the value up. "required" - If true, raise an error if not defined. If false and "default" isn't specified, a type conversion will be done on an empty string @return A dictionary containing the test key-value pairs with type conversion and default values filled in per valid_keys """ d = {} for k, v in self.common.items(): d[k] = v for k, v in self.tests[name].items(): if k in d: if isinstance(d[k], str): # By default, we just concatenate string values of keys # which appear both in "common" and per-test sections, # but some keys are handled in adhoc way based on their # semantics. 
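# Illustrative sketch (not part of the original twister source): the
# common/per-test merge rule described in the comment above and applied
# right below. String values are concatenated with a space, except
# "filter", whose two expressions are combined with a logical and.
# The helper name and sample values are invented.
def _sketch_merge_key(key, common_value, test_value):
    if key == "filter":
        return "(%s) and (%s)" % (common_value, test_value)
    return common_value + " " + test_value

# _sketch_merge_key("tags", "kernel", "posix")          -> "kernel posix"
# _sketch_merge_key("filter", "CONFIG_A", "CONFIG_B")   -> "(CONFIG_A) and (CONFIG_B)"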
if k == "filter": d[k] = "(%s) and (%s)" % (d[k], v) else: d[k] += " " + v else: d[k] = v for k, kinfo in valid_keys.items(): if k not in d: if "required" in kinfo: required = kinfo["required"] else: required = False if required: raise ConfigurationError( self.filename, "missing required value for '%s' in test '%s'" % (k, name)) else: if "default" in kinfo: default = kinfo["default"] else: default = self._cast_value("", kinfo["type"]) d[k] = default else: try: d[k] = self._cast_value(d[k], kinfo["type"]) except ValueError: raise ConfigurationError( self.filename, "bad %s value '%s' for key '%s' in name '%s'" % (kinfo["type"], d[k], k, name)) return d class Platform: """Class representing metadata for a particular platform Maps directly to BOARD when building""" platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "platform-schema.yaml")) def __init__(self): """Constructor. """ self.name = "" self.twister = True # if no RAM size is specified by the board, take a default of 128K self.ram = 128 self.ignore_tags = [] self.only_tags = [] self.default = False # if no flash size is specified by the board, take a default of 512K self.flash = 512 self.supported = set() self.arch = "" self.type = "na" self.simulation = "na" self.supported_toolchains = [] self.env = [] self.env_satisfied = True self.filter_data = dict() def load(self, platform_file): scp = TwisterConfigParser(platform_file, self.platform_schema) scp.load() data = scp.data self.name = data['identifier'] self.twister = data.get("twister", True) # if no RAM size is specified by the board, take a default of 128K self.ram = data.get("ram", 128) testing = data.get("testing", {}) self.ignore_tags = testing.get("ignore_tags", []) self.only_tags = testing.get("only_tags", []) self.default = testing.get("default", False) # if no flash size is specified by the board, take a default of 512K self.flash = data.get("flash", 512) self.supported = set() for supp_feature in data.get("supported", []): for item in supp_feature.split(":"): self.supported.add(item) self.arch = data['arch'] self.type = data.get('type', "na") self.simulation = data.get('simulation', "na") self.supported_toolchains = data.get("toolchain", []) self.env = data.get("env", []) self.env_satisfied = True for env in self.env: if not os.environ.get(env, None): self.env_satisfied = False def __repr__(self): return "<%s on %s>" % (self.name, self.arch) class DisablePyTestCollectionMixin(object): __test__ = False class ScanPathResult: """Result of the TestCase.scan_path function call. Attributes: matches A list of test cases warnings A string containing one or more warnings to display has_registered_test_suites Whether or not the path contained any calls to the ztest_register_test_suite macro. has_run_registered_test_suites Whether or not the path contained at least one call to ztest_run_registered_test_suites. 
has_test_main Whether or not the path contains a definition of test_main(void) """ def __init__(self, matches: List[str] = None, warnings: str = None, has_registered_test_suites: bool = False, has_run_registered_test_suites: bool = False, has_test_main: bool = False): self.matches = matches self.warnings = warnings self.has_registered_test_suites = has_registered_test_suites self.has_run_registered_test_suites = has_run_registered_test_suites self.has_test_main = has_test_main def __eq__(self, other): if not isinstance(other, ScanPathResult): return False return (sorted(self.matches) == sorted(other.matches) and self.warnings == other.warnings and (self.has_registered_test_suites == other.has_registered_test_suites) and (self.has_run_registered_test_suites == other.has_run_registered_test_suites) and self.has_test_main == other.has_test_main) class TestCase(DisablePyTestCollectionMixin): """Class representing a test application """ def __init__(self, testcase_root, workdir, name): """TestCase constructor. This gets called by TestSuite as it finds and reads test yaml files. Multiple TestCase instances may be generated from a single testcase.yaml, each one corresponds to an entry within that file. We need to have a unique name for every single test case. Since a testcase.yaml can define multiple tests, the canonical name for the test case is <workdir>/<name>. @param testcase_root os.path.abspath() of one of the --testcase-root @param workdir Sub-directory of testcase_root where the .yaml test configuration file was found @param name Name of this test case, corresponding to the entry name in the test case configuration file. For many test cases that just define one test, can be anything and is usually "test". This is really only used to distinguish between different cases when the testcase.yaml defines multiple tests """ self.source_dir = "" self.yamlfile = "" self.cases = [] self.name = self.get_unique(testcase_root, workdir, name) self.id = name self.type = None self.tags = set() self.extra_args = None self.extra_configs = None self.arch_allow = None self.arch_exclude = None self.skip = False self.platform_exclude = None self.platform_allow = None self.toolchain_exclude = None self.toolchain_allow = None self.tc_filter = None self.timeout = 60 self.harness = "" self.harness_config = {} self.build_only = True self.build_on_all = False self.slow = False self.min_ram = -1 self.depends_on = None self.min_flash = -1 self.extra_sections = None self.integration_platforms = [] @staticmethod def get_unique(testcase_root, workdir, name): canonical_testcase_root = os.path.realpath(testcase_root) if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents: # This is in ZEPHYR_BASE, so include path in name for uniqueness # FIXME: We should not depend on path of test for unique names. relative_tc_root = os.path.relpath(canonical_testcase_root, start=canonical_zephyr_base) else: relative_tc_root = "" # workdir can be "." unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name)) check = name.split(".") if len(check) < 2: raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \ Tests should reference the category and subsystem with a dot as a separator. 
""" ) return unique @staticmethod def scan_file(inf_name): suite_regex = re.compile( # do not match until end-of-line, otherwise we won't allow # stc_regex below to catch the ones that are declared in the same # line--as we only search starting the end of this match br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,", re.MULTILINE) registered_suite_regex = re.compile( br"^\s*ztest_register_test_suite" br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,", re.MULTILINE) # Checks if the file contains a definition of "void test_main(void)" # Since ztest provides a plain test_main implementation it is OK to: # 1. register test suites and not call the run function iff the test # doesn't have a custom test_main. # 2. register test suites and a custom test_main definition iff the test # also calls ztest_run_registered_test_suites. test_main_regex = re.compile( br"^\s*void\s+test_main\(void\)", re.MULTILINE) stc_regex = re.compile( br"""^\s* # empy space at the beginning is ok # catch the case where it is declared in the same sentence, e.g: # # ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME)); # ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME), (?:ztest_ (?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*) [a-zA-Z0-9_]+\s*,\s* )? # Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME) ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)? # Consume the argument that becomes the extra testcse \(\s*(?P<stc_name>[a-zA-Z0-9_]+) # _setup_teardown() variant has two extra arguments that we ignore (?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)? \s*\)""", # We don't check how it finishes; we don't care re.MULTILINE | re.VERBOSE) suite_run_regex = re.compile( br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)", re.MULTILINE) registered_suite_run_regex = re.compile( br"^\s*ztest_run_registered_test_suites\(" br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)", re.MULTILINE) achtung_regex = re.compile( br"(#ifdef|#endif)", re.MULTILINE) warnings = None has_registered_test_suites = False has_run_registered_test_suites = False has_test_main = False with open(inf_name) as inf: if os.name == 'nt': mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ} else: mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ, 'offset': 0} with contextlib.closing(mmap.mmap(**mmap_args)) as main_c: suite_regex_match = suite_regex.search(main_c) registered_suite_regex_match = registered_suite_regex.search( main_c) if registered_suite_regex_match: has_registered_test_suites = True if registered_suite_run_regex.search(main_c): has_run_registered_test_suites = True if test_main_regex.search(main_c): has_test_main = True if not suite_regex_match and not has_registered_test_suites: # can't find ztest_test_suite, maybe a client, because # it includes ztest.h return ScanPathResult( matches=None, warnings=None, has_registered_test_suites=has_registered_test_suites, has_run_registered_test_suites=has_run_registered_test_suites, has_test_main=has_test_main) suite_run_match = suite_run_regex.search(main_c) if suite_regex_match and not suite_run_match: raise ValueError("can't find ztest_run_test_suite") if suite_regex_match: search_start = suite_regex_match.end() else: search_start = registered_suite_regex_match.end() if suite_run_match: search_end = suite_run_match.start() else: search_end = re.compile(br"\);", re.MULTILINE) \ .search(main_c, search_start) \ .end() achtung_matches = re.findall( achtung_regex, 
main_c[search_start:search_end]) if achtung_matches: warnings = "found invalid %s in ztest_test_suite()" \ % ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True)) _matches = re.findall( stc_regex, main_c[search_start:search_end]) for match in _matches: if not match.decode().startswith("test_"): warnings = "Found a test that does not start with test_" matches = [match.decode().replace("test_", "", 1) for match in _matches] return ScanPathResult( matches=matches, warnings=warnings, has_registered_test_suites=has_registered_test_suites, has_run_registered_test_suites=has_run_registered_test_suites, has_test_main=has_test_main) def scan_path(self, path): subcases = [] has_registered_test_suites = False has_run_registered_test_suites = False has_test_main = False for filename in glob.glob(os.path.join(path, "src", "*.c*")): try: result: ScanPathResult = self.scan_file(filename) if result.warnings: logger.error("%s: %s" % (filename, result.warnings)) raise TwisterRuntimeError( "%s: %s" % (filename, result.warnings)) if result.matches: subcases += result.matches if result.has_registered_test_suites: has_registered_test_suites = True if result.has_run_registered_test_suites: has_run_registered_test_suites = True if result.has_test_main: has_test_main = True except ValueError as e: logger.error("%s: can't find: %s" % (filename, e)) for filename in glob.glob(os.path.join(path, "*.c")): try: result: ScanPathResult = self.scan_file(filename) if result.warnings: logger.error("%s: %s" % (filename, result.warnings)) if result.matches: subcases += result.matches except ValueError as e: logger.error("%s: can't find: %s" % (filename, e)) if (has_registered_test_suites and has_test_main and not has_run_registered_test_suites): warning = \ "Found call to 'ztest_register_test_suite()' but no "\ "call to 'ztest_run_registered_test_suites()'" logger.error(warning) raise TwisterRuntimeError(warning) return subcases def parse_subcases(self, test_path): results = self.scan_path(test_path) for sub in results: name = "{}.{}".format(self.id, sub) self.cases.append(name) if not results: self.cases.append(self.id) def __str__(self): return self.name class TestInstance(DisablePyTestCollectionMixin): """Class representing the execution of a particular TestCase on a platform @param test The TestCase object we want to build/execute @param platform Platform object that we want to build and run against @param base_outdir Base directory for all test results. The actual out directory used is <outdir>/<platform>/<test case name> """ def __init__(self, testcase, platform, outdir): self.testcase = testcase self.platform = platform self.status = None self.reason = "Unknown" self.metrics = dict() self.handler = None self.outdir = outdir self.name = os.path.join(platform.name, testcase.name) self.build_dir = os.path.join(outdir, platform.name, testcase.name) self.run = False self.results = {} def __getstate__(self): d = self.__dict__.copy() return d def __setstate__(self, d): self.__dict__.update(d) def __lt__(self, other): return self.name < other.name @staticmethod def testcase_runnable(testcase, fixtures): can_run = False # console harness allows us to run the test and capture data. if testcase.harness in [ 'console', 'ztest', 'pytest']: can_run = True # if we have a fixture that is also being supplied on the # command-line, then we need to run the test, not just build it. 
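# Illustrative sketch (not part of the original twister source): the
# harness/fixture gate that testcase_runnable() applies right below. A
# console/ztest/pytest harness is runnable, but a declared fixture must
# also have been supplied on the command line; any other harness means
# build-only. The helper name and fixture names are invented.
def _sketch_can_run(harness, fixture, supplied_fixtures):
    if harness in ('console', 'ztest', 'pytest'):
        return fixture in supplied_fixtures if fixture else True
    if harness:        # some other harness: build only
        return False
    return True        # no harness at all: runnable

# _sketch_can_run('console', 'gpio_loopback', ['gpio_loopback'])  -> True
# _sketch_can_run('console', 'gpio_loopback', [])                 -> False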
fixture = testcase.harness_config.get('fixture') if fixture: can_run = (fixture in fixtures) elif testcase.harness: can_run = False else: can_run = True return can_run # Global testsuite parameters def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]): # right now we only support building on windows. running is still work # in progress. if os.name == 'nt': return False # we asked for build-only on the command line if self.testcase.build_only: return False # Do not run slow tests: skip_slow = self.testcase.slow and not enable_slow if skip_slow: return False target_ready = bool(self.testcase.type == "unit" or \ self.platform.type == "native" or \ self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp"] or \ filter == 'runnable') if self.platform.simulation == "nsim": if not find_executable("nsimdrv"): target_ready = False if self.platform.simulation == "mdb-nsim": if not find_executable("mdb"): target_ready = False if self.platform.simulation == "renode": if not find_executable("renode"): target_ready = False if self.platform.simulation == "tsim": if not find_executable("tsim-leon3"): target_ready = False testcase_runnable = self.testcase_runnable(self.testcase, fixtures) return testcase_runnable and target_ready def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]): # Create this in a "twister/" subdirectory otherwise this # will pass this overlay to kconfig.py *twice* and kconfig.cmake # will silently give that second time precedence over any # --extra-args=CONFIG_* subdir = os.path.join(self.build_dir, "twister") content = "" if self.testcase.extra_configs: content = "\n".join(self.testcase.extra_configs) if enable_coverage: if platform.name in coverage_platform: content = content + "\nCONFIG_COVERAGE=y" content = content + "\nCONFIG_COVERAGE_DUMP=y" if enable_asan: if platform.type == "native": content = content + "\nCONFIG_ASAN=y" if enable_ubsan: if platform.type == "native": content = content + "\nCONFIG_UBSAN=y" if content: os.makedirs(subdir, exist_ok=True) file = os.path.join(subdir, "testcase_extra.conf") with open(file, "w") as f: f.write(content) return content def calculate_sizes(self): """Get the RAM/ROM sizes of a test case. This can only be run after the instance has been executed by MakeGenerator, otherwise there won't be any binaries to measure. @return A SizeCalculator object """ fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf")) fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe"))) fns = [x for x in fns if not x.endswith('_prebuilt.elf')] if len(fns) != 1: raise BuildError("Missing/multiple output ELF binary") return SizeCalculator(fns[0], self.testcase.extra_sections) def fill_results_by_status(self): """Fills results according to self.status The method is used to propagate the instance level status to the test cases inside. Useful when the whole instance is skipped and the info is required also at the test cases level for reporting. Should be used with caution, e.g. 
should not be used to fill all results with passes """ status_to_verdict = { 'skipped': 'SKIP', 'error': 'BLOCK', 'failure': 'FAILED' } for k in self.results: self.results[k] = status_to_verdict[self.status] def __repr__(self): return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name) class CMake(): config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') def __init__(self, testcase, platform, source_dir, build_dir): self.cwd = None self.capture_output = True self.defconfig = {} self.cmake_cache = {} self.instance = None self.testcase = testcase self.platform = platform self.source_dir = source_dir self.build_dir = build_dir self.log = "build.log" self.generator = None self.generator_cmd = None def parse_generated(self): self.defconfig = {} return {} def run_build(self, args=[]): logger.debug("Building %s for %s" % (self.source_dir, self.platform.name)) cmake_args = [] cmake_args.extend(args) cmake = shutil.which('cmake') cmd = [cmake] + cmake_args kwargs = dict() if self.capture_output: kwargs['stdout'] = subprocess.PIPE # CMake sends the output of message() to stderr unless it's STATUS kwargs['stderr'] = subprocess.STDOUT if self.cwd: kwargs['cwd'] = self.cwd p = subprocess.Popen(cmd, **kwargs) out, _ = p.communicate() results = {} if p.returncode == 0: msg = "Finished building %s for %s" % (self.source_dir, self.platform.name) self.instance.status = "passed" results = {'msg': msg, "returncode": p.returncode, "instance": self.instance} if out: log_msg = out.decode(sys.getdefaultencoding()) with open(os.path.join(self.build_dir, self.log), "a") as log: log.write(log_msg) else: return None else: # A real error occurred, raise an exception log_msg = "" if out: log_msg = out.decode(sys.getdefaultencoding()) with open(os.path.join(self.build_dir, self.log), "a") as log: log.write(log_msg) if log_msg: res = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg) if res and not self.overflow_as_errors: logger.debug("Test skipped due to {} Overflow".format(res[0])) self.instance.status = "skipped" self.instance.reason = "{} overflow".format(res[0]) else: self.instance.status = "error" self.instance.reason = "Build failure" results = { "returncode": p.returncode, "instance": self.instance, } return results def run_cmake(self, args=[]): if self.warnings_as_errors: ldflags = "-Wl,--fatal-warnings" cflags = "-Werror" aflags = "-Wa,--fatal-warnings" gen_defines_args = "--edtlib-Werror" else: ldflags = cflags = aflags = "" gen_defines_args = "" logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name)) cmake_args = [ f'-B{self.build_dir}', f'-S{self.source_dir}', f'-DEXTRA_CFLAGS="{cflags}"', f'-DEXTRA_AFLAGS="{aflags}', f'-DEXTRA_LDFLAGS="{ldflags}"', f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}', f'-G{self.generator}' ] if self.cmake_only: cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1") args = ["-D{}".format(a.replace('"', '')) for a in args] cmake_args.extend(args) cmake_opts = ['-DBOARD={}'.format(self.platform.name)] cmake_args.extend(cmake_opts) logger.debug("Calling cmake with arguments: {}".format(cmake_args)) cmake = shutil.which('cmake') cmd = [cmake] + cmake_args kwargs = dict() if self.capture_output: kwargs['stdout'] = subprocess.PIPE # CMake sends the output of message() to stderr unless it's STATUS kwargs['stderr'] = subprocess.STDOUT if self.cwd: kwargs['cwd'] = self.cwd p = subprocess.Popen(cmd, **kwargs) out, _ = p.communicate() if p.returncode == 0: 
filter_results = self.parse_generated() msg = "Finished building %s for %s" % (self.source_dir, self.platform.name) logger.debug(msg) results = {'msg': msg, 'filter': filter_results} else: self.instance.status = "error" self.instance.reason = "Cmake build failure" self.instance.fill_results_by_status() logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name)) results = {"returncode": p.returncode} if out: with open(os.path.join(self.build_dir, self.log), "a") as log: log_msg = out.decode(sys.getdefaultencoding()) log.write(log_msg) return results @staticmethod def run_cmake_script(args=[]): logger.debug("Running cmake script %s" % (args[0])) cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]] cmake_args.extend(['-P', args[0]]) logger.debug("Calling cmake with arguments: {}".format(cmake_args)) cmake = shutil.which('cmake') if not cmake: msg = "Unable to find `cmake` in path" logger.error(msg) raise Exception(msg) cmd = [cmake] + cmake_args kwargs = dict() kwargs['stdout'] = subprocess.PIPE # CMake sends the output of message() to stderr unless it's STATUS kwargs['stderr'] = subprocess.STDOUT p = subprocess.Popen(cmd, **kwargs) out, _ = p.communicate() # It might happen that the environment adds ANSI escape codes like \x1b[0m, # for instance if twister is executed from inside a makefile. In such a # scenario it is then necessary to remove them, as otherwise the JSON decoding # will fail. ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') out = ansi_escape.sub('', out.decode()) if p.returncode == 0: msg = "Finished running %s" % (args[0]) logger.debug(msg) results = {"returncode": p.returncode, "msg": msg, "stdout": out} else: logger.error("Cmake script failure: %s" % (args[0])) results = {"returncode": p.returncode, "returnmsg": out} return results class FilterBuilder(CMake): def __init__(self, testcase, platform, source_dir, build_dir): super().__init__(testcase, platform, source_dir, build_dir) self.log = "config-twister.log" def parse_generated(self): if self.platform.name == "unit_testing": return {} cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt") defconfig_path = os.path.join(self.build_dir, "zephyr", ".config") with open(defconfig_path, "r") as fp: defconfig = {} for line in fp.readlines(): m = self.config_re.match(line) if not m: if line.strip() and not line.startswith("#"): sys.stderr.write("Unrecognized line %s\n" % line) continue defconfig[m.group(1)] = m.group(2).strip() self.defconfig = defconfig cmake_conf = {} try: cache = CMakeCache.from_file(cmake_cache_path) except FileNotFoundError: cache = {} for k in iter(cache): cmake_conf[k.name] = k.value self.cmake_cache = cmake_conf filter_data = { "ARCH": self.platform.arch, "PLATFORM": self.platform.name } filter_data.update(os.environ) filter_data.update(self.defconfig) filter_data.update(self.cmake_cache) edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle") if self.testcase and self.testcase.tc_filter: try: if os.path.exists(edt_pickle): with open(edt_pickle, 'rb') as f: edt = pickle.load(f) else: edt = None res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt) except (ValueError, SyntaxError) as se: sys.stderr.write( "Failed processing %s\n" % self.testcase.yamlfile) raise se if not res: return {os.path.join(self.platform.name, self.testcase.name): True} else: return {os.path.join(self.platform.name, self.testcase.name): False} else: self.platform.filter_data = filter_data return filter_data class 
ProjectBuilder(FilterBuilder): def __init__(self, suite, instance, **kwargs): super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir) self.log = "build.log" self.instance = instance self.suite = suite self.filtered_tests = 0 self.lsan = kwargs.get('lsan', False) self.asan = kwargs.get('asan', False) self.ubsan = kwargs.get('ubsan', False) self.valgrind = kwargs.get('valgrind', False) self.extra_args = kwargs.get('extra_args', []) self.device_testing = kwargs.get('device_testing', False) self.cmake_only = kwargs.get('cmake_only', False) self.cleanup = kwargs.get('cleanup', False) self.coverage = kwargs.get('coverage', False) self.inline_logs = kwargs.get('inline_logs', False) self.generator = kwargs.get('generator', None) self.generator_cmd = kwargs.get('generator_cmd', None) self.verbose = kwargs.get('verbose', None) self.warnings_as_errors = kwargs.get('warnings_as_errors', True) self.overflow_as_errors = kwargs.get('overflow_as_errors', False) @staticmethod def log_info(filename, inline_logs): filename = os.path.abspath(os.path.realpath(filename)) if inline_logs: logger.info("{:-^100}".format(filename)) try: with open(filename) as fp: data = fp.read() except Exception as e: data = "Unable to read log data (%s)\n" % (str(e)) logger.error(data) logger.info("{:-^100}".format(filename)) else: logger.error("see: " + Fore.YELLOW + filename + Fore.RESET) def log_info_file(self, inline_logs): build_dir = self.instance.build_dir h_log = "{}/handler.log".format(build_dir) b_log = "{}/build.log".format(build_dir) v_log = "{}/valgrind.log".format(build_dir) d_log = "{}/device.log".format(build_dir) if os.path.exists(v_log) and "Valgrind" in self.instance.reason: self.log_info("{}".format(v_log), inline_logs) elif os.path.exists(h_log) and os.path.getsize(h_log) > 0: self.log_info("{}".format(h_log), inline_logs) elif os.path.exists(d_log) and os.path.getsize(d_log) > 0: self.log_info("{}".format(d_log), inline_logs) else: self.log_info("{}".format(b_log), inline_logs) def setup_handler(self): instance = self.instance args = [] # FIXME: Needs simplification if instance.platform.simulation == "qemu": instance.handler = QEMUHandler(instance, "qemu") args.append("QEMU_PIPE=%s" % instance.handler.get_fifo()) instance.handler.call_make_run = True elif instance.testcase.type == "unit": instance.handler = BinaryHandler(instance, "unit") instance.handler.binary = os.path.join(instance.build_dir, "testbinary") if self.coverage: args.append("COVERAGE=1") elif instance.platform.type == "native": handler = BinaryHandler(instance, "native") handler.asan = self.asan handler.valgrind = self.valgrind handler.lsan = self.lsan handler.ubsan = self.ubsan handler.coverage = self.coverage handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe") instance.handler = handler elif instance.platform.simulation == "renode": if find_executable("renode"): instance.handler = BinaryHandler(instance, "renode") instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid") instance.handler.call_make_run = True elif instance.platform.simulation == "tsim": instance.handler = BinaryHandler(instance, "tsim") instance.handler.call_make_run = True elif self.device_testing: instance.handler = DeviceHandler(instance, "device") instance.handler.coverage = self.coverage elif instance.platform.simulation == "nsim": if find_executable("nsimdrv"): instance.handler = BinaryHandler(instance, "nsim") instance.handler.call_make_run = True elif instance.platform.simulation == 
"mdb-nsim": if find_executable("mdb"): instance.handler = BinaryHandler(instance, "nsim") instance.handler.call_make_run = True elif instance.platform.simulation == "armfvp": instance.handler = BinaryHandler(instance, "armfvp") instance.handler.call_make_run = True if instance.handler: instance.handler.args = args instance.handler.generator_cmd = self.generator_cmd instance.handler.generator = self.generator def process(self, pipeline, done, message, lock, results): op = message.get('op') if not self.instance.handler: self.setup_handler() # The build process, call cmake and build with configured generator if op == "cmake": res = self.cmake() if self.instance.status in ["failed", "error"]: pipeline.put({"op": "report", "test": self.instance}) elif self.cmake_only: if self.instance.status is None: self.instance.status = "passed" pipeline.put({"op": "report", "test": self.instance}) else: if self.instance.name in res['filter'] and res['filter'][self.instance.name]: logger.debug("filtering %s" % self.instance.name) self.instance.status = "skipped" self.instance.reason = "filter" results.skipped_runtime += 1 for case in self.instance.testcase.cases: self.instance.results.update({case: 'SKIP'}) pipeline.put({"op": "report", "test": self.instance}) else: pipeline.put({"op": "build", "test": self.instance}) elif op == "build": logger.debug("build test: %s" % self.instance.name) res = self.build() if not res: self.instance.status = "error" self.instance.reason = "Build Failure" pipeline.put({"op": "report", "test": self.instance}) else: # Count skipped cases during build, for example # due to ram/rom overflow. inst = res.get("instance", None) if inst and inst.status == "skipped": results.skipped_runtime += 1 if res.get('returncode', 1) > 0: pipeline.put({"op": "report", "test": self.instance}) else: if self.instance.run and self.instance.handler: pipeline.put({"op": "run", "test": self.instance}) else: pipeline.put({"op": "report", "test": self.instance}) # Run the generated binary using one of the supported handlers elif op == "run": logger.debug("run test: %s" % self.instance.name) self.run() self.instance.status, _ = self.instance.handler.get_state() logger.debug(f"run status: {self.instance.name} {self.instance.status}") # to make it work with pickle self.instance.handler.thread = None self.instance.handler.suite = None pipeline.put({ "op": "report", "test": self.instance, "status": self.instance.status, "reason": self.instance.reason } ) # Report results and output progress to screen elif op == "report": with lock: done.put(self.instance) self.report_out(results) if self.cleanup and not self.coverage and self.instance.status == "passed": pipeline.put({ "op": "cleanup", "test": self.instance }) elif op == "cleanup": if self.device_testing: self.cleanup_device_testing_artifacts() else: self.cleanup_artifacts() def cleanup_artifacts(self, additional_keep=[]): logger.debug("Cleaning up {}".format(self.instance.build_dir)) allow = [ 'zephyr/.config', 'handler.log', 'build.log', 'device.log', 'recording.csv', ] allow += additional_keep allow = [os.path.join(self.instance.build_dir, file) for file in allow] for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False): for name in filenames: path = os.path.join(dirpath, name) if path not in allow: os.remove(path) # Remove empty directories and symbolic links to directories for dir in dirnames: path = os.path.join(dirpath, dir) if os.path.islink(path): os.remove(path) elif not os.listdir(path): os.rmdir(path) def 
cleanup_device_testing_artifacts(self): logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir)) sanitizelist = [ 'CMakeCache.txt', 'zephyr/runners.yaml', ] keep = [ 'zephyr/zephyr.hex', 'zephyr/zephyr.bin', 'zephyr/zephyr.elf', ] keep += sanitizelist self.cleanup_artifacts(keep) # sanitize paths so files are relocatable for file in sanitizelist: file = os.path.join(self.instance.build_dir, file) with open(file, "rt") as fin: data = fin.read() data = data.replace(canonical_zephyr_base+"/", "") with open(file, "wt") as fin: fin.write(data) def report_out(self, results): total_to_do = results.total - results.skipped_configs total_tests_width = len(str(total_to_do)) results.done += 1 instance = self.instance if instance.status in ["error", "failed", "timeout", "flash_error"]: if instance.status == "error": results.error += 1 results.failed += 1 if self.verbose: status = Fore.RED + "FAILED " + Fore.RESET + instance.reason else: print("") logger.error( "{:<25} {:<50} {}FAILED{}: {}".format( instance.platform.name, instance.testcase.name, Fore.RED, Fore.RESET, instance.reason)) if not self.verbose: self.log_info_file(self.inline_logs) elif instance.status == "skipped": status = Fore.YELLOW + "SKIPPED" + Fore.RESET elif instance.status == "passed": status = Fore.GREEN + "PASSED" + Fore.RESET else: logger.debug(f"Unknown status = {instance.status}") status = Fore.YELLOW + "UNKNOWN" + Fore.RESET if self.verbose: if self.cmake_only: more_info = "cmake" elif instance.status == "skipped": more_info = instance.reason else: if instance.handler and instance.run: more_info = instance.handler.type_str htime = instance.handler.duration if htime: more_info += " {:.3f}s".format(htime) else: more_info = "build" logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format( results.done, total_tests_width, total_to_do, instance.platform.name, instance.testcase.name, status, more_info)) if instance.status in ["error", "failed", "timeout"]: self.log_info_file(self.inline_logs) else: completed_perc = 0 if total_to_do > 0: completed_perc = int((float(results.done) / total_to_do) * 100) skipped = results.skipped_configs + results.skipped_runtime sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % ( Fore.GREEN, results.done, total_to_do, Fore.RESET, completed_perc, Fore.YELLOW if skipped > 0 else Fore.RESET, skipped, Fore.RESET, Fore.RED if results.failed > 0 else Fore.RESET, results.failed, Fore.RESET ) ) sys.stdout.flush() def cmake(self): instance = self.instance args = self.testcase.extra_args[:] args += self.extra_args if instance.handler: args += instance.handler.args # merge overlay files into one variable def extract_overlays(args): re_overlay = re.compile('OVERLAY_CONFIG=(.*)') other_args = [] overlays = [] for arg in args: match = re_overlay.search(arg) if match: overlays.append(match.group(1).strip('\'"')) else: other_args.append(arg) args[:] = other_args return overlays overlays = extract_overlays(args) if os.path.exists(os.path.join(instance.build_dir, "twister", "testcase_extra.conf")): overlays.append(os.path.join(instance.build_dir, "twister", "testcase_extra.conf")) if overlays: args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays))) res = self.run_cmake(args) return res def build(self): res = self.run_build(['--build', self.build_dir]) return res def run(self): instance = self.instance if instance.handler: if instance.handler.type_str == "device": instance.handler.suite = self.suite instance.handler.handle() sys.stdout.flush() class 
TestSuite(DisablePyTestCollectionMixin): config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') tc_schema = scl.yaml_load( os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "testcase-schema.yaml")) quarantine_schema = scl.yaml_load( os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "quarantine-schema.yaml")) testcase_valid_keys = {"tags": {"type": "set", "required": False}, "type": {"type": "str", "default": "integration"}, "extra_args": {"type": "list"}, "extra_configs": {"type": "list"}, "build_only": {"type": "bool", "default": False}, "build_on_all": {"type": "bool", "default": False}, "skip": {"type": "bool", "default": False}, "slow": {"type": "bool", "default": False}, "timeout": {"type": "int", "default": 60}, "min_ram": {"type": "int", "default": 8}, "depends_on": {"type": "set"}, "min_flash": {"type": "int", "default": 32}, "arch_allow": {"type": "set"}, "arch_exclude": {"type": "set"}, "extra_sections": {"type": "list", "default": []}, "integration_platforms": {"type": "list", "default": []}, "platform_exclude": {"type": "set"}, "platform_allow": {"type": "set"}, "toolchain_exclude": {"type": "set"}, "toolchain_allow": {"type": "set"}, "filter": {"type": "str"}, "harness": {"type": "str"}, "harness_config": {"type": "map", "default": {}} } RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release", "twister_last_release.csv") SAMPLE_FILENAME = 'sample.yaml' TESTCASE_FILENAME = 'testcase.yaml' def __init__(self, board_root_list=[], testcase_roots=[], outdir=None): self.roots = testcase_roots if not isinstance(board_root_list, list): self.board_roots = [board_root_list] else: self.board_roots = board_root_list # Testsuite Options self.coverage_platform = [] self.build_only = False self.cmake_only = False self.cleanup = False self.enable_slow = False self.device_testing = False self.fixtures = [] self.enable_coverage = False self.enable_ubsan = False self.enable_lsan = False self.enable_asan = False self.enable_valgrind = False self.extra_args = [] self.inline_logs = False self.enable_sizes_report = False self.west_flash = None self.west_runner = None self.generator = None self.generator_cmd = None self.warnings_as_errors = True self.overflow_as_errors = False self.quarantine_verify = False # Keep track of which test cases we've filtered out and why self.testcases = {} self.quarantine = {} self.platforms = [] self.selected_platforms = [] self.filtered_platforms = [] self.default_platforms = [] self.outdir = os.path.abspath(outdir) self.discards = {} self.load_errors = 0 self.instances = dict() self.total_platforms = 0 self.start_time = 0 self.duration = 0 self.warnings = 0 # hardcoded for now self.duts = [] # run integration tests only self.integration = False self.pipeline = None self.version = "NA" def check_zephyr_version(self): try: subproc = subprocess.run(["git", "describe", "--abbrev=12"], stdout=subprocess.PIPE, universal_newlines=True, cwd=ZEPHYR_BASE) if subproc.returncode == 0: self.version = subproc.stdout.strip() logger.info(f"Zephyr version: {self.version}") except OSError: logger.info("Cannot read zephyr version.") def get_platform_instances(self, platform): filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + os.sep)} return filtered_dict def config(self): logger.info("coverage platform: {}".format(self.coverage_platform)) # Debug Functions @staticmethod def info(what): sys.stdout.write(what + "\n") sys.stdout.flush() def update_counting(self, 
results=None, initial=False): results.skipped_configs = 0 results.skipped_cases = 0 for instance in self.instances.values(): if initial: results.cases += len(instance.testcase.cases) if instance.status == 'skipped': results.skipped_configs += 1 results.skipped_cases += len(instance.testcase.cases) elif instance.status == "passed": results.passed += 1 for res in instance.results.values(): if res == 'SKIP': results.skipped_cases += 1 def compare_metrics(self, filename): # name, datatype, lower results better interesting_metrics = [("ram_size", int, True), ("rom_size", int, True)] if not os.path.exists(filename): logger.error("Cannot compare metrics, %s not found" % filename) return [] results = [] saved_metrics = {} with open(filename) as fp: cr = csv.DictReader(fp) for row in cr: d = {} for m, _, _ in interesting_metrics: d[m] = row[m] saved_metrics[(row["test"], row["platform"])] = d for instance in self.instances.values(): mkey = (instance.testcase.name, instance.platform.name) if mkey not in saved_metrics: continue sm = saved_metrics[mkey] for metric, mtype, lower_better in interesting_metrics: if metric not in instance.metrics: continue if sm[metric] == "": continue delta = instance.metrics.get(metric, 0) - mtype(sm[metric]) if delta == 0: continue results.append((instance, metric, instance.metrics.get(metric, 0), delta, lower_better)) return results def footprint_reports(self, report, show_footprint, all_deltas, footprint_threshold, last_metrics): if not report: return logger.debug("running footprint_reports") deltas = self.compare_metrics(report) warnings = 0 if deltas and show_footprint: for i, metric, value, delta, lower_better in deltas: if not all_deltas and ((delta < 0 and lower_better) or (delta > 0 and not lower_better)): continue percentage = 0 if value > delta: percentage = (float(delta) / float(value - delta)) if not all_deltas and (percentage < (footprint_threshold / 100.0)): continue logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format( i.platform.name, i.testcase.name, Fore.YELLOW, "INFO" if all_deltas else "WARNING", Fore.RESET, metric, delta, value, percentage)) warnings += 1 if warnings: logger.warning("Deltas based on metrics from last %s" % ("release" if not last_metrics else "run")) def summary(self, results, unrecognized_sections): failed = 0 run = 0 for instance in self.instances.values(): if instance.status == "failed": failed += 1 elif instance.metrics.get("unrecognized") and not unrecognized_sections: logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" % (Fore.RED, Fore.RESET, instance.name, str(instance.metrics.get("unrecognized", [])))) failed += 1 if instance.metrics.get('handler_time', None): run += 1 if results.total and results.total != results.skipped_configs: pass_rate = (float(results.passed) / float(results.total - results.skipped_configs)) else: pass_rate = 0 logger.info( "{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format( Fore.RED if failed else Fore.GREEN, results.passed, results.total - results.skipped_configs, Fore.RESET, pass_rate, Fore.RED if results.failed else Fore.RESET, results.failed, Fore.RESET, results.skipped_configs, Fore.YELLOW if self.warnings else Fore.RESET, self.warnings, Fore.RESET, self.duration)) self.total_platforms = len(self.platforms) # if we are only building, do not report about tests being executed. 
if self.platforms and not self.build_only: logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format( results.cases - results.skipped_cases, results.skipped_cases, len(self.filtered_platforms), self.total_platforms, (100 * len(self.filtered_platforms) / len(self.platforms)) )) logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \ {Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.") def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report): if not self.instances: return logger.info("Saving reports...") if name: report_name = name else: report_name = "twister" if report_dir: os.makedirs(report_dir, exist_ok=True) filename = os.path.join(report_dir, report_name) outdir = report_dir else: filename = os.path.join(self.outdir, report_name) outdir = self.outdir if suffix: filename = "{}_{}".format(filename, suffix) if not no_update: self.xunit_report(filename + ".xml", full_report=False, append=only_failed, version=self.version) self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed, version=self.version) self.csv_report(filename + ".csv") if json_report: self.json_report(filename + ".json", append=only_failed, version=self.version) if platform_reports: self.target_report(outdir, suffix, append=only_failed) if self.discards: self.discard_report(filename + "_discard.csv") if release: self.csv_report(self.RELEASE_DATA) def add_configurations(self): for board_root in self.board_roots: board_root = os.path.abspath(board_root) logger.debug("Reading platform configuration files under %s..." % board_root) for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")): try: platform = Platform() platform.load(file) if platform.name in [p.name for p in self.platforms]: logger.error(f"Duplicate platform {platform.name} in {file}") raise Exception(f"Duplicate platform identifier {platform.name} found") if platform.twister: self.platforms.append(platform) if platform.default: self.default_platforms.append(platform.name) except RuntimeError as e: logger.error("E: %s: can't load: %s" % (file, e)) self.load_errors += 1 def get_all_tests(self): tests = [] for _, tc in self.testcases.items(): for case in tc.cases: tests.append(case) return tests @staticmethod def get_toolchain(): toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/verify-toolchain.cmake') result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"]) try: if result['returncode']: raise TwisterRuntimeError(f"E: {result['returnmsg']}") except Exception as e: print(str(e)) sys.exit(2) toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT'] logger.info(f"Using '{toolchain}' toolchain.") return toolchain def add_testcases(self, testcase_filter=[]): for root in self.roots: root = os.path.abspath(root) logger.debug("Reading test case configuration files under %s..." 
% root) for dirpath, _, filenames in os.walk(root, topdown=True): if self.SAMPLE_FILENAME in filenames: filename = self.SAMPLE_FILENAME elif self.TESTCASE_FILENAME in filenames: filename = self.TESTCASE_FILENAME else: continue logger.debug("Found possible test case in " + dirpath) tc_path = os.path.join(dirpath, filename) try: parsed_data = TwisterConfigParser(tc_path, self.tc_schema) parsed_data.load() tc_path = os.path.dirname(tc_path) workdir = os.path.relpath(tc_path, root) for name in parsed_data.tests.keys(): tc = TestCase(root, workdir, name) tc_dict = parsed_data.get_test(name, self.testcase_valid_keys) tc.source_dir = tc_path tc.yamlfile = tc_path tc.type = tc_dict["type"] tc.tags = tc_dict["tags"] tc.extra_args = tc_dict["extra_args"] tc.extra_configs = tc_dict["extra_configs"] tc.arch_allow = tc_dict["arch_allow"] tc.arch_exclude = tc_dict["arch_exclude"] tc.skip = tc_dict["skip"] tc.platform_exclude = tc_dict["platform_exclude"] tc.platform_allow = tc_dict["platform_allow"] tc.toolchain_exclude = tc_dict["toolchain_exclude"] tc.toolchain_allow = tc_dict["toolchain_allow"] tc.tc_filter = tc_dict["filter"] tc.timeout = tc_dict["timeout"] tc.harness = tc_dict["harness"] tc.harness_config = tc_dict["harness_config"] if tc.harness == 'console' and not tc.harness_config: raise Exception('Harness config error: console harness defined without a configuration.') tc.build_only = tc_dict["build_only"] tc.build_on_all = tc_dict["build_on_all"] tc.slow = tc_dict["slow"] tc.min_ram = tc_dict["min_ram"] tc.depends_on = tc_dict["depends_on"] tc.min_flash = tc_dict["min_flash"] tc.extra_sections = tc_dict["extra_sections"] tc.integration_platforms = tc_dict["integration_platforms"] tc.parse_subcases(tc_path) if testcase_filter: if tc.name and tc.name in testcase_filter: self.testcases[tc.name] = tc else: self.testcases[tc.name] = tc except Exception as e: logger.error("%s: can't load (skipping): %s" % (tc_path, e)) self.load_errors += 1 return len(self.testcases) def get_platform(self, name): selected_platform = None for platform in self.platforms: if platform.name == name: selected_platform = platform break return selected_platform def load_quarantine(self, file): """ Loads quarantine list from the given yaml file. 
Creates a dictionary of all tests configurations (platform + scenario: comment) that shall be skipped due to quarantine """ # Load yaml into quarantine_yaml quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema) # Create quarantine_list with a product of the listed # platforms and scenarios for each entry in quarantine yaml quarantine_list = [] for quar_dict in quarantine_yaml: if quar_dict['platforms'][0] == "all": plat = [p.name for p in self.platforms] else: plat = quar_dict['platforms'] comment = quar_dict.get('comment', "NA") quarantine_list.append([{".".join([p, s]): comment} for p in plat for s in quar_dict['scenarios']]) # Flatten the quarantine_list quarantine_list = [it for sublist in quarantine_list for it in sublist] # Change quarantine_list into a dictionary for d in quarantine_list: self.quarantine.update(d) def load_from_file(self, file, filter_status=[], filter_platform=[]): try: with open(file, "r") as fp: cr = csv.DictReader(fp) instance_list = [] for row in cr: if row["status"] in filter_status: continue test = row["test"] platform = self.get_platform(row["platform"]) if filter_platform and platform.name not in filter_platform: continue instance = TestInstance(self.testcases[test], platform, self.outdir) if self.device_testing: tfilter = 'runnable' else: tfilter = 'buildable' instance.run = instance.check_runnable( self.enable_slow, tfilter, self.fixtures ) instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform) instance_list.append(instance) self.add_instances(instance_list) except KeyError as e: logger.error("Key error while parsing tests file.({})".format(str(e))) sys.exit(2) except FileNotFoundError as e: logger.error("Couldn't find input file with list of tests. 
({})".format(e)) sys.exit(2) def apply_filters(self, **kwargs): toolchain = self.get_toolchain() discards = {} platform_filter = kwargs.get('platform') exclude_platform = kwargs.get('exclude_platform', []) testcase_filter = kwargs.get('run_individual_tests', []) arch_filter = kwargs.get('arch') tag_filter = kwargs.get('tag') exclude_tag = kwargs.get('exclude_tag') all_filter = kwargs.get('all') runnable = kwargs.get('runnable') force_toolchain = kwargs.get('force_toolchain') force_platform = kwargs.get('force_platform') emu_filter = kwargs.get('emulation_only') logger.debug("platform filter: " + str(platform_filter)) logger.debug(" arch_filter: " + str(arch_filter)) logger.debug(" tag_filter: " + str(tag_filter)) logger.debug(" exclude_tag: " + str(exclude_tag)) default_platforms = False emulation_platforms = False if all_filter: logger.info("Selecting all possible platforms per test case") # When --all used, any --platform arguments ignored platform_filter = [] elif not platform_filter and not emu_filter: logger.info("Selecting default platforms per test case") default_platforms = True elif emu_filter: logger.info("Selecting emulation platforms per test case") emulation_platforms = True if platform_filter: platforms = list(filter(lambda p: p.name in platform_filter, self.platforms)) elif emu_filter: platforms = list(filter(lambda p: p.simulation != 'na', self.platforms)) elif arch_filter: platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms)) elif default_platforms: platforms = list(filter(lambda p: p.default, self.platforms)) else: platforms = self.platforms logger.info("Building initial testcase list...") for tc_name, tc in self.testcases.items(): if tc.build_on_all and not platform_filter: platform_scope = self.platforms elif tc.integration_platforms and self.integration: platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \ self.platforms)) else: platform_scope = platforms integration = self.integration and tc.integration_platforms # If there isn't any overlap between the platform_allow list and the platform_scope # we set the scope to the platform_allow list if tc.platform_allow and not platform_filter and not integration: a = set(platform_scope) b = set(filter(lambda item: item.name in tc.platform_allow, self.platforms)) c = a.intersection(b) if not c: platform_scope = list(filter(lambda item: item.name in tc.platform_allow, \ self.platforms)) # list of instances per testcase, aka configurations. 
instance_list = [] for plat in platform_scope: instance = TestInstance(tc, plat, self.outdir) if runnable: tfilter = 'runnable' else: tfilter = 'buildable' instance.run = instance.check_runnable( self.enable_slow, tfilter, self.fixtures ) for t in tc.cases: instance.results[t] = None if runnable and self.duts: for h in self.duts: if h.platform == plat.name: if tc.harness_config.get('fixture') in h.fixtures: instance.run = True if not force_platform and plat.name in exclude_platform: discards[instance] = discards.get(instance, "Platform is excluded on command line.") if (plat.arch == "unit") != (tc.type == "unit"): # Discard silently continue if runnable and not instance.run: discards[instance] = discards.get(instance, "Not runnable on device") if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms: discards[instance] = discards.get(instance, "Not part of integration platforms") if tc.skip: discards[instance] = discards.get(instance, "Skip filter") if tag_filter and not tc.tags.intersection(tag_filter): discards[instance] = discards.get(instance, "Command line testcase tag filter") if exclude_tag and tc.tags.intersection(exclude_tag): discards[instance] = discards.get(instance, "Command line testcase exclude filter") if testcase_filter and tc_name not in testcase_filter: discards[instance] = discards.get(instance, "Testcase name filter") if arch_filter and plat.arch not in arch_filter: discards[instance] = discards.get(instance, "Command line testcase arch filter") if not force_platform: if tc.arch_allow and plat.arch not in tc.arch_allow: discards[instance] = discards.get(instance, "Not in test case arch allow list") if tc.arch_exclude and plat.arch in tc.arch_exclude: discards[instance] = discards.get(instance, "In test case arch exclude") if tc.platform_exclude and plat.name in tc.platform_exclude: discards[instance] = discards.get(instance, "In test case platform exclude") if tc.toolchain_exclude and toolchain in tc.toolchain_exclude: discards[instance] = discards.get(instance, "In test case toolchain exclude") if platform_filter and plat.name not in platform_filter: discards[instance] = discards.get(instance, "Command line platform filter") if tc.platform_allow and plat.name not in tc.platform_allow: discards[instance] = discards.get(instance, "Not in testcase platform allow list") if tc.toolchain_allow and toolchain not in tc.toolchain_allow: discards[instance] = discards.get(instance, "Not in testcase toolchain allow list") if not plat.env_satisfied: discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env))) if not force_toolchain \ and toolchain and (toolchain not in plat.supported_toolchains) \ and "host" not in plat.supported_toolchains \ and tc.type != 'unit': discards[instance] = discards.get(instance, "Not supported by the toolchain") if plat.ram < tc.min_ram: discards[instance] = discards.get(instance, "Not enough RAM") if tc.depends_on: dep_intersection = tc.depends_on.intersection(set(plat.supported)) if dep_intersection != set(tc.depends_on): discards[instance] = discards.get(instance, "No hardware support") if plat.flash < tc.min_flash: discards[instance] = discards.get(instance, "Not enough FLASH") if set(plat.ignore_tags) & tc.tags: discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)") if plat.only_tags and not set(plat.only_tags) & tc.tags: discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)") test_configuration = 
".".join([instance.platform.name, instance.testcase.id]) # skip quarantined tests if test_configuration in self.quarantine and not self.quarantine_verify: discards[instance] = discards.get(instance, f"Quarantine: {self.quarantine[test_configuration]}") # run only quarantined test to verify their statuses (skip everything else) if self.quarantine_verify and test_configuration not in self.quarantine: discards[instance] = discards.get(instance, "Not under quarantine") # if nothing stopped us until now, it means this configuration # needs to be added. instance_list.append(instance) # no configurations, so jump to next testcase if not instance_list: continue # if twister was launched with no platform options at all, we # take all default platforms if default_platforms and not tc.build_on_all and not integration: if tc.platform_allow: a = set(self.default_platforms) b = set(tc.platform_allow) c = a.intersection(b) if c: aa = list(filter(lambda tc: tc.platform.name in c, instance_list)) self.add_instances(aa) else: self.add_instances(instance_list) else: instances = list(filter(lambda tc: tc.platform.default, instance_list)) self.add_instances(instances) elif integration: instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list)) self.add_instances(instances) elif emulation_platforms: self.add_instances(instance_list) for instance in list(filter(lambda inst: not inst.platform.simulation != 'na', instance_list)): discards[instance] = discards.get(instance, "Not an emulated platform") else: self.add_instances(instance_list) for _, case in self.instances.items(): case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform) self.discards = discards self.selected_platforms = set(p.platform.name for p in self.instances.values()) remove_from_discards = [] # configurations to be removed from discards. for instance in self.discards: instance.reason = self.discards[instance] # If integration mode is on all skips on integration_platforms are treated as errors. if self.integration and instance.platform.name in instance.testcase.integration_platforms \ and "Quarantine" not in instance.reason: instance.status = "error" instance.reason += " but is one of the integration platforms" instance.fill_results_by_status() self.instances[instance.name] = instance # Such configuration has to be removed from discards to make sure it won't get skipped remove_from_discards.append(instance) else: instance.status = "skipped" instance.fill_results_by_status() self.filtered_platforms = set(p.platform.name for p in self.instances.values() if p.status != "skipped" ) # Remove from discards configururations that must not be discarded (e.g. 
integration_platforms when --integration was used) for instance in remove_from_discards: del self.discards[instance] return discards def add_instances(self, instance_list): for instance in instance_list: self.instances[instance.name] = instance @staticmethod def calc_one_elf_size(instance): if instance.status not in ["error", "failed", "skipped"]: if instance.platform.type != "native": size_calc = instance.calculate_sizes() instance.metrics["ram_size"] = size_calc.get_ram_size() instance.metrics["rom_size"] = size_calc.get_rom_size() instance.metrics["unrecognized"] = size_calc.unrecognized_sections() else: instance.metrics["ram_size"] = 0 instance.metrics["rom_size"] = 0 instance.metrics["unrecognized"] = [] instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0 def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False): for instance in self.instances.values(): if build_only: instance.run = False if instance.status not in ['passed', 'skipped', 'error']: logger.debug(f"adding {instance.name}") instance.status = None if test_only and instance.run: pipeline.put({"op": "run", "test": instance}) else: pipeline.put({"op": "cmake", "test": instance}) # If the instance got 'error' status before, proceed to the report stage if instance.status == "error": pipeline.put({"op": "report", "test": instance}) def pipeline_mgr(self, pipeline, done_queue, lock, results): while True: try: task = pipeline.get_nowait() except queue.Empty: break else: test = task['test'] pb = ProjectBuilder(self, test, lsan=self.enable_lsan, asan=self.enable_asan, ubsan=self.enable_ubsan, coverage=self.enable_coverage, extra_args=self.extra_args, device_testing=self.device_testing, cmake_only=self.cmake_only, cleanup=self.cleanup, valgrind=self.enable_valgrind, inline_logs=self.inline_logs, generator=self.generator, generator_cmd=self.generator_cmd, verbose=self.verbose, warnings_as_errors=self.warnings_as_errors, overflow_as_errors=self.overflow_as_errors ) pb.process(pipeline, done_queue, task, lock, results) return True def execute(self, pipeline, done, results): lock = Lock() logger.info("Adding tasks to the queue...") self.add_tasks_to_queue(pipeline, self.build_only, self.test_only) logger.info("Added initial list of jobs to queue") processes = [] for job in range(self.jobs): logger.debug(f"Launch process {job}") p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, )) processes.append(p) p.start() try: for p in processes: p.join() except KeyboardInterrupt: logger.info("Execution interrupted") for p in processes: p.terminate() # FIXME: This needs to move out. 
if self.enable_size_report and not self.cmake_only: # Parallelize size calculation executor = concurrent.futures.ThreadPoolExecutor(self.jobs) futures = [executor.submit(self.calc_one_elf_size, instance) for instance in self.instances.values()] concurrent.futures.wait(futures) else: for instance in self.instances.values(): instance.metrics["ram_size"] = 0 instance.metrics["rom_size"] = 0 instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0 instance.metrics["unrecognized"] = [] return results def discard_report(self, filename): try: if not self.discards: raise TwisterRuntimeError("apply_filters() hasn't been run!") except Exception as e: logger.error(str(e)) sys.exit(2) with open(filename, "wt") as csvfile: fieldnames = ["test", "arch", "platform", "reason"] cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep) cw.writeheader() for instance, reason in sorted(self.discards.items()): rowdict = {"test": instance.testcase.name, "arch": instance.platform.arch, "platform": instance.platform.name, "reason": reason} cw.writerow(rowdict) def target_report(self, outdir, suffix, append=False): platforms = {inst.platform.name for _, inst in self.instances.items()} for platform in platforms: if suffix: filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix)) else: filename = os.path.join(outdir,"{}.xml".format(platform)) self.xunit_report(filename, platform, full_report=True, append=append, version=self.version) @staticmethod def process_log(log_file): filtered_string = "" if os.path.exists(log_file): with open(log_file, "rb") as f: log = f.read().decode("utf-8") filtered_string = ''.join(filter(lambda x: x in string.printable, log)) return filtered_string def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"): total = 0 fails = passes = errors = skips = 0 if platform: selected = [platform] logger.info(f"Writing target report for {platform}...") else: logger.info(f"Writing xunit report {filename}...") selected = self.selected_platforms if os.path.exists(filename) and append: tree = ET.parse(filename) eleTestsuites = tree.getroot() else: eleTestsuites = ET.Element('testsuites') for p in selected: inst = self.get_platform_instances(p) fails = 0 passes = 0 errors = 0 skips = 0 duration = 0 for _, instance in inst.items(): handler_time = instance.metrics.get('handler_time', 0) duration += handler_time if full_report and instance.run: for k in instance.results.keys(): if instance.results[k] == 'PASS': passes += 1 elif instance.results[k] == 'BLOCK': errors += 1 elif instance.results[k] == 'SKIP' or instance.status in ['skipped']: skips += 1 else: fails += 1 else: if instance.status in ["error", "failed", "timeout", "flash_error"]: if instance.reason in ['build_error', 'handler_crash']: errors += 1 else: fails += 1 elif instance.status == 'skipped': skips += 1 elif instance.status == 'passed': passes += 1 else: if instance.status: logger.error(f"{instance.name}: Unknown status {instance.status}") else: logger.error(f"{instance.name}: No status") total = (errors + passes + fails + skips) # do not produce a report if no tests were actually run (only built) if total == 0: continue run = p eleTestsuite = None # When we re-run the tests, we re-use the results and update only with # the newly run tests. 
if os.path.exists(filename) and append: ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]') if ts: eleTestsuite = ts[0] eleTestsuite.attrib['failures'] = "%d" % fails eleTestsuite.attrib['errors'] = "%d" % errors eleTestsuite.attrib['skipped'] = "%d" % skips else: logger.info(f"Did not find any existing results for {p}") eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite', name=run, time="%f" % duration, tests="%d" % (total), failures="%d" % fails, errors="%d" % (errors), skipped="%s" % (skips)) eleTSPropetries = ET.SubElement(eleTestsuite, 'properties') # Multiple 'property' can be added to 'properties' # differing by name and value ET.SubElement(eleTSPropetries, 'property', name="version", value=version) else: eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite', name=run, time="%f" % duration, tests="%d" % (total), failures="%d" % fails, errors="%d" % (errors), skipped="%s" % (skips)) eleTSPropetries = ET.SubElement(eleTestsuite, 'properties') # Multiple 'property' can be added to 'properties' # differing by name and value ET.SubElement(eleTSPropetries, 'property', name="version", value=version) for _, instance in inst.items(): if full_report: tname = os.path.basename(instance.testcase.name) else: tname = instance.testcase.id handler_time = instance.metrics.get('handler_time', 0) if full_report: for k in instance.results.keys(): # remove testcases that are being re-run from exiting reports for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'): eleTestsuite.remove(tc) classname = ".".join(tname.split(".")[:2]) eleTestcase = ET.SubElement( eleTestsuite, 'testcase', classname=classname, name="%s" % (k), time="%f" % handler_time) if instance.results[k] in ['FAIL', 'BLOCK'] or \ (not instance.run and instance.status in ["error", "failed", "timeout"]): if instance.results[k] == 'FAIL': el = ET.SubElement( eleTestcase, 'failure', type="failure", message="failed") else: el = ET.SubElement( eleTestcase, 'error', type="failure", message=instance.reason) log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name) log_file = os.path.join(log_root, "handler.log") el.text = self.process_log(log_file) elif instance.results[k] == 'PASS' \ or (not instance.run and instance.status in ["passed"]): pass elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]): el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason) else: el = ET.SubElement( eleTestcase, 'error', type="error", message=f"{instance.reason}") else: if platform: classname = ".".join(instance.testcase.name.split(".")[:2]) else: classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2]) # remove testcases that are being re-run from exiting reports for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'): eleTestsuite.remove(tc) eleTestcase = ET.SubElement(eleTestsuite, 'testcase', classname=classname, name="%s" % (instance.testcase.name), time="%f" % handler_time) if instance.status in ["error", "failed", "timeout", "flash_error"]: failure = ET.SubElement( eleTestcase, 'failure', type="failure", message=instance.reason) log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name)) bl = os.path.join(log_root, "build.log") hl = os.path.join(log_root, "handler.log") log_file = bl if instance.reason != 'Build error': if os.path.exists(hl): log_file = hl else: log_file = bl failure.text = self.process_log(log_file) elif instance.status == "skipped": ET.SubElement(eleTestcase, 'skipped', 
type="skipped", message="Skipped") result = ET.tostring(eleTestsuites) with open(filename, 'wb') as report: report.write(result) return fails, passes, errors, skips def csv_report(self, filename): with open(filename, "wt") as csvfile: fieldnames = ["test", "arch", "platform", "status", "extra_args", "handler", "handler_time", "ram_size", "rom_size"] cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep) cw.writeheader() for instance in self.instances.values(): rowdict = {"test": instance.testcase.name, "arch": instance.platform.arch, "platform": instance.platform.name, "extra_args": " ".join(instance.testcase.extra_args), "handler": instance.platform.simulation} rowdict["status"] = instance.status if instance.status not in ["error", "failed", "timeout"]: if instance.handler: rowdict["handler_time"] = instance.metrics.get("handler_time", 0) ram_size = instance.metrics.get("ram_size", 0) rom_size = instance.metrics.get("rom_size", 0) rowdict["ram_size"] = ram_size rowdict["rom_size"] = rom_size cw.writerow(rowdict) def json_report(self, filename, append=False, version="NA"): logger.info(f"Writing JSON report {filename}") report = {} selected = self.selected_platforms report["environment"] = {"os": os.name, "zephyr_version": version, "toolchain": self.get_toolchain() } json_data = {} if os.path.exists(filename) and append: with open(filename, 'r') as json_file: json_data = json.load(json_file) suites = json_data.get("testsuites", []) if suites: suite = suites[0] testcases = suite.get("testcases", []) else: suite = {} testcases = [] for p in selected: inst = self.get_platform_instances(p) for _, instance in inst.items(): testcase = {} handler_log = os.path.join(instance.build_dir, "handler.log") build_log = os.path.join(instance.build_dir, "build.log") device_log = os.path.join(instance.build_dir, "device.log") handler_time = instance.metrics.get('handler_time', 0) ram_size = instance.metrics.get ("ram_size", 0) rom_size = instance.metrics.get("rom_size",0) for k in instance.results.keys(): testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases )) testcase = {"testcase": k, "arch": instance.platform.arch, "platform": p, } if ram_size: testcase["ram_size"] = ram_size if rom_size: testcase["rom_size"] = rom_size if instance.results[k] in ["PASS"] or instance.status == 'passed': testcase["status"] = "passed" if instance.handler: testcase["execution_time"] = handler_time elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout", "flash_error"]: testcase["status"] = "failed" testcase["reason"] = instance.reason testcase["execution_time"] = handler_time if os.path.exists(handler_log): testcase["test_output"] = self.process_log(handler_log) elif os.path.exists(device_log): testcase["device_log"] = self.process_log(device_log) else: testcase["build_log"] = self.process_log(build_log) elif instance.status == 'skipped': testcase["status"] = "skipped" testcase["reason"] = instance.reason testcases.append(testcase) suites = [ {"testcases": testcases} ] report["testsuites"] = suites with open(filename, "wt") as json_file: json.dump(report, json_file, indent=4, separators=(',',':')) def get_testcase(self, identifier): results = [] for _, tc in self.testcases.items(): for case in tc.cases: if case == identifier: results.append(tc) return results class CoverageTool: """ Base class for every supported coverage tool """ def __init__(self): self.gcov_tool = None self.base_dir = None @staticmethod def factory(tool): 
if tool == 'lcov': t = Lcov() elif tool == 'gcovr': t = Gcovr() else: logger.error("Unsupported coverage tool specified: {}".format(tool)) return None logger.debug(f"Select {tool} as the coverage tool...") return t @staticmethod def retrieve_gcov_data(input_file): logger.debug("Working on %s" % input_file) extracted_coverage_info = {} capture_data = False capture_complete = False with open(input_file, 'r') as fp: for line in fp.readlines(): if re.search("GCOV_COVERAGE_DUMP_START", line): capture_data = True continue if re.search("GCOV_COVERAGE_DUMP_END", line): capture_complete = True break # Loop until the coverage data is found. if not capture_data: continue if line.startswith("*"): sp = line.split("<") if len(sp) > 1: # Remove the leading delimiter "*" file_name = sp[0][1:] # Remove the trailing new line char hex_dump = sp[1][:-1] else: continue else: continue extracted_coverage_info.update({file_name: hex_dump}) if not capture_data: capture_complete = True return {'complete': capture_complete, 'data': extracted_coverage_info} @staticmethod def create_gcda_files(extracted_coverage_info): logger.debug("Generating gcda files") for filename, hexdump_val in extracted_coverage_info.items(): # if kobject_hash is given for coverage gcovr fails # hence skipping it problem only in gcovr v4.1 if "kobject_hash" in filename: filename = (filename[:-4]) + "gcno" try: os.remove(filename) except Exception: pass continue with open(filename, 'wb') as fp: fp.write(bytes.fromhex(hexdump_val)) def generate(self, outdir): for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True): gcov_data = self.__class__.retrieve_gcov_data(filename) capture_complete = gcov_data['complete'] extracted_coverage_info = gcov_data['data'] if capture_complete: self.__class__.create_gcda_files(extracted_coverage_info) logger.debug("Gcov data captured: {}".format(filename)) else: logger.error("Gcov data capture incomplete: {}".format(filename)) with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog: ret = self._generate(outdir, coveragelog) if ret == 0: logger.info("HTML report generated: {}".format( os.path.join(outdir, "coverage", "index.html"))) class Lcov(CoverageTool): def __init__(self): super().__init__() self.ignores = [] def add_ignore_file(self, pattern): self.ignores.append('*' + pattern + '*') def add_ignore_directory(self, pattern): self.ignores.append('*/' + pattern + '/*') def _generate(self, outdir, coveragelog): coveragefile = os.path.join(outdir, "coverage.info") ztestfile = os.path.join(outdir, "ztest.info") cmd = ["lcov", "--gcov-tool", self.gcov_tool, "--capture", "--directory", outdir, "--rc", "lcov_branch_coverage=1", "--output-file", coveragefile] cmd_str = " ".join(cmd) logger.debug(f"Running {cmd_str}...") subprocess.call(cmd, stdout=coveragelog) # We want to remove tests/* and tests/ztest/test/* but save tests/ztest subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract", coveragefile, os.path.join(self.base_dir, "tests", "ztest", "*"), "--output-file", ztestfile, "--rc", "lcov_branch_coverage=1"], stdout=coveragelog) if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0: subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove", ztestfile, os.path.join(self.base_dir, "tests/ztest/test/*"), "--output-file", ztestfile, "--rc", "lcov_branch_coverage=1"], stdout=coveragelog) files = [coveragefile, ztestfile] else: files = [coveragefile] for i in self.ignores: subprocess.call( ["lcov", "--gcov-tool", self.gcov_tool, "--remove", coveragefile, i, 
"--output-file", coveragefile, "--rc", "lcov_branch_coverage=1"], stdout=coveragelog) # The --ignore-errors source option is added to avoid it exiting due to # samples/application_development/external_lib/ return subprocess.call(["genhtml", "--legend", "--branch-coverage", "--ignore-errors", "source", "-output-directory", os.path.join(outdir, "coverage")] + files, stdout=coveragelog) class Gcovr(CoverageTool): def __init__(self): super().__init__() self.ignores = [] def add_ignore_file(self, pattern): self.ignores.append('.*' + pattern + '.*') def add_ignore_directory(self, pattern): self.ignores.append(".*/" + pattern + '/.*') @staticmethod def _interleave_list(prefix, list): tuple_list = [(prefix, item) for item in list] return [item for sublist in tuple_list for item in sublist] def _generate(self, outdir, coveragelog): coveragefile = os.path.join(outdir, "coverage.json") ztestfile = os.path.join(outdir, "ztest.json") excludes = Gcovr._interleave_list("-e", self.ignores) # We want to remove tests/* and tests/ztest/test/* but save tests/ztest cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable", self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o", coveragefile, outdir] cmd_str = " ".join(cmd) logger.debug(f"Running {cmd_str}...") subprocess.call(cmd, stdout=coveragelog) subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable", self.gcov_tool, "-f", "tests/ztest", "-e", "tests/ztest/test/*", "--json", "-o", ztestfile, outdir], stdout=coveragelog) if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0: files = [coveragefile, ztestfile] else: files = [coveragefile] subdir = os.path.join(outdir, "coverage") os.makedirs(subdir, exist_ok=True) tracefiles = self._interleave_list("--add-tracefile", files) return subprocess.call(["gcovr", "-r", self.base_dir, "--html", "--html-details"] + tracefiles + ["-o", os.path.join(subdir, "index.html")], stdout=coveragelog) class DUT(object): def __init__(self, id=None, serial=None, serial_baud=None, platform=None, product=None, serial_pty=None, connected=False, pre_script=None, post_script=None, post_flash_script=None, runner=None): self.serial = serial self.serial_baud = 115200 if serial_baud: self.serial_baud = serial_baud self.platform = platform self.serial_pty = serial_pty self._counter = Value("i", 0) self._available = Value("i", 1) self.connected = connected self.pre_script = pre_script self.id = id self.product = product self.runner = runner self.fixtures = [] self.post_flash_script = post_flash_script self.post_script = post_script self.pre_script = pre_script self.probe_id = None self.notes = None self.lock = Lock() self.match = False @property def available(self): with self._available.get_lock(): return self._available.value @available.setter def available(self, value): with self._available.get_lock(): self._available.value = value @property def counter(self): with self._counter.get_lock(): return self._counter.value @counter.setter def counter(self, value): with self._counter.get_lock(): self._counter.value = value def to_dict(self): d = {} exclude = ['_available', '_counter', 'match'] v = vars(self) for k in v.keys(): if k not in exclude and v[k]: d[k] = v[k] return d def __repr__(self): return f"<{self.platform} ({self.product}) on {self.serial}>" class HardwareMap: schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml") manufacturer = [ 'ARM', 'SEGGER', 'MBED', 'STMicroelectronics', 'Atmel Corp.', 'Texas Instruments', 'Silicon Labs', 'NXP Semiconductors', 'Microchip 
Technology Inc.', 'FTDI', 'Digilent' ] runner_mapping = { 'pyocd': [ 'DAPLink CMSIS-DAP', 'MBED CMSIS-DAP' ], 'jlink': [ 'J-Link', 'J-Link OB' ], 'openocd': [ 'STM32 STLink', '^XDS110.*', 'STLINK-V3' ], 'dediprog': [ 'TTL232R-3V3', 'MCP2200 USB Serial Port Emulator' ] } def __init__(self): self.detected = [] self.duts = [] def add_device(self, serial, platform, pre_script, is_pty, baud=None): device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud) if is_pty: device.serial_pty = serial else: device.serial = serial self.duts.append(device) def load(self, map_file): hwm_schema = scl.yaml_load(self.schema_path) duts = scl.yaml_load_verify(map_file, hwm_schema) for dut in duts: pre_script = dut.get('pre_script') post_script = dut.get('post_script') post_flash_script = dut.get('post_flash_script') platform = dut.get('platform') id = dut.get('id') runner = dut.get('runner') serial = dut.get('serial') baud = dut.get('baud', None) product = dut.get('product') fixtures = dut.get('fixtures', []) new_dut = DUT(platform=platform, product=product, runner=runner, id=id, serial=serial, serial_baud=baud, connected=serial is not None, pre_script=pre_script, post_script=post_script, post_flash_script=post_flash_script) new_dut.fixtures = fixtures new_dut.counter = 0 self.duts.append(new_dut) def scan(self, persistent=False): from serial.tools import list_ports if persistent and platform.system() == 'Linux': # On Linux, /dev/serial/by-id provides symlinks to # '/dev/ttyACMx' nodes using names which are unique as # long as manufacturers fill out USB metadata nicely. # # This creates a map from '/dev/ttyACMx' device nodes # to '/dev/serial/by-id/usb-...' symlinks. The symlinks # go into the hardware map because they stay the same # even when the user unplugs / replugs the device. # # Some inexpensive USB/serial adapters don't result # in unique names here, though, so use of this feature # requires explicitly setting persistent=True. 
by_id = Path('/dev/serial/by-id') def readlink(link): return str((by_id / link).resolve()) persistent_map = {readlink(link): str(link) for link in by_id.iterdir()} else: persistent_map = {} serial_devices = list_ports.comports() logger.info("Scanning connected hardware...") for d in serial_devices: if d.manufacturer in self.manufacturer: # TI XDS110 can have multiple serial devices for a single board # assume endpoint 0 is the serial, skip all others if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'): continue s_dev = DUT(platform="unknown", id=d.serial_number, serial=persistent_map.get(d.device, d.device), product=d.product, runner='unknown', connected=True) for runner, _ in self.runner_mapping.items(): products = self.runner_mapping.get(runner) if d.product in products: s_dev.runner = runner continue # Try regex matching for p in products: if re.match(p, d.product): s_dev.runner = runner s_dev.connected = True self.detected.append(s_dev) else: logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d)) def save(self, hwm_file): # use existing map self.detected.sort(key=lambda x: x.serial or '') if os.path.exists(hwm_file): with open(hwm_file, 'r') as yaml_file: hwm = yaml.load(yaml_file, Loader=SafeLoader) if hwm: hwm.sort(key=lambda x: x['serial'] or '') # disconnect everything for h in hwm: h['connected'] = False h['serial'] = None for _detected in self.detected: for h in hwm: if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match: h['connected'] = True h['serial'] = _detected.serial _detected.match = True new_duts = list(filter(lambda d: not d.match, self.detected)) new = [] for d in new_duts: new.append(d.to_dict()) if hwm: hwm = hwm + new else: hwm = new with open(hwm_file, 'w') as yaml_file: yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False) self.load(hwm_file) logger.info("Registered devices:") self.dump() else: # create new file dl = [] for _connected in self.detected: platform = _connected.platform id = _connected.id runner = _connected.runner serial = _connected.serial product = _connected.product d = { 'platform': platform, 'id': id, 'runner': runner, 'serial': serial, 'product': product, 'connected': _connected.connected } dl.append(d) with open(hwm_file, 'w') as yaml_file: yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False) logger.info("Detected devices:") self.dump(detected=True) def dump(self, filtered=[], header=[], connected_only=False, detected=False): print("") table = [] if detected: to_show = self.detected else: to_show = self.duts if not header: header = ["Platform", "ID", "Serial device"] for p in to_show: platform = p.platform connected = p.connected if filtered and platform not in filtered: continue if not connected_only or connected: table.append([platform, p.id, p.serial]) print(tabulate(table, headers=header, tablefmt="github"))
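The twister code above drives every configuration through a multiprocessing queue of op messages (cmake, build, run, report, cleanup) consumed by ProjectBuilder.process(). Below is a minimal, self-contained sketch of that queue-driven worker pattern; the Task handling and op chain here are hypothetical placeholders, not the twister implementation.

# Minimal sketch (not the twister implementation): a pipeline queue of
# {"op": ..., "test": ...} messages drained by worker processes, mirroring
# the op flow in ProjectBuilder.process() above. handle() is a placeholder.
import multiprocessing
import queue


def handle(op, test):
    # Stand-in for the real cmake/build/run/report steps.
    print("%s: %s" % (op, test))
    # Each op enqueues the next stage, as ProjectBuilder.process() does.
    return {"cmake": "build", "build": "run", "run": "report"}.get(op)


def worker(pipeline, done):
    while True:
        try:
            task = pipeline.get_nowait()
        except queue.Empty:
            break
        next_op = handle(task["op"], task["test"])
        if next_op:
            pipeline.put({"op": next_op, "test": task["test"]})
        else:
            done.put(task["test"])


if __name__ == "__main__":
    manager = multiprocessing.Manager()
    pipeline = manager.Queue()
    done = manager.Queue()
    # Hypothetical instance names, only for illustration.
    for name in ("native_posix/kernel.common", "qemu_x86/kernel.common"):
        pipeline.put({"op": "cmake", "test": name})
    procs = [multiprocessing.Process(target=worker, args=(pipeline, done))
             for _ in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    while not done.empty():
        print("done:", done.get())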
utils.py
# coding: utf-8 # This file is a part of VK4XMPP transport # © simpleApps, 2014. """ Contains useful functions which used across the modules """ import threading import xmpp import urllib from socket import error from writer import * isNumber = lambda obj: (not execute(int, (obj,), False) is None) def execute(handler, list=(), log=True): """ Just executes handler(*list) safely Writes a crashlog if errors occurred """ try: result = handler(*list) except (SystemExit, xmpp.NodeProcessed): result = True except Exception: result = None if log: crashLog(handler.func_name) logger.error("Exception happened during executing function: %s%s" % (handler.func_name, str(list))) return result def runThread(func, args=(), name=None, delay=0): """ Runs a thread with custom args and name Needed to reduce code Parameters: func: function you need to be running in a thread args: function arguments name: thread name att: number of attempts delay: if set, then threading.Timer will be started, not threading.Thread """ if delay: logger.debug("threading: starting timer for %s%s, " "name:%s, delay:%s" % (func.func_name, str(args), name, delay)) thr = threading.Timer(delay, execute, (func, args)) else: thr = threading.Thread(target=execute, args=(func, args)) thr.name = str(name or func.__name__) + "-" + str(time.time()) thr.start() return thr def safe(func): """ Executes func(*args) safely """ def wrapper(*args): try: func(*args) except xmpp.NodeProcessed: pass except Exception: crashLog(func.func_name) wrapper.__name__ = func.__name__ return wrapper def cache(func): """ Caches user/group ids for future usage """ def wrapper(self, uid, fields=None): fields = fields or [] fieldsStr = ",".join(fields) if uid in self.cache: if self.cache[uid]["fields"] == fieldsStr: return self.cache[uid] result = func(self, uid, fields) if result: result["fields"] = fieldsStr if "user_id" in result: del result["user_id"] if uid in self.cache: self.cache[uid].update(result) else: self.cache[uid] = result return result wrapper.__name__ = func.__name__ return wrapper def threaded(func): """ Another decorator. Executes a function in a thread """ def wrapper(*args): runThread(func, args) wrapper.__name__ = "threaded_%s" % func.__name__ return wrapper def buildDataForm(form=None, type="form", fields=[], title=None, data=[]): """ Provides easier method to build data forms using dict for each form object Parameters: form: xmpp.DataForm object type: form type fields: list of form objects represented as dict, e.g. [{"var": "cool", "type": "text-single", "desc": "my cool description", "value": "cool"}] title: form title data: advanced data for form. e.g. 
instructions (if string in the list), look at xmpp/protocol.py:1326 """ if title and form: form.setTitle(title) form = form or xmpp.DataForm(type, data, title) for key in fields: field = form.setField(key["var"], key.get("value"), key.get("type"), key.get("desc"), key.get("options")) if key.get("payload"): field.setPayload(key["payload"]) if key.get("label"): field.setLabel(key["label"]) if key.get("requred"): field.setRequired() return form def buildIQError(stanza, error=xmpp.ERR_FEATURE_NOT_IMPLEMENTED, text=None): """ Provides a way to build IQ error reply """ error = xmpp.Error(stanza, error, True) if text: tag = error.getTag("error") if tag: tag.setTagData("text", text) return error def normalizeValue(value): """ Normalizes boolean values from dataform replies """ if isNumber(value): value = int(value) elif value and value.lower() == "true": value = 1 else: value = 0 return value def getLinkData(url, encode=True): """ Gets link data and ignores any exceptions Parameters: encode: base64 data encode """ try: opener = urllib.urlopen(url) data = opener.read() except (Exception, error): return "" if data and encode: data = data.encode("base64") return data TIME_VALUES = {"s": 60, "m": 360, "d": 86400, "M": 2592000, "y": 31536000} def TimeMachine(text): """ TARDIS Prototype """ time = 0 for i in xrange(0, len(text) - 1, 3): current = text[i:i + 3] x = current[-1] if x in TIME_VALUES: time += int(current[:-1]) * TIME_VALUES[x] return time # Yay!
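A brief usage sketch for the threading helpers above (illustrative only; it assumes this module is importable as utils and that its xmpp/writer dependencies are available, and it uses Python 2 syntax to match the module):

from utils import runThread, threaded


def greet(name):
    print "hello, %s" % name

# run once in a background thread, and once more after a 5 second delay (threading.Timer)
runThread(greet, ("world",), name="greeter")
runThread(greet, ("world",), name="greeter-later", delay=5)


@threaded
def announce(text):
    # the decorated call returns immediately; exceptions end up in the crashlog via execute()
    print text

announce("sent from a worker thread")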
multiprocessing_test.py
# import os
# # fork is only available on Linux/Unix
# pid = os.fork()
# print("bobby")
# if pid == 0:
#     print('child process {}, parent process is: {}.'.format(os.getpid(), os.getppid()))
# else:
#     print('I am the parent process: {}.'.format(pid))

import multiprocessing  # multi-process programming
import time


def get_html(n):
    time.sleep(n)
    print("sub_progress success")
    return n


if __name__ == "__main__":
    # progress = multiprocessing.Process(target=get_html, args=(2,))
    # print(progress.pid)
    # progress.start()
    # print(progress.pid)
    # progress.join()
    # print("main progress end")

    # use a process pool
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    result = pool.apply_async(get_html, args=(3,))

    # wait for all tasks to complete
    pool.close()
    pool.join()

    print(result.get())

    # imap
    # for result in pool.imap(get_html, [1, 5, 3]):
    #     print("{} sleep success".format(result))

    # for result in pool.imap_unordered(get_html, [1, 5, 3]):
    #     print("{} sleep success".format(result))
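The imap variants that are commented out above can be exercised like this (a small sketch; it re-declares get_html so it runs standalone, and uses the pool as a context manager, which tears the pool down on exit after all results have been consumed inside the block):

import multiprocessing
import time


def get_html(n):
    time.sleep(n)
    return n


if __name__ == "__main__":
    with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
        # imap preserves input order; imap_unordered yields results as soon as they finish
        for result in pool.imap_unordered(get_html, [1, 3, 2]):
            print("{} sleep success".format(result))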
prefetch_iterator.py
# Copyright 2021 The Flax Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility for constructing an iterator which prefetches data asynchronously. """ import threading import warnings class PrefetchIterator: """Wraps an iterator to provide async prefetching. DEPRECATION WARNING: TensorFlow datasets no longer require manual prefetching. Previously this class was used to make data loading using TensorFlow datasets more efficient. Now TF data handles prefetching with NumPy iterators correctly. Example:: tf_iter = dataset.as_numpy_iterator() # only loads data while calling next tf_iter = PrefetchIterator(tf_iter) # prefetches data in the background """ def __init__(self, data_iter, buffer_size=1): """Construct a PrefetchIterator. Args: data_iter: the Iterator that should be prefetched. buffer_size: how many items to prefetch (default: 1). """ warnings.warn('PrefetchIterator is deprecated. Use the standard `tf.data`' ' prefetch method instead', DeprecationWarning) self._data_iter = data_iter self.buffer_size = buffer_size self._cond = threading.Condition() self._buffer = [] self._active = True self._thread = threading.Thread(target=self._prefetch_loop, daemon=True) self._thread.start() self._error = None def __iter__(self): return self def __next__(self): with self._cond: self._cond.wait_for(lambda: self._buffer or not self._active) if self._buffer: item = self._buffer.pop(0) self._cond.notifyAll() return item if self._error: raise self._error # pylint: disable=raising-bad-type assert not self._active raise StopIteration() def close(self): with self._cond: self._active = False self._cond.notifyAll() def _prefetch_loop(self): """Prefetch loop that prefetches a tf dataset.""" def _predicate(): return len(self._buffer) < self.buffer_size or not self._active while True: try: item = next(self._data_iter) with self._cond: self._buffer.append(item) self._cond.notifyAll() self._cond.wait_for(_predicate) if not self._active: return except Exception as e: # pylint: disable=broad-except with self._cond: self._error = e self._active = False self._cond.notifyAll() return
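PrefetchIterator wraps any Python iterator, not only TensorFlow datasets. A minimal usage sketch with the class defined above in scope (the slow generator is purely illustrative):

import time


def slow_source(n):
    for i in range(n):
        time.sleep(0.1)  # pretend each item is expensive to produce
        yield i


it = PrefetchIterator(slow_source(5), buffer_size=2)
for item in it:
    print(item)  # items are produced ahead of time by the background thread
it.close()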
test_threaded_import.py
# This jest a variant of the very old (early 90's) file # Demo/threads/bug.py. It simply provokes a number of threads into # trying to zaimportuj the same module "at the same time". # There are no pleasant failure modes -- most likely jest that Python # complains several times about module random having no attribute # randrange, oraz then Python hangs. zaimportuj _imp jako imp zaimportuj os zaimportuj importlib zaimportuj sys zaimportuj time zaimportuj shutil zaimportuj unittest z test.support zaimportuj ( verbose, import_module, run_unittest, TESTFN, reap_threads, forget, unlink, rmtree, start_threads) threading = import_module('threading') def task(N, done, done_tasks, errors): spróbuj: # We don't use modulefinder but still zaimportuj it w order to stress # importing of different modules z several threads. jeżeli len(done_tasks) % 2: zaimportuj modulefinder zaimportuj random inaczej: zaimportuj random zaimportuj modulefinder # This will fail jeżeli random jest nie completely initialized x = random.randrange(1, 3) wyjąwszy Exception jako e: errors.append(e.with_traceback(Nic)) w_końcu: done_tasks.append(threading.get_ident()) finished = len(done_tasks) == N jeżeli finished: done.set() # Create a circular zaimportuj structure: A -> C -> B -> D -> A # NOTE: `time` jest already loaded oraz therefore doesn't threaten to deadlock. circular_imports_modules = { 'A': """jeżeli 1: zaimportuj time time.sleep(%(delay)s) x = 'a' zaimportuj C """, 'B': """jeżeli 1: zaimportuj time time.sleep(%(delay)s) x = 'b' zaimportuj D """, 'C': """zaimportuj B""", 'D': """zaimportuj A""", } klasa Finder: """A dummy finder to detect concurrent access to its find_spec() method.""" def __init__(self): self.numcalls = 0 self.x = 0 self.lock = threading.Lock() def find_spec(self, name, path=Nic, target=Nic): # Simulate some thread-unsafe behaviour. If calls to find_spec() # are properly serialized, `x` will end up the same jako `numcalls`. # Otherwise not. assert imp.lock_held() przy self.lock: self.numcalls += 1 x = self.x time.sleep(0.01) self.x = x + 1 klasa FlushingFinder: """A dummy finder which flushes sys.path_importer_cache when it gets called.""" def find_spec(self, name, path=Nic, target=Nic): sys.path_importer_cache.clear() klasa ThreadedImportTests(unittest.TestCase): def setUp(self): self.old_random = sys.modules.pop('random', Nic) def tearDown(self): # If the `random` module was already initialized, we restore the # old module at the end so that pickling tests don't fail. # See http://bugs.python.org/issue3657#msg110461 jeżeli self.old_random jest nie Nic: sys.modules['random'] = self.old_random def check_parallel_module_init(self): jeżeli imp.lock_held(): # This triggers on, e.g., z test zaimportuj autotest. 
podnieś unittest.SkipTest("can't run when zaimportuj lock jest held") done = threading.Event() dla N w (20, 50) * 3: jeżeli verbose: print("Trying", N, "threads ...", end=' ') # Make sure that random oraz modulefinder get reimported freshly dla modname w ['random', 'modulefinder']: spróbuj: usuń sys.modules[modname] wyjąwszy KeyError: dalej errors = [] done_tasks = [] done.clear() t0 = time.monotonic() przy start_threads(threading.Thread(target=task, args=(N, done, done_tasks, errors,)) dla i w range(N)): dalej completed = done.wait(10 * 60) dt = time.monotonic() - t0 jeżeli verbose: print("%.1f ms" % (dt*1e3), flush=Prawda, end=" ") dbg_info = 'done: %s/%s' % (len(done_tasks), N) self.assertNieprawda(errors, dbg_info) self.assertPrawda(completed, dbg_info) jeżeli verbose: print("OK.") def test_parallel_module_init(self): self.check_parallel_module_init() def test_parallel_meta_path(self): finder = Finder() sys.meta_path.insert(0, finder) spróbuj: self.check_parallel_module_init() self.assertGreater(finder.numcalls, 0) self.assertEqual(finder.x, finder.numcalls) w_końcu: sys.meta_path.remove(finder) def test_parallel_path_hooks(self): # Here the Finder instance jest only used to check concurrent calls # to path_hook(). finder = Finder() # In order dla our path hook to be called at each import, we need # to flush the path_importer_cache, which we do by registering a # dedicated meta_path entry. flushing_finder = FlushingFinder() def path_hook(path): finder.find_spec('') podnieś ImportError sys.path_hooks.insert(0, path_hook) sys.meta_path.append(flushing_finder) spróbuj: # Flush the cache a first time flushing_finder.find_spec('') numtests = self.check_parallel_module_init() self.assertGreater(finder.numcalls, 0) self.assertEqual(finder.x, finder.numcalls) w_końcu: sys.meta_path.remove(flushing_finder) sys.path_hooks.remove(path_hook) def test_import_hangers(self): # In case this test jest run again, make sure the helper module # gets loaded z scratch again. spróbuj: usuń sys.modules['test.threaded_import_hangers'] wyjąwszy KeyError: dalej zaimportuj test.threaded_import_hangers self.assertNieprawda(test.threaded_import_hangers.errors) def test_circular_imports(self): # The goal of this test jest to exercise implementations of the import # lock which use a per-module lock, rather than a global lock. # In these implementations, there jest a possible deadlock with # circular imports, dla example: # - thread 1 imports A (grabbing the lock dla A) which imports B # - thread 2 imports B (grabbing the lock dla B) which imports A # Such implementations should be able to detect such situations oraz # resolve them one way albo the other, without freezing. # NOTE: our test constructs a slightly less trivial zaimportuj cycle, # w order to better stress the deadlock avoidance mechanism. 
delay = 0.5 os.mkdir(TESTFN) self.addCleanup(shutil.rmtree, TESTFN) sys.path.insert(0, TESTFN) self.addCleanup(sys.path.remove, TESTFN) dla name, contents w circular_imports_modules.items(): contents = contents % {'delay': delay} przy open(os.path.join(TESTFN, name + ".py"), "wb") jako f: f.write(contents.encode('utf-8')) self.addCleanup(forget, name) importlib.invalidate_caches() results = [] def import_ab(): zaimportuj A results.append(getattr(A, 'x', Nic)) def import_ba(): zaimportuj B results.append(getattr(B, 'x', Nic)) t1 = threading.Thread(target=import_ab) t2 = threading.Thread(target=import_ba) t1.start() t2.start() t1.join() t2.join() self.assertEqual(set(results), {'a', 'b'}) def test_side_effect_import(self): code = """jeżeli 1: zaimportuj threading def target(): zaimportuj random t = threading.Thread(target=target) t.start() t.join()""" sys.path.insert(0, os.curdir) self.addCleanup(sys.path.remove, os.curdir) filename = TESTFN + ".py" przy open(filename, "wb") jako f: f.write(code.encode('utf-8')) self.addCleanup(unlink, filename) self.addCleanup(forget, TESTFN) self.addCleanup(rmtree, '__pycache__') importlib.invalidate_caches() __import__(TESTFN) @reap_threads def test_main(): old_switchinterval = Nic spróbuj: old_switchinterval = sys.getswitchinterval() sys.setswitchinterval(1e-5) wyjąwszy AttributeError: dalej spróbuj: run_unittest(ThreadedImportTests) w_końcu: jeżeli old_switchinterval jest nie Nic: sys.setswitchinterval(old_switchinterval) jeżeli __name__ == "__main__": test_main()
updater.py
from googaccount.helpers import get_channel from oauth2client.contrib.django_orm import Storage from googaccount.models import CredentialsModel from django.utils import timezone from datetime import datetime, timedelta import httplib2, json from meta.models import Meta import time import threading from main.models import LastActivity import django django.setup() DEFAULT_UPDATE_INTERVAL = 30 BASE_URL = "https://www.googleapis.com/youtube/v3/" DEBUG = True def ytapicall(appcreds, url): storage = Storage(CredentialsModel, 'id', appcreds, 'credential') credential = storage.get() if credential is None or credential.invalid == True: raise Exception("bad creds") http = httplib2.Http() http = credential.authorize(http) resp, data = http.request(url) data = json.loads(data) if 'error' in data: raise Exception("YouTube API Error: %s" % data['error']) return data def get_youtube_subs(appcreds): chan = get_channel(appcreds) url = "%schannels?part=statistics&id=%s" % (BASE_URL, chan) data = ytapicall(appcreds, url) sub_count = data['items'][0]['statistics']['subscriberCount'] return sub_count def get_viewers_for_vid(appcreds, vid): url = "%svideos?part=liveStreamingDetails&id=%s" % (BASE_URL, vid) data = ytapicall(appcreds, url) viewers = 0 if 'items' in data and len(data['items']): viewers = int(data['items'][0]['liveStreamingDetails']['concurrentViewers']) return viewers def get_likes_for_vid(appcreds, vid): url = "%svideos?part=statistics&id=%s" % (BASE_URL, vid) data = ytapicall(appcreds, url) likes = 0 if 'items' in data and len(data['items']): likes = int(data['items'][0]['statistics']['likeCount']) return likes def get_youtube_viewers(appcreds): url = "%sliveBroadcasts?broadcastType=all&part=snippet&broadcastStatus=active&maxResults=50&" % (BASE_URL) data = ytapicall(appcreds, url) m = 0 for event in data['items']: viewers = get_viewers_for_vid(appcreds, event['id']) if viewers > m: m = viewers return m def get_youtube_likes(appcreds): url = "%sliveBroadcasts?broadcastType=all&part=snippet&broadcastStatus=active&maxResults=50&" % (BASE_URL) data = ytapicall(appcreds, url) m = 0 for event in data['items']: likes = get_likes_for_vid(appcreds, event['id']) if likes > m: m = likes return m f = { 'youtubesubs': get_youtube_subs, 'youtubeviewers': get_youtube_viewers, 'youtubelikes': get_youtube_likes, } def update_meta(meta, delay=DEFAULT_UPDATE_INTERVAL): if not meta.type in f: raise Exception("Unsupported type") try: count = f[meta.type](meta.appcreds) meta.counter = count meta.last_update = timezone.now() meta.next_update = timezone.now() + timedelta(seconds=delay) meta.save() except Exception, E: print "Error on %s" % meta.id meta.running = False meta.next_update = timezone.now() + timedelta(seconds=delay*3) meta.save() def thread_runner(instance): if DEBUG: print "running instance", instance.id runtime = time.time() delay = DEFAULT_UPDATE_INTERVAL try: lu = LastActivity.objects.filter(user=instance.user) recent = timezone.now() - timedelta(seconds=120) if not lu.count() or lu[0].timestamp < recent: if DEBUG: print "Not active" delay = delay * 20 else: if DEBUG: print "Active" update_meta(instance, delay) instance.running = False instance.save() except KeyboardInterrupt, E: msg = "Attempting to update %s failed for %s: \n %s: %s" % (instance.type, instance.id, type(E), E) print msg # we don't update last_update on failure. 
instance.next_update = timezone.now() + timedelta(seconds=delay) instance.running = False instance.save() if DEBUG: print "finished", instance.id, " in ", time.time()-runtime def run(): # Reset any running tasks at runner start; if anything got stuck # because of a restart, we want to clear it when we start. for i in Meta.objects.filter(running=True): i.running = False i.save() while True: try: time_threshold = timezone.now() for i in Meta.objects.filter(next_update__lt=time_threshold, running=False).order_by('next_update'): # Set next update here; then reset it again when the function actually finishes. i.running = True i.save() t = threading.Thread(target=thread_runner, args=[i]) t.start() except Exception, E: print "Something very basic went wrong with something: %s" % E time.sleep(1) if __name__ == "__main__": run()
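The run() loop above implements a simple database-backed scheduler: rows carry next_update and running flags, due rows are claimed and handed to worker threads, and the worker clears the flag when it finishes. A generic sketch of the same pattern without the Django ORM (all names here are hypothetical):

import threading
import time


class Task(object):
    def __init__(self, name, interval):
        self.name = name
        self.interval = interval
        self.next_update = time.time()
        self.running = False

    def execute(self):
        print("updating %s" % self.name)


def worker(task):
    try:
        task.execute()
    finally:
        # always release the claim and reschedule, even if execute() failed
        task.next_update = time.time() + task.interval
        task.running = False


def dispatcher(tasks):
    while True:
        now = time.time()
        for task in tasks:
            if not task.running and task.next_update <= now:
                task.running = True  # claim before spawning, as run() does
                threading.Thread(target=worker, args=(task,)).start()
        time.sleep(1)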
server.py
import traceback
import urllib.parse
from http.server import HTTPServer, BaseHTTPRequestHandler
from threading import Thread

from telegram import Bot, Message

from . import bot


class CustomHandler(BaseHTTPRequestHandler):
    # noinspection PyPep8Naming
    def do_GET(self):
        try:
            path = urllib.parse.urlparse(self.path)
            args = urllib.parse.parse_qs(path.query)
            uuid = args.get('uuid')[0]
            score = args.get('score')[0]
            user_id, inline_message_id, message = bot.players.get(uuid)
            message: Message
            chat_id = None if message is None else message.chat_id
            message_id = None if message is None else message.message_id
            tg_bot: Bot = bot.bot.bot
            tg_bot.set_game_score(user_id=user_id, score=score,
                                  chat_id=chat_id, message_id=message_id,
                                  inline_message_id=inline_message_id)
            self.send_response(200)
            self.send_header('Content-Type', 'text/html')
            self.send_header('Access-Control-Allow-Origin', '*')
            self.end_headers()
            self.wfile.write(bytes('Success', encoding='utf-8'))
            return
        except Exception:
            traceback.print_exc()
            # Report the failure so the client does not wait on a request that never got a response.
            self.send_error(500, 'Failed to set game score')


def run():
    server_address = ('0.0.0.0', 8123)
    httpd = HTTPServer(server_address, CustomHandler)
    t = Thread(target=httpd.serve_forever, daemon=True, name="Score thread")
    t.start()
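The handler reads uuid and score from the query string, so a client only needs a plain GET. A hypothetical call against the server started by run() (the uuid value is made up):

import urllib.parse
import urllib.request

params = urllib.parse.urlencode({'uuid': 'some-game-uuid', 'score': 42})
with urllib.request.urlopen('http://localhost:8123/?' + params) as resp:
    print(resp.status, resp.read().decode('utf-8'))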
TGeventServer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import collections import logging import multiprocessing from multiprocessing import Process import gevent from gevent.server import StreamServer from thrift.server.TServer import TServer from thrift.transport.TTransport import TTransportException from thrift.transport.TSocket import TSocket from thrift.protocol.THeaderProtocol import THeaderProtocolFactory from gevent import monkey monkey.patch_all(select=False) class TGeventServer(TServer): """ Server with a fixed size pool of worker subprocesses which service requests. Note that if you need shared state between the handlers - it's up to you! Written by Dvir Volk, doat.com """ def __init__(self, port, *args): TServer.__init__(self, *args) self.port = port self.numWorkers = multiprocessing.cpu_count() self.workers = [] self.postForkCallback = None def setPostForkCallback(self, callback): if not isinstance(callback, collections.Callable): raise TypeError("This is not a callback!") self.postForkCallback = callback def setNumWorkers(self, num): """Set the number of worker threads that should be created""" self.numWorkers = num def serveClient(self, socket, address): """Process input/output from a client for as long as possible""" client = TSocket() client.setHandle(socket) itrans = self.inputTransportFactory.getTransport(client) otrans = self.outputTransportFactory.getTransport(client) iprot = self.inputProtocolFactory.getProtocol(itrans) if isinstance(self.inputProtocolFactory, THeaderProtocolFactory): oprot = iprot else: oprot = self.outputProtocolFactory.getProtocol(otrans) try: while True: self.processor.process(iprot, oprot) except TTransportException: pass except Exception as x: logging.exception(x) itrans.close() otrans.close() def serve_forever(self): if self.postForkCallback: self.postForkCallback() while True: try: self.server.serve_forever() except (KeyboardInterrupt, SystemExit): return 0 except Exception as x: logging.exception(x) def serve(self, listener=None): """Start a fixed number of worker threads and put client into a queue""" if not listener: listener = ('', self.port) self.server = StreamServer(listener, self.serveClient) # Temporary patch for gevent 0.13.x # Remove pre_start when we are fully on gevent 1.0 if gevent.version_info[0] == 0: self.server.pre_start() else: self.server.init_socket() print('Starting %s workers' % self.numWorkers) for _ in range(self.numWorkers - 1): # Current process also serves p = Process(target=self.serve_forever) self.workers.append(p) p.start() self.serve_forever() def stop(self): for worker in self.workers: worker.terminate() self.server.stop()
test_threading.py
import threading
import time

from test.fake_time_util import fake_time

import pytest

from pyinstrument import Profiler

from .util import do_nothing


def test_profiler_access_from_multiple_threads():
    profiler = Profiler()
    profiler.start()

    thread_exception = None

    def helper():
        while profiler._active_session and len(profiler._active_session.frame_records) < 10:
            time.sleep(0.0001)
        try:
            profiler.stop()
        except Exception as e:
            nonlocal thread_exception
            thread_exception = e

    t1 = threading.Thread(target=helper)
    t1.start()

    while t1.is_alive():
        do_nothing()

    t1.join()

    with pytest.raises(Exception) as excinfo:
        profiler.output_html()

    assert "this profiler is still running" in excinfo.value.args[0]

    assert thread_exception is not None
    assert (
        "Failed to stop profiling. Make sure that you start/stop profiling on the same thread."
        in thread_exception.args[0]
    )

    # the above stop failed. actually stop the profiler
    profiler.stop()
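For contrast with the failure path exercised above, the supported pattern is to start and stop the profiler on the same thread and then render the report. A minimal sketch using pyinstrument's public API (the work function is just a stand-in):

from pyinstrument import Profiler


def work():
    return sum(i * i for i in range(200000))


profiler = Profiler()
profiler.start()
work()
profiler.stop()
print(profiler.output_text())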
webserver.py
import flask
from flask import Flask
from threading import Thread

app = Flask('')


@app.route('/')
def home():
    return "Running!"


def run():
    app.run(host='0.0.0.0', port=8080)


def keep_alive():
    t = Thread(target=run)
    t.start()
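keep_alive() is meant to be called once from the main script, so the tiny Flask app answers pings in a background thread while the main workload blocks. A usage sketch (it assumes the module above is saved as webserver.py; the sleep loop stands in for the real blocking work, e.g. a bot's event loop):

import time

from webserver import keep_alive

keep_alive()  # "/" is now served on port 8080 from a background thread
while True:
    time.sleep(60)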
executorservodriver.py
import json import os import socket import threading import time import traceback from .base import (Protocol, BaseProtocolPart, RefTestExecutor, RefTestImplementation, TestharnessExecutor, strip_server) from ..testrunner import Stop from ..webdriver_server import wait_for_service webdriver = None ServoCommandExtensions = None here = os.path.join(os.path.split(__file__)[0]) extra_timeout = 5 def do_delayed_imports(): global webdriver import webdriver global ServoCommandExtensions class ServoCommandExtensions(object): def __init__(self, session): self.session = session @webdriver.client.command def get_prefs(self, *prefs): body = {"prefs": list(prefs)} return self.session.send_session_command("POST", "servo/prefs/get", body) @webdriver.client.command def set_prefs(self, prefs): body = {"prefs": prefs} return self.session.send_session_command("POST", "servo/prefs/set", body) @webdriver.client.command def reset_prefs(self, *prefs): body = {"prefs": list(prefs)} return self.session.send_session_command("POST", "servo/prefs/reset", body) def change_prefs(self, old_prefs, new_prefs): # Servo interprets reset with an empty list as reset everything if old_prefs: self.reset_prefs(*old_prefs.keys()) self.set_prefs({k: parse_pref_value(v) for k, v in new_prefs.items()}) # See parse_pref_from_command_line() in components/config/opts.rs def parse_pref_value(value): if value == "true": return True if value == "false": return False try: return float(value) except ValueError: return value class ServoBaseProtocolPart(BaseProtocolPart): def execute_script(self, script, asynchronous=False): pass def set_timeout(self, timeout): pass def wait(self): pass def set_window(self, handle): pass class ServoWebDriverProtocol(Protocol): implements = [ServoBaseProtocolPart] def __init__(self, executor, browser, capabilities, **kwargs): do_delayed_imports() Protocol.__init__(self, executor, browser) self.capabilities = capabilities self.host = browser.webdriver_host self.port = browser.webdriver_port self.init_timeout = browser.init_timeout self.session = None def connect(self): """Connect to browser via WebDriver.""" wait_for_service((self.host, self.port), timeout=self.init_timeout) self.session = webdriver.Session(self.host, self.port, extension=ServoCommandExtensions) self.session.start() def after_connect(self): pass def teardown(self): self.logger.debug("Hanging up on WebDriver session") try: self.session.end() except Exception: pass def is_alive(self): try: # Get a simple property over the connection self.session.window_handle # TODO what exception? 
except Exception: return False return True def wait(self): while True: try: self.session.execute_async_script("") except webdriver.TimeoutException: pass except (socket.timeout, IOError): break except Exception as e: self.logger.error(traceback.format_exc(e)) break class ServoWebDriverRun(object): def __init__(self, func, session, url, timeout, current_timeout=None): self.func = func self.result = None self.session = session self.url = url self.timeout = timeout self.result_flag = threading.Event() def run(self): executor = threading.Thread(target=self._run) executor.start() flag = self.result_flag.wait(self.timeout + extra_timeout) if self.result is None: assert not flag self.result = False, ("EXTERNAL-TIMEOUT", None) return self.result def _run(self): try: self.result = True, self.func(self.session, self.url, self.timeout) except webdriver.TimeoutException: self.result = False, ("EXTERNAL-TIMEOUT", None) except (socket.timeout, IOError): self.result = False, ("CRASH", None) except Exception as e: message = getattr(e, "message", "") if message: message += "\n" message += traceback.format_exc(e) self.result = False, ("INTERNAL-ERROR", e) finally: self.result_flag.set() def timeout_func(timeout): if timeout: t0 = time.time() return lambda: time.time() - t0 > timeout + extra_timeout else: return lambda: False class ServoWebDriverTestharnessExecutor(TestharnessExecutor): supports_testdriver = True def __init__(self, browser, server_config, timeout_multiplier=1, close_after_done=True, capabilities=None, debug_info=None, **kwargs): TestharnessExecutor.__init__(self, browser, server_config, timeout_multiplier=1, debug_info=None) self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities) with open(os.path.join(here, "testharness_servodriver.js")) as f: self.script = f.read() self.timeout = None def on_protocol_change(self, new_protocol): pass def is_alive(self): return self.protocol.is_alive() def do_test(self, test): url = self.test_url(test) timeout = test.timeout * self.timeout_multiplier + extra_timeout if timeout != self.timeout: try: self.protocol.session.timeouts.script = timeout self.timeout = timeout except IOError: self.logger.error("Lost webdriver connection") return Stop success, data = ServoWebDriverRun(self.do_testharness, self.protocol.session, url, timeout).run() if success: return self.convert_result(test, data) return (test.result_cls(*data), []) def do_testharness(self, session, url, timeout): session.url = url result = json.loads( session.execute_async_script( self.script % {"abs_url": url, "url": strip_server(url), "timeout_multiplier": self.timeout_multiplier, "timeout": timeout * 1000})) # Prevent leaking every page in history until Servo develops a more sane # page cache session.back() return result def on_environment_change(self, new_environment): self.protocol.session.extension.change_prefs( self.last_environment.get("prefs", {}), new_environment.get("prefs", {}) ) class TimeoutError(Exception): pass class ServoWebDriverRefTestExecutor(RefTestExecutor): def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None, capabilities=None, debug_info=None, **kwargs): """Selenium WebDriver-based executor for reftests""" RefTestExecutor.__init__(self, browser, server_config, screenshot_cache=screenshot_cache, timeout_multiplier=timeout_multiplier, debug_info=debug_info) self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities) self.implementation = RefTestImplementation(self) self.timeout = None with 
open(os.path.join(here, "reftest-wait_webdriver.js")) as f: self.wait_script = f.read() def reset(self): self.implementation.reset() def is_alive(self): return self.protocol.is_alive() def do_test(self, test): try: result = self.implementation.run_test(test) return self.convert_result(test, result) except IOError: return test.result_cls("CRASH", None), [] except TimeoutError: return test.result_cls("TIMEOUT", None), [] except Exception as e: message = getattr(e, "message", "") if message: message += "\n" message += traceback.format_exc(e) return test.result_cls("INTERNAL-ERROR", message), [] def screenshot(self, test, viewport_size, dpi): # https://github.com/web-platform-tests/wpt/issues/7135 assert viewport_size is None assert dpi is None timeout = (test.timeout * self.timeout_multiplier + extra_timeout if self.debug_info is None else None) if self.timeout != timeout: try: self.protocol.session.timeouts.script = timeout self.timeout = timeout except IOError: self.logger.error("Lost webdriver connection") return Stop return ServoWebDriverRun(self._screenshot, self.protocol.session, self.test_url(test), timeout).run() def _screenshot(self, session, url, timeout): session.url = url session.execute_async_script(self.wait_script) return session.screenshot() def on_environment_change(self, new_environment): self.protocol.session.extension.change_prefs( self.last_environment.get("prefs", {}), new_environment.get("prefs", {}) )
WalletServer.py
from APNS import APNS from Wallet import Sign import json import calendar from datetime import datetime from flask_sqlalchemy import SQLAlchemy from OpenSSL import Signing from hashlib import md5 from flask import Flask, request, Response import threading #Setup Server and Database app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///../Databases/AppleWalletTest.db' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False db = SQLAlchemy(app) #Models class Device(db.Model): id = db.Column(db.Integer, primary_key=True) device_id = db.Column(db.String(80)) uuid = db.Column(db.String(128)) push_token = db.Column(db.String(128)) pass_type_id = db.Column(db.String(64)) serial_number = db.Column(db.String(64)) registration_date = db.Column(db.Integer) def __init__(self, device_id, uuid, push_token, pass_type_id, serial_number, registration_date=None): self.device_id = device_id self.uuid = uuid self.push_token = push_token self.pass_type_id = pass_type_id self.serial_number = serial_number self.registration_date = registration_date if registration_date is not None else calendar.timegm(datetime.utcnow().utctimetuple()) def __repr__(self): return json.dumps({"Device": {"id": self.id, "device_id": self.device_id, "uuid": self.uuid, "push_token": self.push_token, "pass_type_id": self.pass_type_id, "serial_number": self.serial_number, "registration_date": self.registration_date} }, indent=4, sort_keys=False, separators=(",", ":")) class Pass(db.Model): id = db.Column(db.Integer, primary_key=True) serial_number = db.Column(db.String(64)) auth_token = db.Column(db.String(128)) pass_type_id = db.Column(db.String(64)) pass_json = db.Column(db.Text) last_updated = db.Column(db.Integer) def __init__(self, serial_number, auth_token, pass_type_id, pass_json, last_updated=None): self.serial_number = serial_number self.auth_token = auth_token self.pass_type_id = pass_type_id self.pass_json = pass_json self.last_updated = last_updated if last_updated is not None else calendar.timegm(datetime.utcnow().utctimetuple()) def __repr__(self): return json.dumps({"Pass": {"id": self.id, "serial_number": self.serial_number, "auth_token": self.auth_token, "pass_type_id": self.pass_type_id, "pass_json": self.pass_json, "last_updated": self.last_updated} }, indent=4, sort_keys=False, separators=(",", ":")) #Server Endpoints def make_response(code, data, contenttype=None, headers=None): return Response(response=data, status=code, headers=headers, mimetype=None, content_type=contenttype) def strip_validate_authToken(token): return token.replace("ApplePass ", "") if token is not None and "ApplePass " in token else None def flatten_tuple(tuple): res = [] for (item) in tuple: res.extend(item) return res # Register Devices for pass-book notifications @app.route("/wallet/<version>/devices/<deviceLibraryIdentifier>/registrations/<passTypeIdentifier>/<serialNumber>", methods=["POST"]) def registerWallet(version, deviceLibraryIdentifier, passTypeIdentifier, serialNumber): if version == "v1": authToken = strip_validate_authToken(request.headers.get("Authorization")) if authToken: if Pass.query.filter_by(serial_number=serialNumber, auth_token=authToken).count() > 0: uuid = deviceLibraryIdentifier + "-" + serialNumber if Device.query.filter_by(uuid=uuid).count() < 1: jsonData = request.get_json(silent=False) device = Device(deviceLibraryIdentifier, uuid, jsonData["pushToken"], passTypeIdentifier, serialNumber) db.session.add(device) db.session.commit() return make_response(201, "Successfully Registered") return 
make_response(200, "Already Registered") return make_response(401, "Unauthorized User") return make_response(404, "Unsupported Pass Version") # Unregister devices for passbook-notifications @app.route("/wallet/<version>/devices/<deviceLibraryIdentifier>/registrations/<passTypeIdentifier>/<serialNumber>", methods=["DELETE"]) def unregisterWallet(version, deviceLibraryIdentifier, passTypeIdentifier, serialNumber): if version == "v1": authToken = strip_validate_authToken(request.headers.get("Authorization")) if authToken: if Pass.query.filter_by(serial_number=serialNumber, auth_token=authToken).count() > 0: uuid = deviceLibraryIdentifier + "-" + serialNumber if Device.query.filter_by(uuid=uuid).count() > 0: Device.query.filter_by(uuid=uuid).delete() db.session.commit() return make_response(200, "Successfully Unregistered") return make_response(401, "Device doesn't exist") return make_response(401, "Unauthorized User") return make_response(404, "Unsupported Pass Version") # Get all passes that need updating @app.route("/wallet/<version>/devices/<deviceLibraryIdentifier>/registrations/<passTypeIdentifier>", methods=["GET"]) def getUpdatedPasses(version, deviceLibraryIdentifier, passTypeIdentifier): if version == "v1": if Device.query.filter_by(device_id=deviceLibraryIdentifier).count() > 0: serialNumbers = flatten_tuple(db.session.query(Device.serial_number).filter_by(device_id=deviceLibraryIdentifier, pass_type_id=passTypeIdentifier).all()) updatedSinceDate = request.args.get("passesUpdatedSince") if updatedSinceDate is not None and len(updatedSinceDate): serialNumbers = flatten_tuple(db.session.query(Pass.serial_number).filter(Pass.serial_number.in_(serialNumbers)).filter(Pass.last_updated >= int(updatedSinceDate)).all()) else: serialNumbers = flatten_tuple(db.session.query(Pass.serial_number).filter(Pass.serial_number.in_(serialNumbers)).all()) if len(serialNumbers) > 0: time_stamp = calendar.timegm(datetime.utcnow().utctimetuple()) resp_data = json.dumps({"lastUpdated": str(time_stamp), "serialNumbers": serialNumbers }, indent=4, sort_keys=False, separators=(",", ":")) return make_response(200, resp_data, "application/json", headers={"last-modified": str(time_stamp)}) else: return make_response(204, "No updates available") else: return make_response(404, "Device not registered") return make_response(404, "Unsupported Pass Version") @app.route("/wallet/<version>/passes/<passTypeIdentifier>/<serialNumber>", methods=["GET"]) def getLatestPass(version, passTypeIdentifier, serialNumber): if version == "v1": authToken = strip_validate_authToken(request.headers.get("Authorization")) if authToken: if Pass.query.filter_by(serial_number=serialNumber, pass_type_id=passTypeIdentifier, auth_token=authToken).count() > 0: hasNewPass = True modified_since = request.headers.get("if-modified-since") if modified_since is not None and len(modified_since): hasNewPass = db.session.query(Pass).filter(Pass.last_updated >= int(modified_since)).count() > 0 if hasNewPass: file = open("../Passes/ScenePass.pkpass", "rb") data = file.read() file.close() time_stamp = calendar.timegm(datetime.utcnow().utctimetuple()) return make_response(200, data, "application/vnd.apple.pkpass", headers={"last-modified": str(time_stamp)}) return make_response(304, "No updates available") return make_response(401, "Unauthorized User") return make_response(404, "Unsupported Pass Version") @app.route("/wallet/<version>/log", methods=["POST"]) def logWallet(version): jsonData = request.get_json(silent=False) print(json.dumps(jsonData, 
indent=4, sort_keys=False, separators=(",", ":")) + "\n\n") return make_response(200, "Successfully Logged") @app.route("/wallet/generatePassForUser", methods=["GET", "POST"]) def generatePassForUser(): #TODO Authentication request.. Used AuthToken, etc.. requestJSON = request.get_json(silent=False) #Just for testing if using GET requests to download the pass.. if requestJSON is None: requestJSON = { "cardNumber": "6046463399374746", "firstName": "Brandon", "lastName": "Test", "points": "925", "member_date": "09/17" } if requestJSON is not None: cardNumber = requestJSON.pop("cardNumber", None) firstName = requestJSON.pop("firstName", None) lastName = requestJSON.pop("lastName", None) points = requestJSON.pop("points", None) date = requestJSON.pop("member_date", None) #TODO: Check that all fields exist. pkpass = Sign.PKPass("../Passes/ScenePass.pass") passJSON = json.loads(pkpass.readJSON()) passJSON["authenticationToken"] = md5(cardNumber.encode("utf-8")).hexdigest() #md5(uuid4().bytes).hexdigest() passJSON["storeCard"]["headerFields"][0]["value"] = points passJSON["storeCard"]["secondaryFields"][0]["value"] = firstName + " " + lastName passJSON["storeCard"]["auxiliaryFields"][0]["value"] = date passJSON["barcode"]["message"] = cardNumber passJSON["barcode"]["altText"] = cardNumber passJSON["serialNumber"] = cardNumber pkpass.writeJSON(json.dumps(passJSON, ensure_ascii=False, separators=(",", ":"), indent=4, sort_keys=False)) pkpass.sign("../PassCerts/PassKit.p12", "../PassCerts/AppleWWDR.pem", "123") pkpass.compress("../Passes/ScenePass.pkpass") Pass.query.filter_by(serial_number=cardNumber).delete() db.session.commit() storePass = Pass(serial_number=passJSON["serialNumber"], auth_token=passJSON["authenticationToken"], pass_type_id=passJSON["passTypeIdentifier"], pass_json=json.dumps(passJSON, ensure_ascii=False, separators=(",", ":"), indent=0, sort_keys=False)) db.session.add(storePass) db.session.commit() thread = threading.Thread(name="updateFakePassThread", target=updateFakePassForUser, args=(cardNumber,)) thread.start() return make_response(200, json.dumps(passJSON, ensure_ascii=False, separators=(",", ":"), indent=4, sort_keys=False), "application/json") @app.route("/wallet/fakeUpdate/<cardNumber>", methods=["GET", "POST"]) def updateFakePassForUser(cardNumber): auth_token = md5(cardNumber.encode("utf-8")).hexdigest() passes = Pass.query.filter(Pass.serial_number == cardNumber, Pass.auth_token == auth_token).all() for ps in passes: ps.last_updated = calendar.timegm(datetime.utcnow().utctimetuple()) db.session.commit() apns = APNS.APNS(sandbox=False, use_certificate=False) tokens = flatten_tuple(db.session.query(Device.push_token).filter(Device.device_id).all()) for token in tokens: response = apns.push(token, "pass.com.scene.test", json.dumps({"aps":{}}, cls=None, ensure_ascii=False, separators=(',', ':'))) print(response.read().decode("utf-8")) return make_response(200, "Success") def run(): Signing.initializeOpenSSL() #db.drop_all() #TODO: Add Migration db.create_all() app.run("0.0.0.0", port=5000)
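A registration call against the endpoints above, for reference (hypothetical client sketch; it assumes the server from run() is listening on port 5000, reuses the test card number from generatePassForUser, and invents the device id and push token):

from hashlib import md5

import requests

card_number = "6046463399374746"
auth_token = md5(card_number.encode("utf-8")).hexdigest()  # matches the token set in generatePassForUser
url = ("http://localhost:5000/wallet/v1/devices/test-device-id/"
       "registrations/pass.com.scene.test/" + card_number)
resp = requests.post(url,
                     headers={"Authorization": "ApplePass " + auth_token},
                     json={"pushToken": "example-push-token"})
print(resp.status_code, resp.text)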
routers.py
import json import os import sys import time from datetime import datetime from multiprocessing import Process from random import randint, choice from flask import session, jsonify import firebase_admin import fitz import pytz from firebase_admin import credentials from firebase_admin import firestore from flask import redirect from flask import render_template, url_for, send_file from flask import request from werkzeug.utils import secure_filename from urllib.parse import quote, unquote from server import app app.secret_key = os.urandom(32) cred = credentials.Certificate(os.path.realpath("creds.json")) firebase_admin.initialize_app(cred) db = firestore.client() @app.route('/') def home_page(): return render_template('home.html') @app.route('/submit_book_details', methods=['POST']) def submit_book_details(): f = request.form session['dat_to_show'] = json.dumps(f.to_dict()) return jsonify(url=url_for('see_book_details')) @app.route('/see_book_details') def see_book_details(): data = json.loads(session['dat_to_show']) print("data: ", data) data: dict return render_template('book_details_page.html', **data) @app.route('/registration') def go_registration(): return render_template('register.html') @app.route('/login') def go_login(): return render_template('login.html') @app.route('/categories') def go_categories(): colors = ['primary', 'success', 'danger', 'info', 'warning'] return render_template('categories.html', categories={'Classic': {'sub': ['Avant-Garde', 'Baroque', 'Chant'], 'visitors': randint(100, 1000), 'colors': [choice(colors) for _ in colors] }, 'Fiction': {'sub': ['Avant-Garde', 'Baroque', 'Chant'], 'visitors': randint(100, 1000), 'colors': [choice(colors) for _ in colors] }, 'Thriller': {'sub': ['Avant-Garde', 'Baroque', 'Chant'], 'visitors': randint(100, 1000), 'colors': [choice(colors) for _ in colors] }, 'History': {'sub': ['Avant-Garde', 'Baroque', 'Chant'], 'visitors': randint(100, 1000), 'colors': [choice(colors) for _ in colors] }, 'Others': {'sub': ['Avant-Garde', 'Baroque', 'Chant'], 'visitors': randint(100, 1000), 'colors': [choice(colors) for _ in colors] }}) @app.route('/share_books') def go_share_books(): return render_template('share_books.html') def parse_first_image(file_path: str): pdf_document_object = fitz.open(file_path) page = pdf_document_object.loadPage(0) pix = page.getPixmap(matrix=fitz.Matrix(2, 2)) # 300 DPI pix.writePNG( os.path.realpath(os.path.join('static', 'pdf_images', "{}.png".format( file_path.split("/")[-1]))) ) print("Done fetching..") return @app.route('/upload_pdf', methods=['POST']) def upload_pdf(): f = request.files print(f['pdf']) filename = secure_filename(f['pdf'].filename).lstrip().rstrip() if filename: full_file_path = os.path.realpath(os.path.join('static', 'pdf', "{}".format(filename))) f['pdf'].save(full_file_path) Process(target=parse_first_image, args=(full_file_path,)).start() return redirect(url_for('go_share_books')) @app.route('/profile') def go_profile(): return render_template('profile.html') @app.route('/shops') def go_shops(): return render_template('shop.html', shops=['BookStore', 'Websites', 'Buy PDF']) @app.route('/about_us') def go_about_us(): return render_template('about_us.html') @app.route('/reviews') def go_reviews(): users_ref = db.collection(u'user-review') docs = users_ref.stream() # for doc in docs: # print(f'{doc.id} => {doc.to_dict()}') r = [doc.to_dict() for doc in docs] reviews = [r[i:i + 3] for i in range(0, len(r), 3)] print(reviews) return render_template('reviews.html', reviews=reviews) 
@app.route('/authors') def go_authors(): top_authors = [ ['J.K Rowling', randint(20, 120), 'https://wallpapercave.com/wp/wp3635857.jpg'] , ['George R R Martin', randint(20, 120), 'https://i.guim.co.uk/img/media/021e90c7e3bf949a9a900722c746da43019ef2a7/235_1068_5648_3389/master/5648.jpg?width=1020&quality=85&auto=format&fit=max&s=9c36a072c2c1c48e4fe9778b5cc2afad'] , ['Yuval Noah Harari', randint(20, 120), 'https://static01.nyt.com/images/2018/11/11/business/11HARARI-01/11HARARI-01-superJumbo.jpg?quality=75&auto=webp'] , ['Mark Manson', randint(20, 120), 'https://markmanson.net/wp-content/uploads/2019/04/mark-manson-media-kit-hi-res-headshot-square-1.jpg'] , ['Anthony Doerr', randint(20, 120), 'https://i.guim.co.uk/img/media/04457d428cdd20659c82604926d5dc25625c306b/0_165_5760_3456/master/5760.jpg?width=1020&quality=85&auto=format&fit=max&s=8b2b7f6ee07bd52ce10bd67cbc39d9d3'] , ['Andrzej Sapkowski', randint(20, 120), 'https://www.gamereactor.eu/media/02/witcherauthorreceive_2740213_650x.jpg'] , ['Haruki Murakami', randint(20, 120), 'https://api.time.com/wp-content/uploads/2000/04/haruki-murakami-time-100-2015-icons1.jpg?w=800&quality=85'] , ['Sowad Ahmed', randint(20, 120), 'https://www.biography.com/.image/t_share/MTE4MDAzNDEwNjUyMjY4MDQ2/james-patterson-9434791-1-402.jpg'] , ['Steve Smith', randint(20, 120), 'https://www.cricketcountry.com/wp-content/uploads/2015/11/Steven-Smith.jpg'] , ['Stephen King', randint(20, 120), 'https://www.getalternative.com/wp-content/uploads/2016/10/37-1-768x461.jpg'] ] return render_template('authors.html', top_authors=top_authors) @app.route('/read_in') def go_read_in(): books = [{'book_name': b, 'read_by': randint(100, 1000), 'book_size': round((os.path.getsize(os.path.realpath(os.path.join('static', 'pdf', b)))) / (1024 ** 2), 2)} for b in os.listdir(os.path.realpath(os.path.join('static', 'pdf'))) if b.endswith(".pdf")] books = [books[i:i + 2] for i in range(0, len(books), 2)] print(books) return render_template('read_in.html', books=books) @app.route('/read_pdf/<pdf_name>') def read_pdf(pdf_name: str): assert pdf_name in os.listdir(os.path.realpath(os.path.join('static', 'pdf'))) return render_template('read_pdf.html', pdf_name=pdf_name) @app.route('/download_pdf/<pdf_name>') def download_pdf(pdf_name: str): print(pdf_name) assert pdf_name in os.listdir(os.path.realpath(os.path.join('static', 'pdf'))) return send_file(os.path.realpath(os.path.join('static', 'pdf', pdf_name)), as_attachment=True) @app.route('/search_for_book/<query_string>') def search_for_book(query_string): books = [{'book_name': b, 'read_by': randint(100, 1000), 'book_size': round((os.path.getsize(os.path.realpath(os.path.join('static', 'pdf', b)))) / (1024 ** 2), 2)} for b in os.listdir(os.path.realpath(os.path.join('static', 'pdf'))) if b.endswith(".pdf") and b.lower().find(query_string.lower()) != -1] books = [books[i:i + 2] for i in range(0, len(books), 2)] return render_template('search_result.html', books=books) @app.route('/submit_review', methods=['POST']) def submit_review(): f = request.form doc_ref = db.collection(u'user-review').document(u'{}'.format(int(time.time()))) doc_ref.set({ u'username': f['user_name'], u'review_text': f['review_text'], u'when': datetime.now(pytz.timezone('Asia/Dhaka')).strftime("%I:%M:%S %p %d %B,%Y") }) return redirect(url_for('go_reviews'))
simple.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'ipetrash' import os.path from threading import Thread import traceback from urllib.request import urlretrieve from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLineEdit, QFormLayout def download_file(url: str, file_name: str): try: local_file_name, _ = urlretrieve(url, file_name) print(os.path.abspath(local_file_name)) except: print(traceback.format_exc()) class MainWindow(QWidget): def __init__(self): super().__init__() self.line_edit_url = QLineEdit('https://codeload.github.com/gil9red/SimplePyScripts/zip/master') self.line_edit_file_name = QLineEdit('SimplePyScripts.zip') self.button_download = QPushButton('Download') self.button_download.clicked.connect(self.download) layout = QFormLayout() layout.addRow('URL:', self.line_edit_url) layout.addRow('File Name:', self.line_edit_file_name) layout.addWidget(self.button_download) self.setLayout(layout) def download(self): url = self.line_edit_url.text() file_name = self.line_edit_file_name.text() thread = Thread(target=download_file, args=(url, file_name)) thread.start() if __name__ == '__main__': app = QApplication([]) mw = MainWindow() mw.show() app.exec()
test_ipc.py
#!/usr/bin/env python3 # -*- mode: python -*- # -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ There are currently no IPC tests within python, in part because there are no servers yet available. """ import logging import threading import time import unittest from avro import ipc, protocol, schema def NowMS(): return int(time.time() * 1000) ECHO_PROTOCOL_JSON = """ { "protocol" : "Echo", "namespace" : "org.apache.avro.ipc.echo", "types" : [ { "type" : "record", "name" : "Ping", "fields" : [ { "name" : "timestamp", "type" : "long", "default" : -1 }, { "name" : "text", "type" : "string", "default" : "" } ] }, { "type" : "record", "name" : "Pong", "fields" : [ { "name" : "timestamp", "type" : "long", "default" : -1 }, { "name" : "ping", "type" : "Ping" } ] } ], "messages" : { "ping" : { "request" : [ { "name" : "ping", "type" : "Ping" } ], "response" : "Pong" } } } """ ECHO_PROTOCOL = protocol.Parse(ECHO_PROTOCOL_JSON) class EchoResponder(ipc.Responder): def __init__(self): super(EchoResponder, self).__init__( local_protocol=ECHO_PROTOCOL, ) def Invoke(self, message, request): logging.info('Message: %s', message) logging.info('Request: %s', request) ping = request['ping'] return {'timestamp': NowMS(), 'ping': ping} class TestIPC(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestIPC, self).__init__(*args, **kwargs) # Reference to an Echo RPC over HTTP server: self._server = None def StartEchoServer(self): self._server = ipc.AvroIpcHttpServer( interface='localhost', port=0, responder=EchoResponder(), ) def ServerThread(): self._server.serve_forever() self._server_thread = threading.Thread(target=ServerThread) self._server_thread.start() logging.info( 'Echo RPC Server listening on %s:%s', *self._server.server_address) logging.info('RPC socket: %s', self._server.socket) def StopEchoServer(self): assert (self._server is not None) self._server.shutdown() self._server_thread.join() self._server.server_close() self._server = None def testEchoService(self): """Tests client-side of the Echo service.""" self.StartEchoServer() try: (server_host, server_port) = self._server.server_address transceiver = ipc.HTTPTransceiver(host=server_host, port=server_port) requestor = ipc.Requestor( local_protocol=ECHO_PROTOCOL, transceiver=transceiver, ) response = requestor.Request( message_name='ping', request_datum={'ping': {'timestamp': 31415, 'text': 'hello ping'}}, ) logging.info('Received echo response: %s', response) response = requestor.Request( message_name='ping', request_datum={'ping': {'timestamp': 123456, 'text': 'hello again'}}, ) logging.info('Received echo response: %s', response) transceiver.Close() finally: self.StopEchoServer() if __name__ == '__main__': raise Exception('Use run_tests.py')
down.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @File : down.py @Time : 2019-07-29 21:29 @Author : Empty Chan @Contact : chen19941018@gmail.com @Description: @License : (C) Copyright 2016-2017, iFuture Corporation Limited. """ # -*- coding:utf-8 -*- import os import re import sys import time import json import base64 import datetime import requests from lxml import etree from threading import Thread # 单线程,多线程采集方式选择(多线程采集速度快但机器负载短时高) thread = 'multi' # thread = 'single' # 理论python和前端js会自动转义,但如果采集名称因引号或其它需转义的字符报错,请将相应采集名修改如下 # hot_name = .replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").replace("\"", "").replace("\'", "").strip() # 采集数据保存目录,为了安全请修改本程序名字,或移动到其他目录,并修改以下路径,默认与程序同目录 dir = os.path.dirname(os.path.abspath(__file__)) + "/json/" # dir = "webrootdir/json/" # dir = "/tmp/json/" try: os.mkdir(dir) except: print("json文件夹创建失败(已存在或无写入权限)") # 字符替换加密(默认为大小写反转),修改此处顺序和添加数字替换可实现不同密码加密(并同时修改get/index.php内密码) def multiple_replace(text): dic = {"a": "A", "b": "B", "c": "C", "d": "D", "e": "E", "f": "F", "g": "G", "h": "H", "i": "I", "j": "J", "k": "K", "l": "L", "m": "M", "n": "N", "o": "O", "p": "P", "q": "Q", "r": "R", "s": "S", "t": "T", "u": "U", "v": "V", "w": "W", "x": "X", "y": "Y", "z": "Z", "A": "a", "B": "b", "C": "c", "D": "d", "E": "e", "F": "f", "G": "g", "H": "h", "I": "i", "J": "j", "K": "k", "L": "l", "M": "m", "N": "n", "O": "o", "P": "p", "Q": "q", "R": "r", "S": "s", "T": "t", "U": "u", "V": "v", "W": "w", "X": "x", "Y": "y", "Z": "z"} pattern = "|".join(map(re.escape, list(dic.keys()))) return re.sub(pattern, lambda m: dic[m.group()], text) # UTC时间转本地时间(+8:00) def utc2local(utc_st): now_stamp = time.time() local_time = datetime.datetime.fromtimestamp(now_stamp) utc_time = datetime.datetime.utcfromtimestamp(now_stamp) offset = local_time - utc_time local_st = utc_st + offset return local_st def parse_baidu(name): try: jsondict = {} if name == 'now': jsondict["title"] = "百度实时热点" url = "http://top.baidu.com/buzz?b=1" if name == 'today': jsondict["title"] = "百度今日热点" url = "http://top.baidu.com/buzz?b=341" if name == 'week': jsondict["title"] = "百度七日热点" url = "http://top.baidu.com/buzz?b=42" fname = dir + "baidu_" + name + ".json" r = requests.get(url, timeout=(5, 10)) r.encoding = 'gb2312' soup = etree.HTML(r.text.replace("<tr >", "<tr class=\"hideline\">")) list = [] list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time for soup_a in soup.xpath("//tr[@class='hideline']"): blist = {} hot_name = soup_a.xpath("./td[2]/a[1]/text()")[0].replace("\\n", "").replace("\n", "").replace("\\r", "").replace( "\r", "").strip() hot_url = soup_a.xpath("./td[2]/a[1]/@href")[0] hot_num = soup_a.xpath("./td[@class='last']/span/text()")[0] group = name hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url blist["num"] = hot_num list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "(" + name + ")" + "采集错误,请及时更新规则!") # 知乎热榜 def parse_zhihu_hot(): try: fname = dir + "zhihu_hot.json" 
zhihu_all = "https://www.zhihu.com/api/v3/feed/topstory/hot-lists/total?limit=50&desktop=true" headers = { 'X-Requested-With': 'XMLHttpRequest', 'Referer': 'http://www.zhihu.com', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 'Host': 'www.zhihu.com' } r = requests.get(zhihu_all, headers=headers, timeout=(5, 10)).text data = json.loads(r) news = data['data'] list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "知乎全站热榜" for n in news: blist = {} hot_name = n['target']['title'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = n['target']['url'].replace("api.zhihu.com/questions/", "www.zhihu.com/question/") group = "zhihu_hot" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 微博热点排行榜 def parse_weibo(): try: fname = dir + "weibo.json" weibo_ssrd = "https://s.weibo.com/top/summary?cate=realtimehot" weibo = "https://s.weibo.com" r = requests.get(weibo_ssrd, timeout=(5, 10)) r.encoding = 'utf-8' soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "微博热点排行榜" for soup_a in soup.xpath("//td[@class='td-02']"): blist = {} hot_name = soup_a.xpath("./a/text()")[0].replace("\\n", "").replace("\n", "").replace("\\r", "").replace( "\r", "").strip() hot_url = weibo + soup_a.xpath("./a/@href")[0] try: hot_num = soup_a.xpath("./span/text()")[0] except IndexError: hot_num = '' # hot_num = None #与''皆是赋值空 if "javascript:void(0)" in hot_url: # 过滤微博的广告,做个判断 str_list = "" else: group = "weibo" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url if hot_num: blist["num"] = hot_num list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 贴吧热度榜单 def parse_tieba(): try: fname = dir + "tieba.json" tb_url = "http://tieba.baidu.com/hottopic/browse/topicList" headers = { 'X-Requested-With': 'XMLHttpRequest', 'Referer': 'http://tieba.baidu.com', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 'Host': 'tieba.baidu.com' } r = requests.get(tb_url, headers=headers, timeout=(5, 10)).text data = json.loads(r) news = 
data['data']['bang_topic']['topic_list'] list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "贴吧热度榜单" for n in news: blist = {} hot_name = n['topic_desc'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = n['topic_url'].replace("&amp;", "&") group = "tieba" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # V2EX热帖 def parse_v2ex(): try: url = "https://www.v2ex.com/?tab=hot" fname = dir + "v2ex.json" r = requests.get(url, timeout=(5, 10)) soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "V2EX热帖" for soup_a in soup.xpath("//span[@class='item_title']/a"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = "https://www.v2ex.com" + soup_a.get('href') group = "v2ex" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 豆瓣讨论精选 def parse_douban(): try: url = "https://www.douban.com/group/explore" headers = { 'Host': 'www.douban.com', 'Referer': 'https://www.douban.com/group/explore' } fname = dir + "douban.json" r = requests.get(url, headers=headers, timeout=(5, 10)) soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "豆瓣讨论精选" for soup_a in soup.xpath("//div[@class='channel-item']/div[@class='bd']/h3/a"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = soup_a.get('href') group = "douban" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, 
separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 天涯热帖 def parse_tianya(): try: url = "http://bbs.tianya.cn/hotArticle.jsp" headers = { 'Host': 'bbs.tianya.cn', 'Referer': 'http://bbs.tianya.cn/hotArticle.jsp' } fname = dir + "tianya.json" r = requests.get(url, headers=headers, timeout=(5, 10)) soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "天涯热帖" for soup_a in soup.xpath("//div[@class='mt5']/table/tbody/tr/td[@class='td-title']/a"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = 'http://bbs.tianya.cn' + soup_a.get('href') group = "tianya" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 抽屉新热榜 def parse_chouti(): try: url = "https://dig.chouti.com/link/hot" headers = { 'Referer': 'https://dig.chouti.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', } fname = dir + "chouti.json" r = requests.get(url, headers=headers, timeout=(5, 10)).text data = json.loads(r) news = data['data'] list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(str(data['data'][0]['time_into_pool'])[0:10]))) jsondict["time"] = list_time jsondict["title"] = "抽屉新热榜" for n in news: blist = {} hot_url = n['originalUrl'] if 'chouti.com' not in hot_url: hot_name = n['title'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() group = "chouti" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 虎嗅网资讯 def parse_huxiu(): try: url = "https://www-api.huxiu.com/v1/article/list" headers = { 'Referer': 'https://www.huxiu.com/article', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', } fname = dir + "huxiu.json" r = requests.get(url, headers=headers, timeout=(5, 10)).text data = json.loads(r) news = data['data']['dataList'] list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(data['data']['dataList'][0]['dateline']))) jsondict["time"] = list_time jsondict["title"] = "虎嗅网资讯" for n in news: blist = 
{} hot_url = n['share_url'] hot_name = n['title'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() group = "huxiu" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 煎蛋网 def parse_jandan(): try: url = "https://jandan.net/" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 'Referer': 'https://jandan.net/' } fname = dir + "jandan.json" r = requests.get(url, headers=headers, timeout=(5, 10)) soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "煎蛋网" for soup_a in soup.xpath("//div[@class='post f list-post']/div[@class='indexs']/h2/a"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = soup_a.get('href') group = "jandan" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 知乎日报 def parse_zhihu_daily(): try: url = "https://daily.zhihu.com/" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 'Referer': 'https://daily.zhihu.com/' } fname = dir + "zhihu_daily.json" r = requests.get(url, headers=headers, timeout=(5, 10)).text.replace(" class=\"home\"", "") soup = etree.HTML(r) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "知乎日报" for soup_a in soup.xpath("//div[@class='box']/a"): blist = {} hot_name = soup_a.xpath('./span/text()')[0].replace("\\n", "").replace("\n", "").replace("\\r", "").replace( "\r", "").strip() hot_url = "https://daily.zhihu.com" + soup_a.get('href') group = "zhihu_daily" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with 
open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 黑客派-好玩 def parse_hacpai(name): try: jsondict = {} if name == "play": jsondict["title"] = "黑客派-好玩" group = "hacpai_play" url = "https://hacpai.com/domain/play" if name == "hot": jsondict["title"] = "黑客派-热议" group = "hacpai_hot" url = "https://hacpai.com/recent/hot" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 'Referer': 'https://hacpai.com/' } fname = dir + "hacpai_" + name + ".json" r = requests.get(url, headers=headers, timeout=(5, 10)) soup = etree.HTML(r.text) list = [] list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time for soup_a in soup.xpath("//h2[@class='article-list__title article-list__title--view fn__flex-1']/a[@data-id]"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = soup_a.get('href') hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "(" + name + ")" + "采集错误,请及时更新规则!") # 猫扑热帖 def parse_mop(): try: url = "https://www.mop.com/" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "mop.json" r = requests.get(url, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' soup = etree.HTML(r.text.replace("<h3>", "").replace("</h3>", "")) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "猫扑热帖" for soup_a in soup.xpath("//div[@class='swiper-wrapper']")[0]: blist = {} hot_name = soup_a.xpath("./a/div/h2/text()")[0].replace("\\n", "").replace("\n", "").replace("\\r", "").replace( "\r", "").strip() hot_url = soup_a.xpath("./a/@href")[0] group = "mop" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) for soup_b in soup.xpath("//div[@class='shuffling-two']/a"): blist = {} hot_name = soup_b.xpath("./div/p/text()")[0].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = soup_b.get('href') group = "mop" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( 
base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) for soup_c in soup.xpath("//div[@class='mop-hot']/div[1]/div/div/div/div/div[2]/a"): blist = {} hot_name = soup_c.text.replace("\r", "").replace("\\n", "").replace("\n", "").replace("\\r", "").replace( "\r", "").strip() hot_url = soup_c.get('href') group = "mop" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 果壳-科学人 def parse_guokr(): try: url = "https://www.guokr.com/scientific/" url2 = "https://www.guokr.com/beta/proxy/science_api/articles?retrieve_type=by_category&page=1" url3 = "https://www.guokr.com/beta/proxy/science_api/articles?retrieve_type=by_category&page=2" headers = { 'Referer': 'https://www.guokr.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "guokr.json" r = requests.get(url, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' soup = etree.HTML(r.text.replace("<span class=\"split\">|</span>", "")) r2 = requests.get(url2, headers=headers, timeout=(5, 10)).text data2 = json.loads(r2) r3 = requests.get(url3, headers=headers, timeout=(5, 10)).text data3 = json.loads(r3) list = [] jsondict = {} list_time = soup.xpath("//div[@class='article-info']/text()")[1].replace("\"", "").strip() jsondict["time"] = list_time jsondict["title"] = "果壳-科学人" for soup_a in soup.xpath("//a[@class='article-title']"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = soup_a.get('href') group = "guokr" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) for n in data2: blist = {} hot_url = "https://www.guokr.com/article/" + str(n['id']) + "/" hot_name = n['title'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() group = "guokr" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) for n in data3: blist = {} hot_url = "https://www.guokr.com/article/" + 
str(n['id']) + "/" hot_name = n['title'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() group = "guokr" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # IT之家 def parse_ithome(): try: url = "https://www.ithome.com/" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "ithome.json" r = requests.get(url, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "IT之家" for soup_a in soup.xpath("//div[@class='bx']/ul/li/a"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = soup_a.get('href') group = "ithome" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 央视要闻 def parse_cctv(): try: url = "http://news.cctv.com/data/index.json" headers = { 'Referer': 'https://news.cctv.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "cctv.json" r = requests.get(url, headers=headers, timeout=(5, 10)).text data = json.loads(r) list = [] jsondict = {} list_time = data['updateTime'] jsondict["time"] = list_time jsondict["title"] = "央视要闻" for n in data['rollData']: blist = {} hot_url = n['url'] hot_name = n['title'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() group = "cctv" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # cnBeta def 
parse_cnbeta(): try: url = "https://www.cnbeta.com/" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "cnbeta.json" r = requests.get(url, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "cnBeta" for soup_a in soup.xpath("//div[@class='items-area']/div/dl/dt/a"): blist = {} hot_name = soup_a.xpath("./span/text()") if hot_name: hot_name = hot_name[0].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() else: hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = soup_a.get('href').replace("//hot", "https://hot").strip() group = "cnbeta" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 联合早报-中港台 def parse_zaobao(): try: url = "https://www.zaobao.com.sg/realtime/china" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "zaobao.json" r = requests.get(url, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "联合早报-中港台" for soup_a in soup.xpath("//a[@target='_self']"): blist = {} hot_name = soup_a.xpath("./div/span/text()")[0].replace("\\n", "").replace("\n", "").replace("\\r", "").replace( "\r", "").strip() hot_url = "https://www.zaobao.com.sg" + soup_a.get('href').strip() group = "zaobao" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 微信公众号热门文章 def parse_weixin(): try: url = "https://weixin.sogou.com/" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "weixin.json" fname2 = dir + "weixin_hot.json" r = requests.get(url, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time 
jsondict["title"] = "微信公众号搜索热词" for soup_a in soup.xpath("//ol[@class='hot-news']/li"): blist = {} hot_name = soup_a.xpath("./a/text()")[0].replace("\\n", "").replace("\n", "").replace("\\r", "").replace( "\r", "").strip() hot_url = soup_a.xpath("./a/@href")[0] hot_num = soup_a.xpath("./span/span/@style")[0].replace("\\n", "").replace("\n", "").replace("\\r", "").replace( "\r", "").replace("width:", "").replace("%", "").strip() group = "weixin" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url blist["num"] = hot_num list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) list = [] jsondict = {} jsondict["time"] = list_time jsondict["title"] = "微信公众号热门文章" for soup_a in soup.xpath("//div[@class='txt-box']/h3/a"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = soup_a.get('href') group = "weixin" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname2, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 澎湃新闻 def parse_thepaper(): try: url = "https://www.thepaper.cn/load_chosen.jsp" headers = { 'Referer': 'https://www.thepaper.cn/', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "thepaper.json" r = requests.get(url, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "澎湃新闻" for soup_a in soup.xpath("//div[@class='news_li']/h2/a"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = "https://www.thepaper.cn/" + soup_a.get('href') group = "thepaper" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 
纽约时报中文网-国际简报 def parse_nytimes(): try: url = "https://m.cn.nytimes.com/world" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "nytimes.json" r = requests.get(url, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "纽约时报中文网-国际简报" for soup_a in soup.xpath("//li[@class='regular-item']/a"): blist = {} hot_name = soup_a.get('title').replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = soup_a.get('href') group = "nytimes" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 新京报-排行 def parse_bjnews(): try: url = "http://www.bjnews.com.cn/" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "bjnews.json" r = requests.get(url, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "新京报-排行" for soup_a in soup.xpath("//li/h3/a"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = soup_a.get('href') group = "bjnews" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 奇客的资讯 def parse_solidot(): try: url = "https://www.solidot.org/" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "solidot.json" r = requests.get(url, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' soup = etree.HTML(r.text) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "奇客的资讯" for soup_a in soup.xpath("//div[@class='bg_htit']/h2/a"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = "https://www.solidot.org" + soup_a.get('href') group = "solidot" hot_url = "get/?url=" + 
multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 新浪科技 def parse_sinatech(): try: url = "https://feed.mix.sina.com.cn/api/roll/get?pageid=372&lid=2431&k=&num=50&page=1" headers = { 'Referer': 'http://tech.sina.com.cn/roll/rollnews.shtml#pageid=372&lid=2431&k=&num=50&page=1', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "sinatech.json" r = requests.get(url, headers=headers, timeout=(5, 10)).text data = json.loads(r) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(data['result']['data'][0]['ctime']))) jsondict["time"] = list_time jsondict["title"] = "新浪科技" for n in data['result']['data']: blist = {} hot_url = n['url'] hot_name = n['title'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() group = "sinatech" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 全球主机交流论坛 def parse_hostloc(): try: url = "https://www.hostloc.com/forum.php?mod=forumdisplay&fid=45&filter=author&orderby=dateline" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } cookies = {'hkCM_2132_saltkey': 'YUW6N18j', 'hkCM_2132_lastvisit': '1565188564', 'hkCM_2132_visitedfid': '45', 'L7DFW': 'f64d0d1c0e4afb6b8913e5cf1d39cbf2', 'hkCM_2132_sid': 'svW2eC', 'hkCM_2132_st_t': '0%7C1565195462%7Cc9e8fe0fa2043784ed064e22c9180fb3', 'hkCM_2132_forum_lastvisit': 'D_45_1565195462', 'hkCM_2132_lastact': '1565195463%09home.php%09misc', 'hkCM_2132_sendmail': '1'} fname = dir + "hostloc.json" r = requests.get(url, headers=headers, cookies=cookies, timeout=(5, 10)).text.replace("<th class=\"lock\">", "<abc>") soup = etree.HTML(r) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "全球主机交流论坛" for soup_a in soup.xpath("//th/a[@onclick='atarget(this)']"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = "https://www.hostloc.com/" + soup_a.get('href') group = "hostloc" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( 
base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # 什么值得买-今日热门文章 def parse_smzdm_article(name): try: jsondict = {} if name == "today": id = "1" jsondict["title"] = "什么值得买热门文章(日榜)" if name == "week": id = "7" jsondict["title"] = "什么值得买热门文章(周榜)" if name == "month": id = "30" jsondict["title"] = "什么值得买热门文章(月榜)" url = "https://post.smzdm.com/rank/json_more/?unit=" + id + "&p=1" url2 = "https://post.smzdm.com/rank/json_more/?unit=" + id + "&p=2" headers = { 'Referer': 'https://post.smzdm.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "smzdm_article_" + name + ".json" r = requests.get(url, headers=headers, timeout=(5, 10)).text data = json.loads(r) r2 = requests.get(url2, headers=headers, timeout=(5, 10)).text data2 = json.loads(r2) list = [] list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time for n in data['data']: blist = {} hot_url = n['article_url'] hot_name = n['title'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() group = "smzdm_article" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) for n in data2['data']: blist = {} hot_url = n['article_url'] hot_name = n['title'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() group = "smzdm_article" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "(" + name + ")" + "采集错误,请及时更新规则!") # 知乎每日精选-编辑推荐 def parse_zhihu_good(): try: url = "https://www.zhihu.com/node/ExploreRecommendListV2" headers = { 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8', 'authority': 'www.zhihu.com', 'Referer': 'https://www.zhihu.com/explore/recommendations', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } d = {'method': 'next', 'params': '{"limit":40,"offset":0}'} fname = dir + "zhihu_good.json" r = 
requests.post(url, data=d, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' json_data = "" list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time jsondict["title"] = "知乎每日精选-编辑推荐" for json_d in json.loads(r.text)['msg']: json_data = json_data + json_d soup = etree.HTML(json_data) for soup_a in soup.xpath("//div[@class='zm-item']/h2/a"): blist = {} hot_name = soup_a.text.replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() hot_url = soup_a.get('href').replace("/question/", "https://www.zhihu.com/question/") group = "zhihu_good" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "采集错误,请及时更新规则!") # AppStore排行榜 def parse_itunes(name, country): try: if country == "cn": country2 = "(国区)" if country == "us": country2 = "(美区)" jsondict = {} if name == "free": name2 = "FreeApplications" jsondict["title"] = "AppStore免费排行榜" + country2 if name == "paid": name2 = "PaidApplications" jsondict["title"] = "AppStore付费排行榜" + country2 if name == "revenue": name2 = "AppsByRevenue" jsondict["title"] = "AppStore收入排行榜" + country2 url = "https://itunes.apple.com/WebObjects/MZStoreServices.woa/ws/charts?cc=" + country + "&g=36&limit=100&name=" + name2 headers = { 'Referer': 'https://www.apple.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } fname = dir + "itunes_" + name + "_" + country + ".json" r = requests.get(url, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' data = json.loads(r.text) str_list = "https://itunes.apple.com/" + country + "/lookup?id=" # str_list="https://itunes.apple.com/lookup?country="+country+"&id=" list = [] list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time for n in data['resultIds']: str_list = str_list + n + "," r = requests.get(str_list, headers=headers, timeout=(5, 10)) r.encoding = 'utf-8' data = json.loads(r.text) for n in data['results']: blist = {} hot_url = n['trackViewUrl'] hot_name = n['trackName'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() releaseNotes = n.get('releaseNotes') if releaseNotes: releaseNotes = releaseNotes.replace("<", "").replace(">", "") else: releaseNotes = "" version = n.get('version') uptime = utc2local(datetime.datetime.strptime(n.get('currentVersionReleaseDate'), '%Y-%m-%dT%H:%M:%SZ')) hot_num = n['genres'][0] group = "itunes_free" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url blist["num"] = 
hot_num blist["description"] = "最新版本: " + version + "\n更新时间: " + str(uptime) + " (北京时间)\n\n" + releaseNotes list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "(" + name + "," + country + ")" + "采集错误,请及时更新规则!") # 头条推荐 def parse_toutiao(name): try: url = "https://www.toutiao.com/api/pc/feed/?category=__all__&min_behot_time=" + str(int(time.time())) headers = { 'Referer': 'https://www.toutiao.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36' } cookies = {'tt_webid': '6722913290500539908', 'WEATHER_CITY': '%E5%8C%97%E4%BA%AC', '__tasessionId': 'ccyrlpfcb1565300241329', 'tt_webid': '6722913290500539908', 'csrftoken': '36c38d5c3a8001b9d598c09f79c8c653'} fname = dir + "toutiao_" + name + ".json" r = requests.get(url, headers=headers, cookies=cookies, timeout=(5, 10)).text data = json.loads(r) list = [] jsondict = {} list_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) jsondict["time"] = list_time if name == 'a': jsondict["title"] = "头条1" if name == 'b': jsondict["title"] = "头条2" if name == 'c': jsondict["title"] = "头条3" if name == 'd': jsondict["title"] = "头条4" if name == 'e': jsondict["title"] = "头条5" if name == 'f': jsondict["title"] = "头条6" if name == 'g': jsondict["title"] = "头条7" if name == 'h': jsondict["title"] = "头条8" if name == 'i': jsondict["title"] = "头条9" if name == 'j': jsondict["title"] = "头条10" for n in data['data']: blist = {} hot_url = "https://www.toutiao.com" + n['source_url'] hot_name = n['title'].replace("\\n", "").replace("\n", "").replace("\\r", "").replace("\r", "").strip() group = "toutiao" hot_url = "get/?url=" + multiple_replace(base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_url.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) + "&group=" + group + "&title=" + multiple_replace( base64.urlsafe_b64encode( base64.urlsafe_b64encode(hot_name.encode("utf-8")).decode("utf-8").encode("utf-8")).decode( "utf-8").replace("=", "")[::-1]) blist["name"] = hot_name blist["url"] = hot_url list.append(blist) jsondict["data"] = list with open(fname, "w+", encoding='utf-8') as f: f.write(json.dumps(jsondict, ensure_ascii=False, indent=2, separators=(',', ':'))) except: print(sys._getframe().f_code.co_name + "(" + name + ")" + "采集错误,请及时更新规则!") # 单线程运行 def single_run(): print("单线程采集开始", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) t1 = time.time() parse_smzdm_article("today") parse_baidu("now") parse_itunes("free", "cn") parse_hostloc() parse_sinatech() parse_solidot() parse_bjnews() parse_nytimes() parse_itunes("paid", "cn") parse_thepaper() parse_weixin() parse_zaobao() parse_cnbeta() parse_baidu("today") parse_cctv() parse_ithome() parse_guokr() parse_itunes("revenue", "cn") parse_mop() parse_hacpai("play") parse_smzdm_article("week") parse_zhihu_daily() parse_jandan() parse_huxiu() parse_chouti() parse_itunes("free", "us") parse_tianya() parse_douban() parse_v2ex() parse_tieba() parse_weibo() parse_itunes("paid", "us") parse_baidu("week") parse_zhihu_hot() parse_zhihu_good() parse_smzdm_article("month") parse_hacpai("hot") parse_itunes("revenue", "us") print("单线程采集完成", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) print("耗时:", time.time() - t1) def single_toutiao(): print("开始采集头条(前5次)") t1 = time.time() parse_toutiao("a") time.sleep(60) parse_toutiao("b") 
time.sleep(60) parse_toutiao("c") time.sleep(60) parse_toutiao("d") time.sleep(60) parse_toutiao("e") print("头条采集完成(前5次)") print("耗时:", time.time() - t1) def single_toutiao_b(): print("开始采集头条(后5次)") t1 = time.time() parse_toutiao("f") time.sleep(60) parse_toutiao("g") time.sleep(60) parse_toutiao("h") time.sleep(60) parse_toutiao("i") time.sleep(60) parse_toutiao("j") print("头条采集完成(后5次)") print("耗时:", time.time() - t1) # 多线程抓取 def multi_run(): print("多线程采集开始", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) t1 = time.time() threads = [] ts2 = Thread(target=parse_hostloc) ts3 = Thread(target=parse_sinatech) ts4 = Thread(target=parse_solidot) ts5 = Thread(target=parse_bjnews) ts6 = Thread(target=parse_nytimes) ts7 = Thread(target=parse_thepaper) ts8 = Thread(target=parse_weixin) ts9 = Thread(target=parse_zaobao) ts10 = Thread(target=parse_cnbeta) ts11 = Thread(target=parse_cctv) ts12 = Thread(target=parse_ithome) ts13 = Thread(target=parse_guokr) ts14 = Thread(target=parse_mop) ts16 = Thread(target=parse_hacpai, args=("play",)) ts17 = Thread(target=parse_hacpai, args=("hot",)) ts18 = Thread(target=parse_zhihu_daily) ts19 = Thread(target=parse_jandan) ts20 = Thread(target=parse_huxiu) ts21 = Thread(target=parse_chouti) ts22 = Thread(target=parse_tianya) ts23 = Thread(target=parse_douban) ts24 = Thread(target=parse_v2ex) ts25 = Thread(target=parse_tieba) ts26 = Thread(target=parse_weibo) ts27 = Thread(target=parse_baidu, args=("now",)) ts28 = Thread(target=parse_baidu, args=("today",)) ts29 = Thread(target=parse_baidu, args=("week",)) ts30 = Thread(target=parse_zhihu_hot) ts31 = Thread(target=parse_zhihu_good) ts32 = Thread(target=parse_itunes, args=("free", "cn",)) ts33 = Thread(target=parse_itunes, args=("paid", "cn",)) ts34 = Thread(target=parse_itunes, args=("revenue", "cn",)) ts35 = Thread(target=parse_itunes, args=("free", "us",)) ts36 = Thread(target=parse_itunes, args=("paid", "us",)) ts37 = Thread(target=parse_itunes, args=("revenue", "us",)) ts38 = Thread(target=parse_smzdm_article, args=("today",)) ts39 = Thread(target=parse_smzdm_article, args=("week",)) ts40 = Thread(target=parse_smzdm_article, args=("month",)) threads.append(ts38) threads.append(ts32) threads.append(ts2) threads.append(ts3) threads.append(ts27) threads.append(ts4) threads.append(ts5) threads.append(ts6) threads.append(ts33) threads.append(ts7) threads.append(ts8) threads.append(ts9) threads.append(ts10) threads.append(ts11) threads.append(ts34) threads.append(ts12) threads.append(ts28) threads.append(ts13) threads.append(ts14) threads.append(ts30) threads.append(ts16) threads.append(ts18) threads.append(ts35) threads.append(ts39) threads.append(ts19) threads.append(ts20) threads.append(ts21) threads.append(ts22) threads.append(ts36) threads.append(ts23) threads.append(ts24) threads.append(ts25) threads.append(ts26) threads.append(ts17) threads.append(ts29) threads.append(ts31) threads.append(ts37) threads.append(ts40) for t in threads: t.start() for t in threads: t.join() print("多线程采集完成", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) print("耗时:", time.time() - t1) # 因头条接口短时间请求时相同内容太多,此处的首次更新并发是为了避免前端内容太少或浏览器控制台报错(强迫症) def multi_toutiao(): t1 = time.time() threads = [] ts1 = Thread(target=parse_toutiao, args=("a",)) ts2 = Thread(target=parse_toutiao, args=("b",)) ts3 = Thread(target=parse_toutiao, args=("c",)) ts4 = Thread(target=parse_toutiao, args=("d",)) ts5 = Thread(target=parse_toutiao, args=("e",)) ts6 = Thread(target=parse_toutiao, args=("f",)) ts7 = Thread(target=parse_toutiao, args=("g",)) 
ts8 = Thread(target=parse_toutiao, args=("h",)) ts9 = Thread(target=parse_toutiao, args=("i",)) ts10 = Thread(target=parse_toutiao, args=("j",)) threads.append(ts1) threads.append(ts2) threads.append(ts3) threads.append(ts4) threads.append(ts5) threads.append(ts6) threads.append(ts7) threads.append(ts8) threads.append(ts9) threads.append(ts10) for t in threads: t.start() for t in threads: t.join() print("首次运行采集头条完成") print("耗时:", time.time() - t1) if __name__ == "__main__": if thread == 'single': while True: single_run() single_toutiao() single_run() single_toutiao_b() if thread == 'multi': multi_toutiao() while True: multi_run() single_toutiao() multi_run() single_toutiao_b()
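# A minimal sketch (not part of the original down.py): the inverse of the URL/title
# obfuscation used by every parser above. Each link is double urlsafe-Base64 encoded,
# has its '=' padding stripped, is reversed, and is then case-swapped by
# multiple_replace(); per the comments above, get/index.php is expected to undo this.
# The sketch assumes the default case-swap table (no digit substitutions) and reuses
# the multiple_replace() defined above; decode_hot_param is a hypothetical name.
import base64

def decode_hot_param(value):
    # Undo the case swap, then the reversal.
    swapped = multiple_replace(value)[::-1]
    # Restore the '=' padding that was stripped before reversing.
    swapped += "=" * (-len(swapped) % 4)
    # Outer Base64 layer: yields the inner Base64 string (its own padding intact).
    inner = base64.urlsafe_b64decode(swapped.encode("utf-8"))
    # Inner Base64 layer: yields the original URL or title text.
    return base64.urlsafe_b64decode(inner).decode("utf-8")

# Example with a hypothetical query value:
#   decode_hot_param(query["url"])  ->  "https://www.zhihu.com/question/..."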
inference_images.py
""" Inference images: Extract matting on images. Example: python inference_images.py \ --model-type mattingrefine \ --model-backbone resnet50 \ --model-backbone-scale 0.25 \ --model-refine-mode sampling \ --model-refine-sample-pixels 80000 \ --model-checkpoint "PATH_TO_CHECKPOINT" \ --images-src "PATH_TO_IMAGES_SRC_DIR" \ --images-bgr "PATH_TO_IMAGES_BGR_DIR" \ --output-dir "PATH_TO_OUTPUT_DIR" \ --output-type com fgr pha """ import argparse import torch import os import shutil from torch import nn from torch.nn import functional as F from torch.utils.data import DataLoader from torchvision import transforms as T from torchvision.transforms.functional import to_pil_image from threading import Thread from tqdm import tqdm from dataset import ImagesDataset, ZipDataset from dataset import augmentation as A from model import MattingBase, MattingRefine from inference_utils import HomographicAlignment # --------------- Arguments --------------- parser = argparse.ArgumentParser(description='Inference images') parser.add_argument('--model-type', type=str, required=True, choices=['mattingbase', 'mattingrefine']) parser.add_argument('--model-backbone', type=str, required=True, choices=['resnet101', 'resnet50', 'mobilenetv2']) parser.add_argument('--model-backbone-scale', type=float, default=0.25) parser.add_argument('--model-checkpoint', type=str, required=True) parser.add_argument('--model-refine-mode', type=str, default='sampling', choices=['full', 'sampling', 'thresholding']) parser.add_argument('--model-refine-sample-pixels', type=int, default=80_000) parser.add_argument('--model-refine-threshold', type=float, default=0.7) parser.add_argument('--model-refine-kernel-size', type=int, default=3) parser.add_argument('--images-src', type=str, required=True) parser.add_argument('--images-bgr', type=str, required=True) parser.add_argument('--device', type=str, choices=['cpu', 'cuda'], default='cuda') parser.add_argument('--preprocess-alignment', action='store_true') parser.add_argument('--output-dir', type=str, required=True) parser.add_argument('--output-types', type=str, required=True, nargs='+', choices=['com', 'pha', 'fgr', 'err', 'ref']) parser.add_argument('-y', action='store_true') args = parser.parse_args() assert 'err' not in args.output_types or args.model_type in ['mattingbase', 'mattingrefine'], \ 'Only mattingbase and mattingrefine support err output' assert 'ref' not in args.output_types or args.model_type in ['mattingrefine'], \ 'Only mattingrefine support ref output' # --------------- Main --------------- device = torch.device(args.device) # Load model if args.model_type == 'mattingbase': model = MattingBase(args.model_backbone) if args.model_type == 'mattingrefine': model = MattingRefine( args.model_backbone, args.model_backbone_scale, args.model_refine_mode, args.model_refine_sample_pixels, args.model_refine_threshold, args.model_refine_kernel_size) model = model.to(device).eval() model.load_state_dict(torch.load(args.model_checkpoint), strict=False) # Load images dataset = ZipDataset([ ImagesDataset(args.images_src), ImagesDataset(args.images_bgr), ], assert_equal_length=True, transforms=A.PairCompose([ HomographicAlignment() if args.preprocess_alignment else A.PairApply(nn.Identity()), A.PairApply(T.ToTensor()) ])) dataloader = DataLoader(dataset, batch_size=1, num_workers=8, pin_memory=True) # Create output directory if os.path.exists(args.output_dir): if args.y or input(f'Directory {args.output_dir} already exists. Override? 
[Y/N]: ').lower() == 'y': shutil.rmtree(args.output_dir) else: exit() for output_type in args.output_types: os.makedirs(os.path.join(args.output_dir, output_type)) # Worker function def writer(img, path): img = to_pil_image(img[0].cpu()) img.save(path) # Conversion loop with torch.no_grad(): for i, (src, bgr) in enumerate(tqdm(dataloader)): src = src.to(device, non_blocking=True) bgr = bgr.to(device, non_blocking=True) if args.model_type == 'mattingbase': pha, fgr, err, _ = model(src, bgr) elif args.model_type == 'mattingrefine': pha, fgr, _, _, err, ref = model(src, bgr) pathname = dataset.datasets[0].filenames[i] pathname = os.path.relpath(pathname, args.images_src) pathname = os.path.splitext(pathname)[0] if 'com' in args.output_types: com = torch.cat([fgr * pha.ne(0), pha], dim=1) Thread(target=writer, args=(com, os.path.join(args.output_dir, 'com', pathname + '.png'))).start() if 'pha' in args.output_types: Thread(target=writer, args=(pha, os.path.join(args.output_dir, 'pha', pathname + '.jpg'))).start() if 'fgr' in args.output_types: Thread(target=writer, args=(fgr, os.path.join(args.output_dir, 'fgr', pathname + '.jpg'))).start() if 'err' in args.output_types: err = F.interpolate(err, src.shape[2:], mode='bilinear', align_corners=False) Thread(target=writer, args=(err, os.path.join(args.output_dir, 'err', pathname + '.jpg'))).start() if 'ref' in args.output_types: ref = F.interpolate(ref, src.shape[2:], mode='nearest') Thread(target=writer, args=(ref, os.path.join(args.output_dir, 'ref', pathname + '.jpg'))).start()
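# A minimal sketch (not part of the original script): the same model called directly on
# one source/background pair, without ZipDataset/DataLoader. It assumes the same
# model.MattingRefine API used above; 'checkpoint.pth', 'src.png' and 'bgr.png' are
# placeholder paths.
import torch
from PIL import Image
from torchvision.transforms.functional import to_tensor, to_pil_image
from model import MattingRefine

device = torch.device('cuda')

# Same constructor argument order as the call in the script above.
net = MattingRefine('resnet50', 0.25, 'sampling', 80_000, 0.7, 3).to(device).eval()
net.load_state_dict(torch.load('checkpoint.pth'), strict=False)

# One pair, batched to shape [1, 3, H, W] in [0, 1].
src = to_tensor(Image.open('src.png')).unsqueeze(0).to(device)
bgr = to_tensor(Image.open('bgr.png')).unsqueeze(0).to(device)

with torch.no_grad():
    pha, fgr, *_ = net(src, bgr)  # same output order the conversion loop unpacks

# Composite foreground + alpha into an RGBA image, as the 'com' branch above does.
to_pil_image(torch.cat([fgr * pha.ne(0), pha], dim=1)[0].cpu()).save('com.png')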
scheduler.py
#!/usr/bin/env python import os import pickle import sys import time import socket import random from optparse import OptionParser import threading import subprocess from operator import itemgetter import logging import signal import getpass import zmq ctx = zmq.Context() import dpark.pymesos as mesos import dpark.pymesos.mesos_pb2 as mesos_pb2 class Task: def __init__(self, id): self.id = id self.tried = 0 self.state = -1 self.state_time = 0 REFUSE_FILTER = mesos_pb2.Filters() REFUSE_FILTER.refuse_seconds = 10*60 # 10 mins def parse_mem(m): try: return float(m) except ValueError: number, unit = float(m[:-1]), m[-1].lower() if unit == 'g': number *= 1024 elif unit == 'k': number /= 1024 return number def safe(f): def _(self, *a, **kw): with self.lock: r = f(self, *a, **kw) return r return _ class SubmitScheduler(object): def __init__(self, options, command): self.framework_id = None self.framework = mesos_pb2.FrameworkInfo() self.framework.user = getpass.getuser() if self.framework.user == 'root': raise Exception("drun is not allowed to run as 'root'") name = '[drun@%s] ' % socket.gethostname() + ' '.join(sys.argv[1:]) if len(name) > 512: name = name[:512] + '...' self.framework.name = name self.executor = self.getExecutorInfo() self.cpus = options.cpus self.mem = parse_mem(options.mem) self.options = options self.command = command self.total_tasks = list(reversed([Task(i) for i in range(options.start, options.tasks)])) self.task_launched = {} self.slaveTasks = {} self.started = False self.stopped = False self.status = 0 self.next_try = 0 self.lock = threading.RLock() self.last_offer_time = time.time() def getExecutorInfo(self): frameworkDir = os.path.abspath(os.path.dirname(sys.argv[0])) executorPath = os.path.join(frameworkDir, "executor.py") execInfo = mesos_pb2.ExecutorInfo() execInfo.executor_id.value = "default" execInfo.command.value = executorPath if hasattr(execInfo, 'framework_id'): execInfo.framework_id.value = str(self.framework_id) return execInfo def create_port(self, output): sock = ctx.socket(zmq.PULL) host = socket.gethostname() port = sock.bind_to_random_port("tcp://0.0.0.0") def redirect(): poller = zmq.Poller() poller.register(sock, zmq.POLLIN) while True: socks = poller.poll(100) if not socks: if self.stopped: break continue line = sock.recv() output.write(line) t = threading.Thread(target=redirect) t.daemon = True t.start() return t, "tcp://%s:%d" % (host, port) @safe def registered(self, driver, fid, masterInfo): logging.debug("Registered with Mesos, FID = %s" % fid.value) self.framework_id = fid.value self.std_t, self.std_port = self.create_port(sys.stdout) self.err_t, self.err_port = self.create_port(sys.stderr) def getResource(self, offer): cpus, mem = 0, 0 for r in offer.resources: if r.name == 'cpus': cpus = float(r.scalar.value) elif r.name == 'mem': mem = float(r.scalar.value) return cpus, mem def getAttributes(self, offer): attrs = {} for a in offer.attributes: attrs[a.name] = a.text.value return attrs @safe def resourceOffers(self, driver, offers): tpn = self.options.task_per_node random.shuffle(offers) self.last_offer_time = time.time() for offer in offers: attrs = self.getAttributes(offer) if self.options.group and attrs.get('group', 'None') not in self.options.group: driver.launchTasks(offer.id, [], REFUSE_FILTER) continue cpus, mem = self.getResource(offer) logging.debug("got resource offer %s: cpus:%s, mem:%s at %s", offer.id.value, cpus, mem, offer.hostname) sid = offer.slave_id.value tasks = [] while (self.total_tasks and cpus+1e-4 >= self.cpus and 
mem >= self.mem and (tpn ==0 or tpn > 0 and len(self.slaveTasks.get(sid,set())) < tpn)): logging.debug("Accepting slot on slave %s (%s)", offer.slave_id.value, offer.hostname) t = self.total_tasks.pop() task = self.create_task(offer, t, cpus) tasks.append(task) t.state = mesos_pb2.TASK_STARTING t.state_time = time.time() self.task_launched[t.id] = t self.slaveTasks.setdefault(sid, set()).add(t.id) cpus -= self.cpus mem -= self.mem if not self.total_tasks: break logging.debug("dispatch %d tasks to slave %s", len(tasks), offer.hostname) driver.launchTasks(offer.id, tasks, REFUSE_FILTER) def create_task(self, offer, t, cpus): task = mesos_pb2.TaskInfo() task.task_id.value = "%d-%d" % (t.id, t.tried) task.slave_id.value = offer.slave_id.value task.name = "task %s/%d" % (t.id, self.options.tasks) task.executor.MergeFrom(self.executor) env = dict(os.environ) env['DRUN_RANK'] = str(t.id) env['DRUN_SIZE'] = str(self.options.tasks) command = self.command[:] if self.options.expand: for i, x in enumerate(command): command[i] = x % {'RANK': t.id, 'SIZE': self.options.tasks} task.data = pickle.dumps([os.getcwd(), command, env, self.options.shell, self.std_port, self.err_port, None]) cpu = task.resources.add() cpu.name = "cpus" cpu.type = 0 # mesos_pb2.Value.SCALAR cpu.scalar.value = min(self.cpus, cpus) mem = task.resources.add() mem.name = "mem" mem.type = 0 # mesos_pb2.Value.SCALAR mem.scalar.value = min(self.mem, mem) return task @safe def statusUpdate(self, driver, update): logging.debug("Task %s in state %d" % (update.task_id.value, update.state)) tid = int(update.task_id.value.split('-')[0]) if tid not in self.task_launched: # check failed after launched for t in self.total_tasks: if t.id == tid: self.task_launched[tid] = t self.total_tasks.remove(t) break else: logging.debug("Task %d is finished, ignore it", tid) return t = self.task_launched[tid] t.state = update.state t.state_time = time.time() if update.state == mesos_pb2.TASK_RUNNING: self.started = True elif update.state == mesos_pb2.TASK_LOST: logging.warning("Task %s was lost, try again", tid) if not self.total_tasks: driver.reviveOffers() # request more offers again t.tried += 1 t.state = -1 self.task_launched.pop(tid) self.total_tasks.append(t) elif update.state in (mesos_pb2.TASK_FINISHED, mesos_pb2.TASK_FAILED): t = self.task_launched.pop(tid) slave = None for s in self.slaveTasks: if tid in self.slaveTasks[s]: slave = s self.slaveTasks[s].remove(tid) break if update.state >= mesos_pb2.TASK_FAILED: if t.tried < self.options.retry: t.tried += 1 logging.warning("task %d failed with %d, retry %d", t.id, update.state, t.tried) if not self.total_tasks: driver.reviveOffers() # request more offers again self.total_tasks.append(t) # try again else: logging.error("task %d failed with %d on %s", t.id, update.state, slave) self.stop(1) if not self.task_launched and not self.total_tasks: self.stop(0) @safe def check(self, driver): now = time.time() for tid, t in self.task_launched.items(): if t.state == mesos_pb2.TASK_STARTING and t.state_time + 30 < now: logging.warning("task %d lauched failed, assign again", tid) if not self.total_tasks: driver.reviveOffers() # request more offers again t.tried += 1 t.state = -1 self.task_launched.pop(tid) self.total_tasks.append(t) # TODO: check run time @safe def offerRescinded(self, driver, offer): logging.debug("resource rescinded: %s", offer) # task will retry by checking @safe def slaveLost(self, driver, slave): logging.warning("slave %s lost", slave.value) @safe def error(self, driver, code, message): 
logging.error("Error from Mesos: %s (error code: %d)" % (message, code)) @safe def stop(self, status): if self.stopped: return self.stopped = True self.status = status self.std_t.join() self.err_t.join() logging.debug("scheduler stopped") class MPIScheduler(SubmitScheduler): def __init__(self, options, command): SubmitScheduler.__init__(self, options, command) self.used_hosts = {} self.used_tasks = {} self.id = 0 self.p = None self.publisher = ctx.socket(zmq.PUB) port = self.publisher.bind_to_random_port('tcp://0.0.0.0') host = socket.gethostname() self.publisher_port = 'tcp://%s:%d' % (host, port) def start_task(self, driver, offer, k): t = Task(self.id) self.id += 1 self.task_launched[t.id] = t self.used_tasks[t.id] = (offer.hostname, k) task = self.create_task(offer, t, k) logging.debug("lauching %s task with offer %s on %s, slots %d", t.id, offer.id.value, offer.hostname, k) driver.launchTasks(offer.id, [task], REFUSE_FILTER) @safe def resourceOffers(self, driver, offers): random.shuffle(offers) launched = sum(self.used_hosts.values()) self.last_offer_time = time.time() for offer in offers: cpus, mem = self.getResource(offer) logging.debug("got resource offer %s: cpus:%s, mem:%s at %s", offer.id.value, cpus, mem, offer.hostname) if launched >= self.options.tasks or offer.hostname in self.used_hosts: driver.launchTasks(offer.id, [], REFUSE_FILTER) continue attrs = self.getAttributes(offer) if self.options.group and attrs.get('group', 'None') not in self.options.group: driver.launchTasks(offer.id, [], REFUSE_FILTER) continue slots = int(min(cpus/self.cpus, mem/self.mem) + 1e-5) if self.options.task_per_node: slots = min(slots, self.options.task_per_node) slots = min(slots, self.options.tasks - launched) if slots >= 1: self.used_hosts[offer.hostname] = slots launched += slots self.start_task(driver, offer, slots) else: driver.launchTasks(offer.id, [], REFUSE_FILTER) if launched < self.options.tasks: logging.warning('not enough offers: need %d offer %d, waiting more resources', self.options.tasks, launched) @safe def statusUpdate(self, driver, update): logging.debug("Task %s in state %d" % (update.task_id.value, update.state)) tid = int(update.task_id.value.split('-')[0]) if tid not in self.task_launched: logging.error("Task %d not in task_launched", tid) return t = self.task_launched[tid] t.state = update.state t.state_time = time.time() hostname, slots = self.used_tasks[tid] if update.state == mesos_pb2.TASK_RUNNING: launched = sum(self.used_hosts.values()) ready = all(t.state == mesos_pb2.TASK_RUNNING for t in self.task_launched.values()) if launched == self.options.tasks and ready: logging.debug("all tasks are ready, start to run") self.start_mpi() elif update.state in (mesos_pb2.TASK_LOST, mesos_pb2.TASK_FAILED): if not self.started: logging.warning("Task %s was lost, try again", tid) driver.reviveOffers() # request more offers again t.tried += 1 t.state = -1 self.used_hosts.pop(hostname) self.used_tasks.pop(tid) self.task_launched.pop(tid) else: logging.error("Task %s failed, cancel all tasks", tid) self.stop(1) elif update.state == mesos_pb2.TASK_FINISHED: if not self.started: logging.warning("Task %s has not started, ignore it %s", tid, update.state) return t = self.task_launched.pop(tid) if not self.task_launched: self.stop(0) @safe def check(self, driver): now = time.time() for tid, t in self.task_launched.items(): if t.state == mesos_pb2.TASK_STARTING and t.state_time + 30 < now: logging.warning("task %d lauched failed, assign again", tid) driver.reviveOffers() # request more 
offers again t.tried += 1 t.state = -1 hostname, slots = self.used_tasks[tid] self.used_hosts.pop(hostname) self.used_tasks.pop(tid) self.task_launched.pop(tid) def create_task(self, offer, t, k): task = mesos_pb2.TaskInfo() task.task_id.value = "%s-%s" % (t.id, t.tried) task.slave_id.value = offer.slave_id.value task.name = "task %s" % t.id task.executor.MergeFrom(self.executor) env = dict(os.environ) task.data = pickle.dumps([os.getcwd(), None, env, self.options.shell, self.std_port, self.err_port, self.publisher_port]) cpus, mem = self.getResource(offer) cpu = task.resources.add() cpu.name = "cpus" cpu.type = 0 #mesos_pb2.Value.SCALAR cpu.scalar.value = min(self.cpus * k, cpus) mem = task.resources.add() mem.name = "mem" mem.type = 0 #mesos_pb2.Value.SCALAR mem.scalar.value = min(self.mem * k, mem) return task def start_mpi(self): try: slaves = self.try_to_start_mpi(self.command, self.options.tasks, self.used_hosts.items()) except Exception: self.broadcast_command({}) self.next_try = time.time() + random.randint(5,10) return commands = dict(zip(self.used_hosts.keys(), slaves)) self.broadcast_command(commands) self.started = True def broadcast_command(self, command): def repeat_pub(): for i in xrange(10): self.publisher.send(pickle.dumps(command)) time.sleep(1) if self.stopped: break t = threading.Thread(target=repeat_pub) t.deamon = True t.start() return t def try_to_start_mpi(self, command, tasks, items): if self.p: try: self.p.kill() except: pass hosts = ','.join("%s:%d" % (hostname, slots) for hostname, slots in items) logging.debug("choosed hosts: %s", hosts) cmd = ['mpirun', '-prepend-rank', '-launcher', 'none', '-hosts', hosts, '-np', str(tasks)] + command self.p = p = subprocess.Popen(cmd, bufsize=0, stdout=subprocess.PIPE) slaves = [] prefix = 'HYDRA_LAUNCH: ' while True: line = p.stdout.readline() if not line: break if line.startswith(prefix): slaves.append(line[len(prefix):-1].strip()) if line == 'HYDRA_LAUNCH_END\n': break if len(slaves) != len(items): logging.error("hosts: %s, slaves: %s", items, slaves) raise Exception("slaves not match with hosts") def output(f): while True: line = f.readline() if not line: break sys.stdout.write(line) self.tout = t = threading.Thread(target=output, args=[p.stdout]) t.deamon = True t.start() return slaves @safe def stop(self, status): if self.started: try: self.p.kill() self.p.wait() except: pass self.tout.join() self.publisher.close() super(MPIScheduler, self).stop(status) if __name__ == "__main__": parser = OptionParser(usage="Usage: drun [options] <command>") parser.allow_interspersed_args=False parser.add_option("-s", "--master", type="string", default="zk://zk1:2181,zk2:2181,zk3:2181,zk4:2181,zk5:2181/mesos_master2", help="url of master (default: zookeeper") parser.add_option("-i", "--mpi", action="store_true", help="run MPI tasks") parser.add_option("-n", "--tasks", type="int", default=1, help="number task to launch (default: 1)") parser.add_option("-b", "--start", type="int", default=0, help="which task to start (default: 0)") parser.add_option("-p", "--task_per_node", type="int", default=0, help="max number of tasks on one node (default: 0)") parser.add_option("-r","--retry", type="int", default=0, help="retry times when failed (default: 0)") parser.add_option("-t", "--timeout", type="int", default=3600*24, help="timeout of job in seconds (default: 86400)") parser.add_option("-c","--cpus", type="float", default=1.0, help="number of CPUs per task (default: 1)") parser.add_option("-m","--mem", type="string", default='100m', help="MB 
of memory per task (default: 100m)") parser.add_option("-g","--group", type="string", default='', help="which group to run (default: ''") parser.add_option("--expand", action="store_true", help="expand expression in command line") parser.add_option("--shell", action="store_true", help="using shell re-intepret the cmd args") # parser.add_option("--kill", type="string", default="", # help="kill a job with frameword id") parser.add_option("-q", "--quiet", action="store_true", help="be quiet", ) parser.add_option("-v", "--verbose", action="store_true", help="show more useful log", ) (options, command) = parser.parse_args() if options.master.startswith('mesos://'): if '@' in options.master: options.master = options.master[options.master.rfind('@')+1:] else: options.master = options.master[options.master.rfind('//')+2:] elif options.master.startswith('zoo://'): options.master = 'zk' + options.master[3:] if ':' not in options.master: options.master += ':5050' # if options.kill: # sched = MPIScheduler(options, command) # fid = mesos_pb2.FrameworkID() # fid.value = options.kill # driver = mesos.MesosSchedulerDriver(sched, sched.framework, # options.master, fid) # driver.start() # driver.stop(False) # sys.exit(0) if not command: parser.print_help() sys.exit(2) logging.basicConfig(format='[drun] %(threadName)s %(asctime)-15s %(message)s', level=options.quiet and logging.ERROR or options.verbose and logging.DEBUG or logging.WARNING) if options.mpi: if options.retry > 0: logging.error("MPI application can not retry") options.retry = 0 sched = MPIScheduler(options, command) else: sched = SubmitScheduler(options, command) logging.debug("Connecting to mesos master %s", options.master) driver = mesos.MesosSchedulerDriver(sched, sched.framework, options.master) driver.start() def handler(signm, frame): logging.warning("got signal %d, exit now", signm) sched.stop(3) signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGHUP, handler) signal.signal(signal.SIGABRT, handler) signal.signal(signal.SIGQUIT, handler) try: from rfoo.utils import rconsole rconsole.spawn_server(locals(), 0) except ImportError: pass start = time.time() try: while not sched.stopped: time.sleep(0.1) now = time.time() sched.check(driver) if not sched.started and sched.next_try > 0 and now > sched.next_try: sched.next_try = 0 driver.reviveOffers() if not sched.started and now > sched.last_offer_time + 60 + random.randint(0,5): logging.warning("too long to get offer, reviving...") sched.last_offer_time = now driver.reviveOffers() if now - start > options.timeout: logging.warning("job timeout in %d seconds", options.timeout) sched.stop(2) break except KeyboardInterrupt: logging.warning('stopped by KeyboardInterrupt') sched.stop(4) driver.stop(False) ctx.term() sys.exit(sched.status)
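# --- Illustrative sketch, not part of the original scheduler.py ---
# parse_mem (defined above) normalizes the -m/--mem option to megabytes before
# offers are matched: bare numbers are already MB, a 'g' suffix scales up and a
# 'k' suffix scales down. A minimal, hypothetical sanity check of that rule
# (the helper name and sample values below are made up for illustration):

def _demo_parse_mem():
    assert parse_mem('100m') == 100.0   # the option's default, already in MB
    assert parse_mem('2g') == 2048.0    # gigabytes scaled up to MB
    assert parse_mem('512k') == 0.5     # kilobytes scaled down to MB
    assert parse_mem('64') == 64.0      # bare numbers pass through unchanged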
basic_gpu_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools
import threading

import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
from tensorflow.python.platform import test


class GPUBinaryOpsTest(test.TestCase):

  def _compareGPU(self, x, y, np_func, tf_func):
    with self.test_session(use_gpu=True) as sess:
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_gpu = sess.run(out)

    with self.test_session(use_gpu=False) as sess:
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_cpu = sess.run(out)

    self.assertAllClose(tf_cpu, tf_gpu)

  def testFloatBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
    self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
    self._compareGPU(x, y, np.power, math_ops.pow)

  def testFloatWithBCast(self):
    x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
    y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)

  def testDoubleBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)

  def testDoubleWithBCast(self):
    x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
    y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)


class MathBuiltinUnaryTest(test.TestCase):

  def _compare(self, x, np_func, tf_func, use_gpu):
    np_out = np_func(x)
    with self.test_session(use_gpu=use_gpu) as sess:
      inx = ops.convert_to_tensor(x)
      ofunc = tf_func(inx)
      tf_out = sess.run(ofunc)
    self.assertAllClose(np_out, tf_out)

  def _inv(self, x):
    return 1.0 / x

  def _rsqrt(self, x):
    return self._inv(np.sqrt(x))

  def _testDtype(self, dtype, use_gpu):
    data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
    self._compare(data, np.abs, math_ops.abs, use_gpu)
    self._compare(data, np.arccos, math_ops.acos, use_gpu)
    self._compare(data, np.arcsin, math_ops.asin, use_gpu)
    self._compare(data, np.arctan, math_ops.atan, use_gpu)
    self._compare(data, np.ceil, math_ops.ceil, use_gpu)
    self._compare(data, np.cos, math_ops.cos, use_gpu)
    self._compare(data, np.exp, math_ops.exp, use_gpu)
    self._compare(data, np.floor, math_ops.floor, use_gpu)
    self._compare(data, np.log, math_ops.log, use_gpu)
    self._compare(data, np.log1p, math_ops.log1p, use_gpu)
    self._compare(data, np.negative, math_ops.negative, use_gpu)
    self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
    self._compare(data, np.sin, math_ops.sin, use_gpu)
    self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
    self._compare(data, np.square, math_ops.square, use_gpu)
    self._compare(data, np.tan, math_ops.tan, use_gpu)
    self._compare(data, np.tanh, math_ops.tanh, use_gpu)

  def testTypes(self):
    for dtype in [np.float32]:
      self._testDtype(dtype, use_gpu=True)

  def testFloorDevide(self):
    x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])
    y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])

    np_out = np.floor_divide(x, y + 0.1)

    with self.test_session(use_gpu=True) as sess:
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y + 0.1)
      ofunc = inx / iny
      out_func2 = math_ops.floor(ofunc)
      tf_out = sess.run(out_func2)

    self.assertAllClose(np_out, tf_out)


class BroadcastSimpleTest(test.TestCase):

  def _GetGradientArgs(self, xs, ys):
    with self.test_session(use_gpu=True) as sess:
      return sess.run(_broadcast_gradient_args(xs, ys))

  def testBroadcast(self):
    r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
    self.assertAllEqual(r0, [])
    self.assertAllEqual(r1, [0, 1, 2])

  _GRAD_TOL = {dtypes.float32: 1e-3}

  def _compareGradientX(self, x, y, np_func, tf_func,
                        numeric_gradient_type=None):
    z = np_func(x, y)
    zs = list(z.shape)
    with self.test_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      xs = list(x.shape)
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, xs, out, zs, x_init_value=x)
    tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
    self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

  def _compareGradientY(self, x, y, np_func, tf_func,
                        numeric_gradient_type=None):
    z = np_func(x, y)
    zs = list(z.shape)
    with self.test_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      ys = list(np.shape(y))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, ys, out, zs, x_init_value=y)
    tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
    self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

  def _compareGpu(self, x, y, np_func, tf_func):
    np_ans = np_func(x, y)
    with self.test_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_gpu = out.eval()
    self.assertAllClose(np_ans, tf_gpu)
    self.assertShapeEqual(np_ans, out)

  # TODO(zhifengc/ke): make gradient checker work on GPU.
  def testGradient(self):
    x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])
    y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])

    self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
    self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
    self._compareGpu(x, y, np.true_divide, math_ops.truediv)
    self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)


class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
  """Tests concurrent sessions executing on the same GPU."""

  def _run_session(self, results):
    n_iterations = 500
    with self.test_session(use_gpu=True) as s:
      data = variables.Variable(1.0)
      with ops.device('/gpu:0'):
        random_seed.set_random_seed(1)
        matrix1 = variables.Variable(
            random_ops.truncated_normal([1024, 1]), name='matrix1')
        matrix2 = variables.Variable(
            random_ops.truncated_normal([1, 1024]), name='matrix2')
        x1 = math_ops.multiply(data, matrix1, name='x1')
        x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
        x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
        s.run(variables.global_variables_initializer())

        for _ in xrange(n_iterations):
          value = s.run(x4)
          results.append(value)
          if value != results[0]:
            break

  def testConcurrentSessions(self):
    if not test.is_gpu_available():
      return

    n_threads = 4
    results = [[]] * n_threads
    threads = [
        threading.Thread(target=self._run_session, args=(results[i],))
        for i in xrange(n_threads)
    ]

    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()

    flat_results = [x for x in itertools.chain(*results)]
    self.assertNotEqual(0, len(flat_results))
    for result in flat_results:
      self.assertEqual(result, flat_results[0])


if __name__ == '__main__':
  test.main()
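# --- Illustrative sketch, not part of the original basic_gpu_test.py ---
# testBroadcast above checks that _broadcast_gradient_args([2, 3, 5], [1])
# returns ([], [0, 1, 2]): the axes each input's gradient must be summed over
# after broadcasting. A small pure-Python sketch of that rule, written only to
# mirror the case exercised by the test (an assumption, not TensorFlow's kernel):

def _broadcast_grad_axes_sketch(x_shape, y_shape):
  ndim = max(len(x_shape), len(y_shape))
  xs = [1] * (ndim - len(x_shape)) + list(x_shape)  # right-align the shapes
  ys = [1] * (ndim - len(y_shape)) + list(y_shape)
  rx = [i for i in range(ndim) if xs[i] == 1 and ys[i] > 1]  # x was broadcast here
  ry = [i for i in range(ndim) if ys[i] == 1 and xs[i] > 1]  # y was broadcast here
  return rx, ry

# _broadcast_grad_axes_sketch([2, 3, 5], [1]) == ([], [0, 1, 2]), as in testBroadcast.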
a3c.py
#!/usr/bin/env python from skimage.transform import resize from skimage.color import rgb2gray import threading import tensorflow as tf import sys import random import numpy as np import time import gym from keras import backend as K from keras.layers import Convolution2D, Flatten, Dense from collections import deque from a3c_model import build_policy_and_value_networks from keras import backend as K from atari_environment import AtariEnvironment # Path params EXPERIMENT_NAME = "breakout_a3c" SUMMARY_SAVE_PATH = "/Users/coreylynch/dev/async-rl/summaries/"+EXPERIMENT_NAME CHECKPOINT_SAVE_PATH = "/tmp/"+EXPERIMENT_NAME+".ckpt" CHECKPOINT_NAME = "/tmp/breakout_a3c.ckpt-5" CHECKPOINT_INTERVAL=5000 SUMMARY_INTERVAL=5 # TRAINING = False TRAINING = True SHOW_TRAINING = True # SHOW_TRAINING = False # Experiment params GAME = "Breakout-v0" ACTIONS = 3 NUM_CONCURRENT = 8 NUM_EPISODES = 20000 AGENT_HISTORY_LENGTH = 4 RESIZED_WIDTH = 84 RESIZED_HEIGHT = 84 # DQN Params GAMMA = 0.99 # Optimization Params LEARNING_RATE = 0.00001 #Shared global parameters T = 0 TMAX = 80000000 t_max = 32 def sample_policy_action(num_actions, probs): """ Sample an action from an action probability distribution output by the policy network. """ # Subtract a tiny value from probabilities in order to avoid # "ValueError: sum(pvals[:-1]) > 1.0" in numpy.multinomial probs = probs - np.finfo(np.float32).epsneg histogram = np.random.multinomial(1, probs) action_index = int(np.nonzero(histogram)[0]) return action_index def actor_learner_thread(num, env, session, graph_ops, summary_ops, saver): # We use global shared counter T, and TMAX constant global TMAX, T # Unpack graph ops s, a, R, minimize, p_network, v_network = graph_ops # Unpack tensorboard summary stuff r_summary_placeholder, update_ep_reward, val_summary_placeholder, update_ep_val, summary_op = summary_ops # Wrap env with AtariEnvironment helper class env = AtariEnvironment(gym_env=env, resized_width=RESIZED_WIDTH, resized_height=RESIZED_HEIGHT, agent_history_length=AGENT_HISTORY_LENGTH) time.sleep(5*num) # Set up per-episode counters ep_reward = 0 ep_avg_v = 0 v_steps = 0 ep_t = 0 probs_summary_t = 0 s_t = env.get_initial_state() terminal = False while T < TMAX: s_batch = [] past_rewards = [] a_batch = [] t = 0 t_start = t while not (terminal or ((t - t_start) == t_max)): # Perform action a_t according to policy pi(a_t | s_t) probs = session.run(p_network, feed_dict={s: [s_t]})[0] action_index = sample_policy_action(ACTIONS, probs) a_t = np.zeros([ACTIONS]) a_t[action_index] = 1 if probs_summary_t % 100 == 0: print("P, ", np.max(probs), "V ", session.run(v_network, feed_dict={s: [s_t]})[0][0]) s_batch.append(s_t) a_batch.append(a_t) s_t1, r_t, terminal, info = env.step(action_index) ep_reward += r_t r_t = np.clip(r_t, -1, 1) past_rewards.append(r_t) t += 1 T += 1 ep_t += 1 probs_summary_t += 1 s_t = s_t1 if terminal: R_t = 0 else: R_t = session.run(v_network, feed_dict={s: [s_t]})[0][0] # Bootstrap from last state R_batch = np.zeros(t) for i in reversed(list(range(t_start, t))): R_t = past_rewards[i] + GAMMA * R_t R_batch[i] = R_t session.run(minimize, feed_dict={R : R_batch, a : a_batch, s : s_batch}) # Save progress every 5000 iterations if T % CHECKPOINT_INTERVAL == 0: saver.save(session, CHECKPOINT_SAVE_PATH, global_step = T) if terminal: # Episode ended, collect stats and reset game session.run(update_ep_reward, feed_dict={r_summary_placeholder: ep_reward}) print("THREAD:", num, "/ TIME", T, "/ REWARD", ep_reward) s_t = env.get_initial_state() terminal = False # 
Reset per-episode counters ep_reward = 0 ep_t = 0 def build_graph(): # Create shared global policy and value networks s, p_network, v_network, p_params, v_params = build_policy_and_value_networks(num_actions=ACTIONS, agent_history_length=AGENT_HISTORY_LENGTH, resized_width=RESIZED_WIDTH, resized_height=RESIZED_HEIGHT) # Shared global optimizer optimizer = tf.train.AdamOptimizer(LEARNING_RATE) # Op for applying remote gradients R_t = tf.placeholder("float", [None]) a_t = tf.placeholder("float", [None, ACTIONS]) log_prob = tf.log(tf.reduce_sum(p_network * a_t, reduction_indices=1)) p_loss = -log_prob * (R_t - v_network) v_loss = tf.reduce_mean(tf.square(R_t - v_network)) total_loss = p_loss + (0.5 * v_loss) minimize = optimizer.minimize(total_loss) return s, a_t, R_t, minimize, p_network, v_network # Set up some episode summary ops to visualize on tensorboard. def setup_summaries(): episode_reward = tf.Variable(0.) tf.summary.scalar("Episode Reward", episode_reward) r_summary_placeholder = tf.placeholder("float") update_ep_reward = episode_reward.assign(r_summary_placeholder) ep_avg_v = tf.Variable(0.) tf.summary.scalar("Episode Value", ep_avg_v) val_summary_placeholder = tf.placeholder("float") update_ep_val = ep_avg_v.assign(val_summary_placeholder) summary_op = tf.summary.merge_all() return r_summary_placeholder, update_ep_reward, val_summary_placeholder, update_ep_val, summary_op def train(session, graph_ops, saver): # Set up game environments (one per thread) envs = [gym.make(GAME) for i in range(NUM_CONCURRENT)] summary_ops = setup_summaries() summary_op = summary_ops[-1] # Initialize variables session.run(tf.global_variables_initializer()) writer = tf.summary.FileWriter(SUMMARY_SAVE_PATH, session.graph) # Start NUM_CONCURRENT training threads actor_learner_threads = [threading.Thread(target=actor_learner_thread, args=(thread_id, envs[thread_id], session, graph_ops, summary_ops, saver)) for thread_id in range(NUM_CONCURRENT)] for t in actor_learner_threads: t.start() # Show the agents training and write summary statistics last_summary_time = 0 while True: if SHOW_TRAINING: for env in envs: env.render() now = time.time() if now - last_summary_time > SUMMARY_INTERVAL: summary_str = session.run(summary_op) writer.add_summary(summary_str, float(T)) last_summary_time = now for t in actor_learner_threads: t.join() def evaluation(session, graph_ops, saver): saver.restore(session, CHECKPOINT_NAME) print("Restored model weights from ", CHECKPOINT_NAME) monitor_env = gym.make(GAME) monitor_env.monitor.start('/tmp/'+EXPERIMENT_NAME+"/eval") # Unpack graph ops s, a_t, R_t, minimize, p_network, v_network = graph_ops # Wrap env with AtariEnvironment helper class env = AtariEnvironment(gym_env=monitor_env, resized_width=RESIZED_WIDTH, resized_height=RESIZED_HEIGHT, agent_history_length=AGENT_HISTORY_LENGTH) for i_episode in range(100): s_t = env.get_initial_state() ep_reward = 0 terminal = False while not terminal: monitor_env.render() # Forward the deep q network, get Q(s,a) values probs = p_network.eval(session = session, feed_dict = {s : [s_t]})[0] action_index = sample_policy_action(ACTIONS, probs) s_t1, r_t, terminal, info = env.step(action_index) s_t = s_t1 ep_reward += r_t print(ep_reward) monitor_env.monitor.close() def main(_): g = tf.Graph() with g.as_default(), tf.Session() as session: K.set_session(session) graph_ops = build_graph() saver = tf.train.Saver() if TRAINING: train(session, graph_ops, saver) else: evaluation(session, graph_ops, saver) if __name__ == "__main__": tf.app.run()
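# --- Illustrative sketch, not part of the original a3c.py ---
# actor_learner_thread above turns at most t_max rewards into training targets
# by seeding R_t with the critic's value of the last state (0 when terminal)
# and discounting backwards. The same backward pass as a standalone helper;
# the helper name and sample rewards below are made up for illustration:

def n_step_returns(rewards, bootstrap_value, gamma=GAMMA):
    R = bootstrap_value
    returns = np.zeros(len(rewards))
    for i in reversed(range(len(rewards))):
        R = rewards[i] + gamma * R   # R_t = r_t + gamma * R_{t+1}
        returns[i] = R
    return returns

# n_step_returns([0.0, 0.0, 1.0], bootstrap_value=0.0) -> [0.9801, 0.99, 1.0]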
regrtest.py
#! /usr/bin/env python """ Usage: python -m test.regrtest [options] [test_name1 [test_name2 ...]] python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]] If no arguments or options are provided, finds all files matching the pattern "test_*" in the Lib/test subdirectory and runs them in alphabetical order (but see -M and -u, below, for exceptions). For more rigorous testing, it is useful to use the following command line: python -E -tt -Wd -3 -m test.regrtest [options] [test_name1 ...] Options: -h/--help -- print this text and exit Verbosity -v/--verbose -- run tests in verbose mode with output to stdout -w/--verbose2 -- re-run failed tests in verbose mode -W/--verbose3 -- re-run failed tests in verbose mode immediately -q/--quiet -- no output unless one or more tests fail -S/--slow -- print the slowest 10 tests --header -- print header with interpreter info Selecting tests -r/--randomize -- randomize test execution order (see below) --randseed -- pass a random seed to reproduce a previous random run -f/--fromfile -- read names of tests to run from a file (see below) -x/--exclude -- arguments are tests to *exclude* -s/--single -- single step through a set of tests (see below) -u/--use RES1,RES2,... -- specify which special resource intensive tests to run -M/--memlimit LIMIT -- run very large memory-consuming tests Special runs -l/--findleaks -- if GC is available detect tests that leak memory -L/--runleaks -- run the leaks(1) command just before exit -R/--huntrleaks RUNCOUNTS -- search for reference leaks (needs debug build, v. slow) -j/--multiprocess PROCESSES -- run PROCESSES processes at once -T/--coverage -- turn on code coverage tracing using the trace module -D/--coverdir DIRECTORY -- Directory where coverage files are put -N/--nocoverdir -- Put coverage files alongside modules -t/--threshold THRESHOLD -- call gc.set_threshold(THRESHOLD) -F/--forever -- run the specified tests in a loop, until an error happens -P/--pgo -- enable Profile Guided Optimization training Additional Option Details: -r randomizes test execution order. You can use --randseed=int to provide an int seed value for the randomizer; this is useful for reproducing troublesome test orders. -s On the first invocation of regrtest using -s, the first test file found or the first test file given on the command line is run, and the name of the next test is recorded in a file named pynexttest. If run from the Python build directory, pynexttest is located in the 'build' subdirectory, otherwise it is located in tempfile.gettempdir(). On subsequent runs, the test in pynexttest is run, and the next test is written to pynexttest. When the last test has been run, pynexttest is deleted. In this way it is possible to single step through the test files. This is useful when doing memory analysis on the Python interpreter, which process tends to consume too many resources to run the full regression test non-stop. -f reads the names of tests from the file given as f's argument, one or more test names per line. Whitespace is ignored. Blank lines and lines beginning with '#' are ignored. This is especially useful for whittling down failures involving interactions among tests. -L causes the leaks(1) command to be run just before exit if it exists. leaks(1) is available on Mac OS X and presumably on some other FreeBSD-derived systems. -R runs each test several times and examines sys.gettotalrefcount() to see if the test appears to be leaking references. 
The argument should be of the form stab:run:fname where 'stab' is the number of times the test is run to let gettotalrefcount settle down, 'run' is the number of times further it is run and 'fname' is the name of the file the reports are written to. These parameters all have defaults (5, 4 and "reflog.txt" respectively), and the minimal invocation is '-R :'. -M runs tests that require an exorbitant amount of memory. These tests typically try to ascertain containers keep working when containing more than 2 billion objects, which only works on 64-bit systems. There are also some tests that try to exhaust the address space of the process, which only makes sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit, which is a string in the form of '2.5Gb', determines howmuch memory the tests will limit themselves to (but they may go slightly over.) The number shouldn't be more memory than the machine has (including swap memory). You should also keep in mind that swap memory is generally much, much slower than RAM, and setting memlimit to all available RAM or higher will heavily tax the machine. On the other hand, it is no use running these tests with a limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect to use more than memlimit memory will be skipped. The big-memory tests generally run very, very long. -u is used to specify which special resource intensive tests to run, such as those requiring large file support or network connectivity. The argument is a comma-separated list of words indicating the resources to test. Currently only the following are defined: all - Enable all special resources. audio - Tests that use the audio device. (There are known cases of broken audio drivers that can crash Python or even the Linux kernel.) curses - Tests that use curses and will modify the terminal's state and output modes. largefile - It is okay to run some test that may create huge files. These tests can take a long time and may consume >2GB of disk space temporarily. network - It is okay to run tests that use external network resource, e.g. testing SSL support for sockets. bsddb - It is okay to run the bsddb testsuite, which takes a long time to complete. decimal - Test the decimal module against a large suite that verifies compliance with standards. cpu - Used for certain CPU-heavy tests. subprocess Run all tests for the subprocess module. urlfetch - It is okay to download files required on testing. gui - Run tests that require a running GUI. xpickle - Test pickle and cPickle against Python 2.4, 2.5 and 2.6 to test backwards compatibility. These tests take a long time to run. To enable all resources except one, use '-uall,-<resource>'. For example, to run all the tests except for the bsddb tests, give the option '-uall,-bsddb'. """ import StringIO import getopt import json import os import random import re import shutil import sys import time import traceback import warnings import unittest import tempfile import imp import platform import sysconfig # Some times __path__ and __file__ are not absolute (e.g. while running from # Lib/) and, if we change the CWD to run the tests in a temporary dir, some # imports might fail. This affects only the modules imported before os.chdir(). # These modules are searched first in sys.path[0] (so '' -- the CWD) and if # they are found in the CWD their __file__ and __path__ will be relative (this # happens before the chdir). 
All the modules imported after the chdir, are # not found in the CWD, and since the other paths in sys.path[1:] are absolute # (site.py absolutize them), the __file__ and __path__ will be absolute too. # Therefore it is necessary to absolutize manually the __file__ and __path__ of # the packages to prevent later imports to fail when the CWD is different. for module in sys.modules.itervalues(): if hasattr(module, '__path__'): module.__path__ = [os.path.abspath(path) for path in module.__path__] if hasattr(module, '__file__'): module.__file__ = os.path.abspath(module.__file__) # MacOSX (a.k.a. Darwin) has a default stack size that is too small # for deeply recursive regular expressions. We see this as crashes in # the Python test suite when running test_re.py and test_sre.py. The # fix is to set the stack limit to 2048. # This approach may also be useful for other Unixy platforms that # suffer from small default stack limits. if sys.platform == 'darwin': try: import resource except ImportError: pass else: soft, hard = resource.getrlimit(resource.RLIMIT_STACK) newsoft = min(hard, max(soft, 1024*2048)) resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard)) # Windows, Tkinter, and resetting the environment after each test don't # mix well. To alleviate test failures due to Tcl/Tk not being able to # find its library, get the necessary environment massage done once early. if sys.platform == 'win32': try: import FixTk except Exception: pass # Test result constants. PASSED = 1 FAILED = 0 ENV_CHANGED = -1 SKIPPED = -2 RESOURCE_DENIED = -3 INTERRUPTED = -4 from test import test_support RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network', 'bsddb', 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'xpickle') TEMPDIR = os.path.abspath(tempfile.gettempdir()) def usage(code, msg=''): print __doc__ if msg: print msg sys.exit(code) def main(tests=None, testdir=None, verbose=0, quiet=False, exclude=False, single=False, randomize=False, fromfile=None, findleaks=False, use_resources=None, trace=False, coverdir='coverage', runleaks=False, huntrleaks=False, verbose2=False, print_slow=False, random_seed=None, use_mp=None, verbose3=False, forever=False, header=False, pgo=False): """Execute a test suite. This also parses command-line options and modifies its behavior accordingly. tests -- a list of strings containing test names (optional) testdir -- the directory in which to look for tests (optional) Users other than the Python test suite will certainly want to specify testdir; if it's omitted, the directory containing the Python test suite is searched for. If the tests argument is omitted, the tests listed on the command-line will be used. If that's empty, too, then all *.py files beginning with test_ will be used. The other default arguments (verbose, quiet, exclude, single, randomize, findleaks, use_resources, trace, coverdir, print_slow, and random_seed) allow programmers calling main() directly to set the values that would normally be set by flags on the command line. 
""" test_support.record_original_stdout(sys.stdout) try: opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:P', ['help', 'verbose', 'verbose2', 'verbose3', 'quiet', 'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks', 'use=', 'threshold=', 'trace', 'coverdir=', 'nocoverdir', 'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=', 'multiprocess=', 'slaveargs=', 'forever', 'header', 'pgo']) except getopt.error, msg: usage(2, msg) # Defaults if random_seed is None: random_seed = random.randrange(10000000) if use_resources is None: use_resources = [] for o, a in opts: if o in ('-h', '--help'): usage(0) elif o in ('-v', '--verbose'): verbose += 1 elif o in ('-w', '--verbose2'): verbose2 = True elif o in ('-W', '--verbose3'): verbose3 = True elif o in ('-q', '--quiet'): quiet = True; verbose = 0 elif o in ('-x', '--exclude'): exclude = True elif o in ('-s', '--single'): single = True elif o in ('-S', '--slow'): print_slow = True elif o in ('-r', '--randomize'): randomize = True elif o == '--randseed': random_seed = int(a) elif o in ('-f', '--fromfile'): fromfile = a elif o in ('-l', '--findleaks'): findleaks = True elif o in ('-L', '--runleaks'): runleaks = True elif o in ('-t', '--threshold'): import gc gc.set_threshold(int(a)) elif o in ('-T', '--coverage'): trace = True elif o in ('-D', '--coverdir'): coverdir = os.path.join(os.getcwd(), a) elif o in ('-N', '--nocoverdir'): coverdir = None elif o in ('-R', '--huntrleaks'): huntrleaks = a.split(':') if len(huntrleaks) not in (2, 3): print a, huntrleaks usage(2, '-R takes 2 or 3 colon-separated arguments') if not huntrleaks[0]: huntrleaks[0] = 5 else: huntrleaks[0] = int(huntrleaks[0]) if not huntrleaks[1]: huntrleaks[1] = 4 else: huntrleaks[1] = int(huntrleaks[1]) if len(huntrleaks) == 2 or not huntrleaks[2]: huntrleaks[2:] = ["reflog.txt"] elif o in ('-M', '--memlimit'): test_support.set_memlimit(a) elif o in ('-u', '--use'): u = [x.lower() for x in a.split(',')] for r in u: if r == 'all': use_resources[:] = RESOURCE_NAMES continue remove = False if r[0] == '-': remove = True r = r[1:] if r not in RESOURCE_NAMES: usage(1, 'Invalid -u/--use option: ' + a) if remove: if r in use_resources: use_resources.remove(r) elif r not in use_resources: use_resources.append(r) elif o in ('-F', '--forever'): forever = True elif o in ('-j', '--multiprocess'): use_mp = int(a) elif o == '--header': header = True elif o == '--slaveargs': args, kwargs = json.loads(a) try: result = runtest(*args, **kwargs) except BaseException, e: result = INTERRUPTED, e.__class__.__name__ print # Force a newline (just in case) print json.dumps(result) sys.exit(0) elif o in ('-P', '--pgo'): pgo = True else: print >>sys.stderr, ("No handler for option {}. Please " "report this as a bug at http://bugs.python.org.").format(o) sys.exit(1) if single and fromfile: usage(2, "-s and -f don't go together!") if use_mp and trace: usage(2, "-T and -j don't go together!") if use_mp and findleaks: usage(2, "-l and -j don't go together!") good = [] bad = [] skipped = [] resource_denieds = [] environment_changed = [] interrupted = False if findleaks: try: import gc except ImportError: print 'No GC available, disabling findleaks.' findleaks = False else: # Uncomment the line below to report garbage that is not # freeable by reference counting alone. By default only # garbage that is not collectable by the GC is reported. 
#gc.set_debug(gc.DEBUG_SAVEALL) found_garbage = [] if single: filename = os.path.join(TEMPDIR, 'pynexttest') try: fp = open(filename, 'r') next_test = fp.read().strip() tests = [next_test] fp.close() except IOError: pass if fromfile: tests = [] fp = open(os.path.join(test_support.SAVEDCWD, fromfile)) for line in fp: guts = line.split() # assuming no test has whitespace in its name if guts and not guts[0].startswith('#'): tests.extend(guts) fp.close() # Strip .py extensions. removepy(args) removepy(tests) stdtests = STDTESTS[:] nottests = NOTTESTS.copy() if exclude: for arg in args: if arg in stdtests: stdtests.remove(arg) nottests.add(arg) args = [] # For a partial run, we do not need to clutter the output. if verbose or header or not (quiet or single or tests or args): if not pgo: # Print basic platform information print "==", platform.python_implementation(), \ " ".join(sys.version.split()) print "== ", platform.platform(aliased=True), \ "%s-endian" % sys.byteorder print "== ", os.getcwd() print "Testing with flags:", sys.flags alltests = findtests(testdir, stdtests, nottests) selected = tests or args or alltests if single: selected = selected[:1] try: next_single_test = alltests[alltests.index(selected[0])+1] except IndexError: next_single_test = None if randomize: random.seed(random_seed) print "Using random seed", random_seed random.shuffle(selected) if trace: import trace tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix], trace=False, count=True) test_times = [] test_support.use_resources = use_resources save_modules = set(sys.modules) def accumulate_result(test, result): ok, test_time = result test_times.append((test_time, test)) if ok == PASSED: good.append(test) elif ok == FAILED: bad.append(test) elif ok == ENV_CHANGED: environment_changed.append(test) elif ok == SKIPPED: skipped.append(test) elif ok == RESOURCE_DENIED: skipped.append(test) resource_denieds.append(test) if forever: def test_forever(tests=list(selected)): while True: for test in tests: yield test if bad: return tests = test_forever() test_count = '' test_count_width = 3 else: tests = iter(selected) test_count = '/{}'.format(len(selected)) test_count_width = len(test_count) - 1 if use_mp: try: from threading import Thread except ImportError: print "Multiprocess option requires thread support" sys.exit(2) from Queue import Queue from subprocess import Popen, PIPE debug_output_pat = re.compile(r"\[\d+ refs\]$") output = Queue() def tests_and_args(): for test in tests: args_tuple = ( (test, verbose, quiet), dict(huntrleaks=huntrleaks, use_resources=use_resources, pgo=pgo) ) yield (test, args_tuple) pending = tests_and_args() opt_args = test_support.args_from_interpreter_flags() base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest'] # required to spawn a new process with PGO flag on/off if pgo: base_cmd = base_cmd + ['--pgo'] def work(): # A worker thread. try: while True: try: test, args_tuple = next(pending) except StopIteration: output.put((None, None, None, None)) return # -E is needed by some tests, e.g. test_import popen = Popen(base_cmd + ['--slaveargs', json.dumps(args_tuple)], stdout=PIPE, stderr=PIPE, universal_newlines=True, close_fds=(os.name != 'nt')) stdout, stderr = popen.communicate() # Strip last refcount output line if it exists, since it # comes from the shutdown of the interpreter in the subcommand. 
stderr = debug_output_pat.sub("", stderr) stdout, _, result = stdout.strip().rpartition("\n") if not result: output.put((None, None, None, None)) return result = json.loads(result) output.put((test, stdout.rstrip(), stderr.rstrip(), result)) except BaseException: output.put((None, None, None, None)) raise workers = [Thread(target=work) for i in range(use_mp)] for worker in workers: worker.start() finished = 0 test_index = 1 try: while finished < use_mp: test, stdout, stderr, result = output.get() if test is None: finished += 1 continue if stdout: print stdout if stderr and not pgo: print >>sys.stderr, stderr sys.stdout.flush() sys.stderr.flush() if result[0] == INTERRUPTED: assert result[1] == 'KeyboardInterrupt' raise KeyboardInterrupt # What else? accumulate_result(test, result) if not quiet: fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}" print(fmt.format( test_count_width, test_index, test_count, len(bad), test)) test_index += 1 except KeyboardInterrupt: interrupted = True pending.close() for worker in workers: worker.join() else: for test_index, test in enumerate(tests, 1): if not quiet: fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}" print(fmt.format( test_count_width, test_index, test_count, len(bad), test)) sys.stdout.flush() if trace: # If we're tracing code coverage, then we don't exit with status # if on a false return value from main. tracer.runctx('runtest(test, verbose, quiet)', globals=globals(), locals=vars()) else: try: result = runtest(test, verbose, quiet, huntrleaks, None, pgo) accumulate_result(test, result) if verbose3 and result[0] == FAILED: if not pgo: print "Re-running test %r in verbose mode" % test runtest(test, True, quiet, huntrleaks, None, pgo) except KeyboardInterrupt: interrupted = True break except: raise if findleaks: gc.collect() if gc.garbage: print "Warning: test created", len(gc.garbage), print "uncollectable object(s)." # move the uncollectable objects somewhere so we don't see # them again found_garbage.extend(gc.garbage) del gc.garbage[:] # Unload the newly imported modules (best effort finalization) for module in sys.modules.keys(): if module not in save_modules and module.startswith("test."): test_support.unload(module) if interrupted and not pgo: # print a newline after ^C print print "Test suite interrupted by signal SIGINT." omitted = set(selected) - set(good) - set(bad) - set(skipped) print count(len(omitted), "test"), "omitted:" printlist(omitted) if good and not quiet and not pgo: if not bad and not skipped and not interrupted and len(good) > 1: print "All", print count(len(good), "test"), "OK." if print_slow: test_times.sort(reverse=True) print "10 slowest tests:" for time, test in test_times[:10]: print "%s: %.1fs" % (test, time) if bad and not pgo: print count(len(bad), "test"), "failed:" printlist(bad) if environment_changed and not pgo: print "{} altered the execution environment:".format( count(len(environment_changed), "test")) printlist(environment_changed) if skipped and not quiet and not pgo: print count(len(skipped), "test"), "skipped:" printlist(skipped) e = _ExpectedSkips() plat = sys.platform if e.isvalid(): surprise = set(skipped) - e.getexpected() - set(resource_denieds) if surprise: print count(len(surprise), "skip"), \ "unexpected on", plat + ":" printlist(surprise) else: print "Those skips are all expected on", plat + "." else: print "Ask someone to teach regrtest.py about which tests are" print "expected to get skipped on", plat + "." 
if verbose2 and bad: print "Re-running failed tests in verbose mode" for test in bad[:]: print "Re-running test %r in verbose mode" % test sys.stdout.flush() try: test_support.verbose = True ok = runtest(test, True, quiet, huntrleaks, None, pgo) except KeyboardInterrupt: # print a newline separate from the ^C print break else: if ok[0] in {PASSED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED}: bad.remove(test) else: if bad: print count(len(bad), "test"), "failed again:" printlist(bad) if single: if next_single_test: with open(filename, 'w') as fp: fp.write(next_single_test + '\n') else: os.unlink(filename) if trace: r = tracer.results() r.write_results(show_missing=True, summary=True, coverdir=coverdir) if runleaks: os.system("leaks %d" % os.getpid()) sys.exit(len(bad) > 0 or interrupted) STDTESTS = [ 'test_grammar', 'test_opcodes', 'test_dict', 'test_builtin', 'test_exceptions', 'test_types', 'test_unittest', 'test_doctest', 'test_doctest2', ] NOTTESTS = { 'test_support', 'test_future1', 'test_future2', } def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS): """Return a list of all applicable test modules.""" testdir = findtestdir(testdir) names = os.listdir(testdir) tests = [] others = set(stdtests) | nottests for name in names: modname, ext = os.path.splitext(name) if modname[:5] == "test_" and ext == ".py" and modname not in others: tests.append(modname) return stdtests + sorted(tests) def runtest(test, verbose, quiet, huntrleaks=False, use_resources=None, pgo=False): """Run a single test. test -- the name of the test verbose -- if true, print more messages quiet -- if true, don't print 'skipped' messages (probably redundant) test_times -- a list of (time, test_name) pairs huntrleaks -- run multiple times to test for leaks; requires a debug build; a triple corresponding to -R's three arguments pgo -- if true, do not print unnecessary info when running the test for Profile Guided Optimization build Returns one of the test result constants: INTERRUPTED KeyboardInterrupt when run under -j RESOURCE_DENIED test skipped because resource denied SKIPPED test skipped for some other reason ENV_CHANGED test failed because it changed the execution environment FAILED test failed PASSED test passed """ test_support.verbose = verbose # Tell tests to be moderately quiet if use_resources is not None: test_support.use_resources = use_resources try: return runtest_inner(test, verbose, quiet, huntrleaks, pgo) finally: cleanup_test_droppings(test, verbose) # Unit tests are supposed to leave the execution environment unchanged # once they complete. But sometimes tests have bugs, especially when # tests fail, and the changes to environment go on to mess up other # tests. This can cause issues with buildbot stability, since tests # are run in random order and so problems may appear to come and go. # There are a few things we can save and restore to mitigate this, and # the following context manager handles this task. class saved_test_environment: """Save bits of the test environment and restore them at block exit. with saved_test_environment(testname, verbose, quiet): #stuff Unless quiet is True, a warning is printed to stderr if any of the saved items was changed by the test. The attribute 'changed' is initially False, but is set to True if a change is detected. If verbose is more than 1, the before and after state of changed items is also printed. 
""" changed = False def __init__(self, testname, verbose=0, quiet=False, pgo=False): self.testname = testname self.verbose = verbose self.quiet = quiet self.pgo = pgo # To add things to save and restore, add a name XXX to the resources list # and add corresponding get_XXX/restore_XXX functions. get_XXX should # return the value to be saved and compared against a second call to the # get function when test execution completes. restore_XXX should accept # the saved value and restore the resource using it. It will be called if # and only if a change in the value is detected. # # Note: XXX will have any '.' replaced with '_' characters when determining # the corresponding method names. resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr', 'os.environ', 'sys.path', 'asyncore.socket_map', 'files', ) def get_sys_argv(self): return id(sys.argv), sys.argv, sys.argv[:] def restore_sys_argv(self, saved_argv): sys.argv = saved_argv[1] sys.argv[:] = saved_argv[2] def get_cwd(self): return os.getcwd() def restore_cwd(self, saved_cwd): os.chdir(saved_cwd) def get_sys_stdout(self): return sys.stdout def restore_sys_stdout(self, saved_stdout): sys.stdout = saved_stdout def get_sys_stderr(self): return sys.stderr def restore_sys_stderr(self, saved_stderr): sys.stderr = saved_stderr def get_sys_stdin(self): return sys.stdin def restore_sys_stdin(self, saved_stdin): sys.stdin = saved_stdin def get_os_environ(self): return id(os.environ), os.environ, dict(os.environ) def restore_os_environ(self, saved_environ): os.environ = saved_environ[1] os.environ.clear() os.environ.update(saved_environ[2]) def get_sys_path(self): return id(sys.path), sys.path, sys.path[:] def restore_sys_path(self, saved_path): sys.path = saved_path[1] sys.path[:] = saved_path[2] def get_asyncore_socket_map(self): asyncore = sys.modules.get('asyncore') # XXX Making a copy keeps objects alive until __exit__ gets called. 
return asyncore and asyncore.socket_map.copy() or {} def restore_asyncore_socket_map(self, saved_map): asyncore = sys.modules.get('asyncore') if asyncore is not None: asyncore.close_all(ignore_all=True) asyncore.socket_map.update(saved_map) def get_test_support_TESTFN(self): if os.path.isfile(test_support.TESTFN): result = 'f' elif os.path.isdir(test_support.TESTFN): result = 'd' else: result = None return result def restore_test_support_TESTFN(self, saved_value): if saved_value is None: if os.path.isfile(test_support.TESTFN): os.unlink(test_support.TESTFN) elif os.path.isdir(test_support.TESTFN): shutil.rmtree(test_support.TESTFN) def get_files(self): return sorted(fn + ('/' if os.path.isdir(fn) else '') for fn in os.listdir(os.curdir)) def restore_files(self, saved_value): fn = test_support.TESTFN if fn not in saved_value and (fn + '/') not in saved_value: if os.path.isfile(fn): test_support.unlink(fn) elif os.path.isdir(fn): test_support.rmtree(fn) def resource_info(self): for name in self.resources: method_suffix = name.replace('.', '_') get_name = 'get_' + method_suffix restore_name = 'restore_' + method_suffix yield name, getattr(self, get_name), getattr(self, restore_name) def __enter__(self): self.saved_values = dict((name, get()) for name, get, restore in self.resource_info()) return self def __exit__(self, exc_type, exc_val, exc_tb): saved_values = self.saved_values del self.saved_values for name, get, restore in self.resource_info(): current = get() original = saved_values.pop(name) # Check for changes to the resource's value if current != original: self.changed = True restore(original) if not self.quiet and not self.pgo: print >>sys.stderr, ( "Warning -- {} was modified by {}".format( name, self.testname)) if self.verbose > 1 and not self.pgo: print >>sys.stderr, ( " Before: {}\n After: {} ".format( original, current)) # XXX (ncoghlan): for most resources (e.g. sys.path) identity # matters at least as much as value. For others (e.g. cwd), # identity is irrelevant. Should we add a mechanism to check # for substitution in the cases where it matters? return False def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False): test_support.unload(test) if verbose: capture_stdout = None else: capture_stdout = StringIO.StringIO() test_time = 0.0 refleak = False # True if the test leaked references. try: save_stdout = sys.stdout try: if capture_stdout: sys.stdout = capture_stdout if test.startswith('test.'): abstest = test else: # Always import it from the test package abstest = 'test.' + test clear_caches() with saved_test_environment(test, verbose, quiet, pgo) as environment: start_time = time.time() the_package = __import__(abstest, globals(), locals(), []) the_module = getattr(the_package, test) # Old tests run to completion simply as a side-effect of # being imported. For tests based on unittest or doctest, # explicitly invoke their test_main() function (if it exists). 
indirect_test = getattr(the_module, "test_main", None) if indirect_test is not None: indirect_test() if huntrleaks: refleak = dash_R(the_module, test, indirect_test, huntrleaks) test_time = time.time() - start_time finally: sys.stdout = save_stdout except test_support.ResourceDenied, msg: if not quiet and not pgo: print test, "skipped --", msg sys.stdout.flush() return RESOURCE_DENIED, test_time except unittest.SkipTest, msg: if not quiet and not pgo: print test, "skipped --", msg sys.stdout.flush() return SKIPPED, test_time except KeyboardInterrupt: raise except test_support.TestFailed, msg: if not pgo: print >>sys.stderr, "test", test, "failed --", msg sys.stderr.flush() return FAILED, test_time except: type, value = sys.exc_info()[:2] if not pgo: print >>sys.stderr, "test", test, "crashed --", str(type) + ":", value sys.stderr.flush() if verbose and not pgo: traceback.print_exc(file=sys.stderr) sys.stderr.flush() return FAILED, test_time else: if refleak: return FAILED, test_time if environment.changed: return ENV_CHANGED, test_time # Except in verbose mode, tests should not print anything if verbose or huntrleaks: return PASSED, test_time output = capture_stdout.getvalue() if not output: return PASSED, test_time print "test", test, "produced unexpected output:" print "*" * 70 print output print "*" * 70 sys.stdout.flush() return FAILED, test_time def cleanup_test_droppings(testname, verbose): import stat import gc # First kill any dangling references to open files etc. gc.collect() # Try to clean up junk commonly left behind. While tests shouldn't leave # any files or directories behind, when a test fails that can be tedious # for it to arrange. The consequences can be especially nasty on Windows, # since if a test leaves a file open, it cannot be deleted by name (while # there's nothing we can do about that here either, we can display the # name of the offending test, which is a real help). for name in (test_support.TESTFN, "db_home", ): if not os.path.exists(name): continue if os.path.isdir(name): kind, nuker = "directory", shutil.rmtree elif os.path.isfile(name): kind, nuker = "file", os.unlink else: raise SystemError("os.path says %r exists but is neither " "directory nor file" % name) if verbose: print "%r left behind %s %r" % (testname, kind, name) try: # if we have chmod, fix possible permissions problems # that might prevent cleanup if (hasattr(os, 'chmod')): os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) nuker(name) except Exception, msg: print >> sys.stderr, ("%r left behind %s %r and it couldn't be " "removed: %s" % (testname, kind, name, msg)) def dash_R(the_module, test, indirect_test, huntrleaks): """Run a test multiple times, looking for reference leaks. Returns: False if the test didn't leak references; True if we detected refleaks. """ # This code is hackish and inelegant, but it seems to do the job. import copy_reg, _abcoll, _pyio if not hasattr(sys, 'gettotalrefcount'): raise Exception("Tracking reference leaks requires a debug build " "of Python") # Save current values for dash_R_cleanup() to restore. 
fs = warnings.filters[:] ps = copy_reg.dispatch_table.copy() pic = sys.path_importer_cache.copy() try: import zipimport except ImportError: zdc = None # Run unmodified on platforms without zipimport support else: zdc = zipimport._zip_directory_cache.copy() abcs = {} modules = _abcoll, _pyio for abc in [getattr(mod, a) for mod in modules for a in mod.__all__]: # XXX isinstance(abc, ABCMeta) leads to infinite recursion if not hasattr(abc, '_abc_registry'): continue for obj in abc.__subclasses__() + [abc]: abcs[obj] = obj._abc_registry.copy() if indirect_test: def run_the_test(): indirect_test() else: def run_the_test(): imp.reload(the_module) deltas = [] nwarmup, ntracked, fname = huntrleaks fname = os.path.join(test_support.SAVEDCWD, fname) repcount = nwarmup + ntracked print >> sys.stderr, "beginning", repcount, "repetitions" print >> sys.stderr, ("1234567890"*(repcount//10 + 1))[:repcount] dash_R_cleanup(fs, ps, pic, zdc, abcs) for i in range(repcount): rc_before = sys.gettotalrefcount() run_the_test() sys.stderr.write('.') dash_R_cleanup(fs, ps, pic, zdc, abcs) rc_after = sys.gettotalrefcount() if i >= nwarmup: deltas.append(rc_after - rc_before) print >> sys.stderr if any(deltas): msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas)) print >> sys.stderr, msg with open(fname, "a") as refrep: print >> refrep, msg refrep.flush() return True return False def dash_R_cleanup(fs, ps, pic, zdc, abcs): import gc, copy_reg # Restore some original values. warnings.filters[:] = fs copy_reg.dispatch_table.clear() copy_reg.dispatch_table.update(ps) sys.path_importer_cache.clear() sys.path_importer_cache.update(pic) try: import zipimport except ImportError: pass # Run unmodified on platforms without zipimport support else: zipimport._zip_directory_cache.clear() zipimport._zip_directory_cache.update(zdc) # clear type cache sys._clear_type_cache() # Clear ABC registries, restoring previously saved ABC registries. for abc, registry in abcs.items(): abc._abc_registry = registry.copy() abc._abc_cache.clear() abc._abc_negative_cache.clear() clear_caches() def clear_caches(): import gc # Clear the warnings registry, so they can be displayed again for mod in sys.modules.values(): if hasattr(mod, '__warningregistry__'): del mod.__warningregistry__ # Clear assorted module caches. # Don't worry about resetting the cache if the module is not loaded try: distutils_dir_util = sys.modules['distutils.dir_util'] except KeyError: pass else: distutils_dir_util._path_created.clear() re.purge() try: _strptime = sys.modules['_strptime'] except KeyError: pass else: _strptime._regex_cache.clear() try: urlparse = sys.modules['urlparse'] except KeyError: pass else: urlparse.clear_cache() try: urllib = sys.modules['urllib'] except KeyError: pass else: urllib.urlcleanup() try: urllib2 = sys.modules['urllib2'] except KeyError: pass else: urllib2.install_opener(None) try: dircache = sys.modules['dircache'] except KeyError: pass else: dircache.reset() try: linecache = sys.modules['linecache'] except KeyError: pass else: linecache.clearcache() try: mimetypes = sys.modules['mimetypes'] except KeyError: pass else: mimetypes._default_mime_types() try: filecmp = sys.modules['filecmp'] except KeyError: pass else: filecmp._cache.clear() try: struct = sys.modules['struct'] except KeyError: pass else: struct._clearcache() try: doctest = sys.modules['doctest'] except KeyError: pass else: doctest.master = None try: ctypes = sys.modules['ctypes'] except KeyError: pass else: ctypes._reset_cache() # Collect cyclic trash. 
gc.collect() def findtestdir(path=None): return path or os.path.dirname(__file__) or os.curdir def removepy(names): if not names: return for idx, name in enumerate(names): basename, ext = os.path.splitext(name) if ext == '.py': names[idx] = basename def count(n, word): if n == 1: return "%d %s" % (n, word) else: return "%d %ss" % (n, word) def printlist(x, width=70, indent=4): """Print the elements of iterable x to stdout. Optional arg width (default 70) is the maximum line length. Optional arg indent (default 4) is the number of blanks with which to begin each line. """ from textwrap import fill blanks = ' ' * indent # Print the sorted list: 'x' may be a '--random' list or a set() print fill(' '.join(str(elt) for elt in sorted(x)), width, initial_indent=blanks, subsequent_indent=blanks) # Map sys.platform to a string containing the basenames of tests # expected to be skipped on that platform. # # Special cases: # test_pep277 # The _ExpectedSkips constructor adds this to the set of expected # skips if not os.path.supports_unicode_filenames. # test_timeout # Controlled by test_timeout.skip_expected. Requires the network # resource and a socket module. # # Tests that are expected to be skipped everywhere except on one platform # are also handled separately. _expectations = { 'win32': """ test__locale test_bsddb185 test_bsddb3 test_commands test_crypt test_curses test_dbm test_dl test_fcntl test_fork1 test_epoll test_gdbm test_grp test_ioctl test_largefile test_kqueue test_mhlib test_openpty test_ossaudiodev test_pipes test_poll test_posix test_pty test_pwd test_resource test_signal test_spwd test_threadsignals test_timing test_wait3 test_wait4 """, 'linux2': """ test_bsddb185 test_curses test_dl test_largefile test_kqueue test_ossaudiodev """, 'unixware7': """ test_bsddb test_bsddb185 test_dl test_epoll test_largefile test_kqueue test_minidom test_openpty test_pyexpat test_sax test_sundry """, 'openunix8': """ test_bsddb test_bsddb185 test_dl test_epoll test_largefile test_kqueue test_minidom test_openpty test_pyexpat test_sax test_sundry """, 'sco_sv3': """ test_asynchat test_bsddb test_bsddb185 test_dl test_fork1 test_epoll test_gettext test_largefile test_locale test_kqueue test_minidom test_openpty test_pyexpat test_queue test_sax test_sundry test_thread test_threaded_import test_threadedtempfile test_threading """, 'riscos': """ test_asynchat test_atexit test_bsddb test_bsddb185 test_bsddb3 test_commands test_crypt test_dbm test_dl test_fcntl test_fork1 test_epoll test_gdbm test_grp test_largefile test_locale test_kqueue test_mmap test_openpty test_poll test_popen2 test_pty test_pwd test_strop test_sundry test_thread test_threaded_import test_threadedtempfile test_threading test_timing """, 'darwin': """ test__locale test_bsddb test_bsddb3 test_curses test_epoll test_gdb test_gdbm test_largefile test_locale test_kqueue test_minidom test_ossaudiodev test_poll """, 'sunos5': """ test_bsddb test_bsddb185 test_curses test_dbm test_epoll test_kqueue test_gdbm test_gzip test_openpty test_zipfile test_zlib """, 'hp-ux11': """ test_bsddb test_bsddb185 test_curses test_dl test_epoll test_gdbm test_gzip test_largefile test_locale test_kqueue test_minidom test_openpty test_pyexpat test_sax test_zipfile test_zlib """, 'atheos': """ test_bsddb185 test_curses test_dl test_gdbm test_epoll test_largefile test_locale test_kqueue test_mhlib test_mmap test_poll test_popen2 test_resource """, 'cygwin': """ test_bsddb185 test_bsddb3 test_curses test_dbm test_epoll test_ioctl test_kqueue test_largefile 
test_locale test_ossaudiodev test_socketserver """, 'os2emx': """ test_audioop test_bsddb185 test_bsddb3 test_commands test_curses test_dl test_epoll test_kqueue test_largefile test_mhlib test_mmap test_openpty test_ossaudiodev test_pty test_resource test_signal """, 'freebsd4': """ test_bsddb test_bsddb3 test_epoll test_gdbm test_locale test_ossaudiodev test_pep277 test_pty test_socketserver test_tcl test_tk test_ttk_guionly test_ttk_textonly test_timeout test_urllibnet test_multiprocessing """, 'aix5': """ test_bsddb test_bsddb185 test_bsddb3 test_bz2 test_dl test_epoll test_gdbm test_gzip test_kqueue test_ossaudiodev test_tcl test_tk test_ttk_guionly test_ttk_textonly test_zipimport test_zlib """, 'openbsd3': """ test_ascii_formatd test_bsddb test_bsddb3 test_ctypes test_dl test_epoll test_gdbm test_locale test_normalization test_ossaudiodev test_pep277 test_tcl test_tk test_ttk_guionly test_ttk_textonly test_multiprocessing """, 'netbsd3': """ test_ascii_formatd test_bsddb test_bsddb185 test_bsddb3 test_ctypes test_curses test_dl test_epoll test_gdbm test_locale test_ossaudiodev test_pep277 test_tcl test_tk test_ttk_guionly test_ttk_textonly test_multiprocessing """, } _expectations['freebsd5'] = _expectations['freebsd4'] _expectations['freebsd6'] = _expectations['freebsd4'] _expectations['freebsd7'] = _expectations['freebsd4'] _expectations['freebsd8'] = _expectations['freebsd4'] class _ExpectedSkips: def __init__(self): import os.path from test import test_timeout self.valid = False if sys.platform in _expectations: s = _expectations[sys.platform] self.expected = set(s.split()) # expected to be skipped on every platform, even Linux self.expected.add('test_linuxaudiodev') if not os.path.supports_unicode_filenames: self.expected.add('test_pep277') if test_timeout.skip_expected: self.expected.add('test_timeout') if sys.maxint == 9223372036854775807L: self.expected.add('test_imageop') if sys.platform != "darwin": MAC_ONLY = ["test_macos", "test_macostools", "test_aepack", "test_plistlib", "test_scriptpackages", "test_applesingle"] for skip in MAC_ONLY: self.expected.add(skip) elif len(u'\0'.encode('unicode-internal')) == 4: self.expected.add("test_macostools") if sys.platform != "win32": # test_sqlite is only reliable on Windows where the library # is distributed with Python WIN_ONLY = ["test_unicode_file", "test_winreg", "test_winsound", "test_startfile", "test_sqlite", "test_msilib"] for skip in WIN_ONLY: self.expected.add(skip) if sys.platform != 'irix': IRIX_ONLY = ["test_imageop", "test_al", "test_cd", "test_cl", "test_gl", "test_imgfile"] for skip in IRIX_ONLY: self.expected.add(skip) if sys.platform != 'sunos5': self.expected.add('test_sunaudiodev') self.expected.add('test_nis') if not sys.py3kwarning: self.expected.add('test_py3kwarn') self.valid = True def isvalid(self): "Return true iff _ExpectedSkips knows about the current platform." return self.valid def getexpected(self): """Return set of test names we expect to skip on current platform. self.isvalid() must be true. """ assert self.isvalid() return self.expected if __name__ == '__main__': # findtestdir() gets the dirname out of __file__, so we have to make it # absolute before changing the working directory. # For example __file__ may be relative when running trace or profile. # See issue #9323. __file__ = os.path.abspath(__file__) # sanity check assert __file__ == os.path.abspath(sys.argv[0]) # When tests are run from the Python build directory, it is best practice # to keep the test files in a subfolder. 
    # It eases the cleanup of leftover files using command "make distclean".
    if sysconfig.is_python_build():
        TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
        TEMPDIR = os.path.abspath(TEMPDIR)
        if not os.path.exists(TEMPDIR):
            os.mkdir(TEMPDIR)

    # Define a writable temp dir that will be used as cwd while running
    # the tests. The name of the dir includes the pid to allow parallel
    # testing (see the -j option).
    TESTCWD = 'test_python_{}'.format(os.getpid())
    TESTCWD = os.path.join(TEMPDIR, TESTCWD)

    # Run the tests in a context manager that temporarily changes the CWD to a
    # temporary and writable directory. If it's not possible to create or
    # change the CWD, the original CWD will be used. The original CWD is
    # available from test_support.SAVEDCWD.
    with test_support.temp_cwd(TESTCWD, quiet=True):
        main()
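# --- Illustrative sketch, not part of regrtest: the saved_test_environment
# --- class above works by snapshotting each resource on __enter__, comparing
# --- on __exit__, and restoring only when a change is detected. The helper
# --- below reduces that idea to a single resource (sys.path) and always
# --- restores, for simplicity; the name `saved_sys_path` is an assumption
# --- chosen for illustration.
import sys
import contextlib


@contextlib.contextmanager
def saved_sys_path():
    # Snapshot identity and contents, mirroring get_sys_path() above.
    saved = (id(sys.path), sys.path, sys.path[:])
    try:
        yield
    finally:
        # Restore both the binding and the contents, mirroring restore_sys_path().
        sys.path = saved[1]
        sys.path[:] = saved[2]

# Usage sketch: any sys.path changes made inside the block are undone afterwards.
# with saved_sys_path():
#     sys.path.insert(0, '/tmp/extra')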
test.py
import asyncio
from multiprocessing import Process

from pyppeteer import launch


class engine():
    def __init__(self):
        pass

    async def start(self):
        self.browser = await launch({'headless': True})
        self.page = await self.browser.newPage()

    async def goto(self, url):
        await self.page.goto(url)
        return await self.page.content()

    async def close(self):
        await self.browser.close()


urls = [
    'https://accounts.gursurdevelopers.com/class',
    'https://accounts.gursurdevelopers.com/class',
    'https://accounts.gursurdevelopers.com/class',
]


def f(en, x):
    # Runs in a child process: drives the engine's single shared page for one URL.
    asyncio.get_event_loop().run_until_complete(en.goto(x))


if __name__ == '__main__':
    en = engine()
    asyncio.get_event_loop().run_until_complete(en.start())
    # Note: the browser, page and event loop inside `en` are created in the
    # parent process; handing them to child processes like this is unreliable,
    # because the Chromium websocket connection and the asyncio loop do not
    # carry over cleanly across the fork/pickle boundary. A single-process
    # asyncio.gather() sketch follows after this file.
    for x in urls:
        print(x)
        p = Process(target=f, args=(en, x,))
        p.start()
    # asyncio.get_event_loop().run_until_complete(en.close())


# Alternative approach, kept commented out: fetch a single page end to end.
# import asyncio
# from pyppeteer import launch
#
# async def main():
#     browser = await launch({'headless': True})
#     page = await browser.newPage()
#     await page.goto('https://accounts.gursurdevelopers.com/class')
#     content = await page.content()
#     print(content)
#     await browser.close()
#
# asyncio.get_event_loop().run_until_complete(main())

# Alternative approach, kept commented out: Flask endpoint using requests_html.
# from flask import Flask
# from multiprocessing import Process
# from requests_html import HTMLSession
#
# app = Flask(__name__)
#
# def testfu(r):
#     r.html.render()
#     table = r.html.find('#mainapp', first=True)
#     print(table.text)
#     return table.text
#
# @app.route('/')
# def hello_world():
#     return 'Hello, World!'
#
# @app.route('/test')
# def dshello_world():
#     session = HTMLSession()
#     r = session.get('http://accounts.gursurdevelopers.com/')
#     p = Process(target=testfu, args=(r,))
#     p.start()
#     p.join()
#     return 'testfu'
#
# app.run()
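# --- Illustrative alternative (an assumption, not part of the script above):
# --- pyppeteer is asyncio-based, so several pages can be fetched concurrently
# --- inside one process with asyncio.gather(), giving each URL its own page
# --- instead of sharing self.page across worker processes. Sketch only.
import asyncio
from pyppeteer import launch


async def fetch_all(urls):
    browser = await launch({'headless': True})
    try:
        async def fetch(url):
            # One page per URL so concurrent navigations don't clobber each other.
            page = await browser.newPage()
            try:
                await page.goto(url)
                return await page.content()
            finally:
                await page.close()

        return await asyncio.gather(*(fetch(u) for u in urls))
    finally:
        await browser.close()

# Usage sketch:
# contents = asyncio.get_event_loop().run_until_complete(fetch_all(urls))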
tests.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals import errno import os import shutil import sys import tempfile import threading import time import unittest from datetime import datetime, timedelta from django.core.cache import cache from django.core.exceptions import SuspiciousFileOperation, SuspiciousOperation from django.core.files.base import ContentFile, File from django.core.files.storage import FileSystemStorage, get_storage_class from django.core.files.uploadedfile import ( InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile, ) from django.db.models.fields.files import FileDescriptor from django.test import ( LiveServerTestCase, SimpleTestCase, TestCase, ignore_warnings, override_settings, ) from django.test.utils import requires_tz_support from django.utils import six, timezone from django.utils._os import upath from django.utils.deprecation import RemovedInDjango20Warning from django.utils.six.moves.urllib.request import urlopen from .models import Storage, temp_storage, temp_storage_location try: import pytz except ImportError: pytz = None FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}' class GetStorageClassTests(SimpleTestCase): def test_get_filesystem_storage(self): """ get_storage_class returns the class for a storage backend name/path. """ self.assertEqual( get_storage_class('django.core.files.storage.FileSystemStorage'), FileSystemStorage) def test_get_invalid_storage_module(self): """ get_storage_class raises an error if the requested import don't exist. """ with six.assertRaisesRegex(self, ImportError, "No module named '?storage'?"): get_storage_class('storage.NonExistingStorage') def test_get_nonexisting_storage_class(self): """ get_storage_class raises an error if the requested class don't exist. """ with self.assertRaises(ImportError): get_storage_class('django.core.files.storage.NonExistingStorage') def test_get_nonexisting_storage_module(self): """ get_storage_class raises an error if the requested module don't exist. """ # Error message may or may not be the fully qualified path. with six.assertRaisesRegex(self, ImportError, "No module named '?(django.core.files.)?non_existing_storage'?"): get_storage_class('django.core.files.non_existing_storage.NonExistingStorage') class FileStorageDeconstructionTests(unittest.TestCase): def test_deconstruction(self): path, args, kwargs = temp_storage.deconstruct() self.assertEqual(path, "django.core.files.storage.FileSystemStorage") self.assertEqual(args, tuple()) self.assertEqual(kwargs, {'location': temp_storage_location}) kwargs_orig = { 'location': temp_storage_location, 'base_url': 'http://myfiles.example.com/' } storage = FileSystemStorage(**kwargs_orig) path, args, kwargs = storage.deconstruct() self.assertEqual(kwargs, kwargs_orig) # Tests for TZ-aware time methods need pytz. requires_pytz = unittest.skipIf(pytz is None, "this test requires pytz") class FileStorageTests(SimpleTestCase): storage_class = FileSystemStorage def setUp(self): self.temp_dir = tempfile.mkdtemp() self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/') # Set up a second temporary directory which is ensured to have a mixed # case name. 
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc') def tearDown(self): shutil.rmtree(self.temp_dir) shutil.rmtree(self.temp_dir2) def test_empty_location(self): """ Makes sure an exception is raised if the location is empty """ storage = self.storage_class(location='') self.assertEqual(storage.base_location, '') self.assertEqual(storage.location, upath(os.getcwd())) def test_file_access_options(self): """ Standard file access options are available, and work as expected. """ self.assertFalse(self.storage.exists('storage_test')) f = self.storage.open('storage_test', 'w') f.write('storage contents') f.close() self.assertTrue(self.storage.exists('storage_test')) f = self.storage.open('storage_test', 'r') self.assertEqual(f.read(), 'storage contents') f.close() self.storage.delete('storage_test') self.assertFalse(self.storage.exists('storage_test')) def _test_file_time_getter(self, getter): # Check for correct behavior under both USE_TZ=True and USE_TZ=False. # The tests are similar since they both set up a situation where the # system time zone, Django's TIME_ZONE, and UTC are distinct. self._test_file_time_getter_tz_handling_on(getter) self._test_file_time_getter_tz_handling_off(getter) @override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers') def _test_file_time_getter_tz_handling_on(self, getter): # Django's TZ (and hence the system TZ) is set to Africa/Algiers which # is UTC+1 and has no DST change. We can set the Django TZ to something # else so that UTC, Django's TIME_ZONE, and the system timezone are all # different. now_in_algiers = timezone.make_aware(datetime.now()) # Use a fixed offset timezone so we don't need pytz. with timezone.override(timezone.get_fixed_timezone(-300)): # At this point the system TZ is +1 and the Django TZ # is -5. The following will be aware in UTC. now = timezone.now() self.assertFalse(self.storage.exists('test.file.tz.on')) f = ContentFile('custom contents') f_name = self.storage.save('test.file.tz.on', f) self.addCleanup(self.storage.delete, f_name) dt = getter(f_name) # dt should be aware, in UTC self.assertTrue(timezone.is_aware(dt)) self.assertEqual(now.tzname(), dt.tzname()) # Check that the three timezones are indeed distinct. naive_now = datetime.now() algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now) django_offset = timezone.get_current_timezone().utcoffset(naive_now) utc_offset = timezone.utc.utcoffset(naive_now) self.assertGreater(algiers_offset, utc_offset) self.assertLess(django_offset, utc_offset) # dt and now should be the same effective time. self.assertLess(abs(dt - now), timedelta(seconds=2)) @override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers') def _test_file_time_getter_tz_handling_off(self, getter): # Django's TZ (and hence the system TZ) is set to Africa/Algiers which # is UTC+1 and has no DST change. We can set the Django TZ to something # else so that UTC, Django's TIME_ZONE, and the system timezone are all # different. now_in_algiers = timezone.make_aware(datetime.now()) # Use a fixed offset timezone so we don't need pytz. with timezone.override(timezone.get_fixed_timezone(-300)): # At this point the system TZ is +1 and the Django TZ # is -5. self.assertFalse(self.storage.exists('test.file.tz.off')) f = ContentFile('custom contents') f_name = self.storage.save('test.file.tz.off', f) self.addCleanup(self.storage.delete, f_name) dt = getter(f_name) # dt should be naive, in system (+1) TZ self.assertTrue(timezone.is_naive(dt)) # Check that the three timezones are indeed distinct. 
naive_now = datetime.now() algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now) django_offset = timezone.get_current_timezone().utcoffset(naive_now) utc_offset = timezone.utc.utcoffset(naive_now) self.assertGreater(algiers_offset, utc_offset) self.assertLess(django_offset, utc_offset) # dt and naive_now should be the same effective time. self.assertLess(abs(dt - naive_now), timedelta(seconds=2)) # If we convert dt to an aware object using the Algiers # timezone then it should be the same effective time to # now_in_algiers. _dt = timezone.make_aware(dt, now_in_algiers.tzinfo) self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2)) @requires_pytz def test_file_get_accessed_time(self): """ File storage returns a Datetime object for the last accessed time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.addCleanup(self.storage.delete, f_name) atime = self.storage.get_accessed_time(f_name) self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name)))) self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2)) @requires_pytz @requires_tz_support def test_file_get_accessed_time_timezone(self): self._test_file_time_getter(self.storage.get_accessed_time) @ignore_warnings(category=RemovedInDjango20Warning) def test_file_accessed_time(self): """ File storage returns a datetime for the last accessed time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.addCleanup(self.storage.delete, f_name) atime = self.storage.accessed_time(f_name) self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name)))) self.assertLess(datetime.now() - self.storage.accessed_time(f_name), timedelta(seconds=2)) @requires_pytz def test_file_get_created_time(self): """ File storage returns a datetime for the creation time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.addCleanup(self.storage.delete, f_name) ctime = self.storage.get_created_time(f_name) self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name)))) self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2)) @requires_pytz @requires_tz_support def test_file_get_created_time_timezone(self): self._test_file_time_getter(self.storage.get_created_time) @ignore_warnings(category=RemovedInDjango20Warning) def test_file_created_time(self): """ File storage returns a datetime for the creation time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) ctime = self.storage.created_time(f_name) self.addCleanup(self.storage.delete, f_name) self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name)))) self.assertLess(datetime.now() - self.storage.created_time(f_name), timedelta(seconds=2)) @requires_pytz def test_file_get_modified_time(self): """ File storage returns a datetime for the last modified time of a file. 
""" self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.addCleanup(self.storage.delete, f_name) mtime = self.storage.get_modified_time(f_name) self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name)))) self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2)) @requires_pytz @requires_tz_support def test_file_get_modified_time_timezone(self): self._test_file_time_getter(self.storage.get_modified_time) @ignore_warnings(category=RemovedInDjango20Warning) def test_file_modified_time(self): """ File storage returns a datetime for the last modified time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.addCleanup(self.storage.delete, f_name) mtime = self.storage.modified_time(f_name) self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name)))) self.assertLess(datetime.now() - self.storage.modified_time(f_name), timedelta(seconds=2)) def test_file_save_without_name(self): """ File storage extracts the filename from the content object if no name is given explicitly. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f.name = 'test.file' storage_f_name = self.storage.save(None, f) self.assertEqual(storage_f_name, f.name) self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name))) self.storage.delete(storage_f_name) def test_file_save_with_path(self): """ Saving a pathname should create intermediate directories as necessary. """ self.assertFalse(self.storage.exists('path/to')) self.storage.save('path/to/test.file', ContentFile('file saved with path')) self.assertTrue(self.storage.exists('path/to')) with self.storage.open('path/to/test.file') as f: self.assertEqual(f.read(), b'file saved with path') self.assertTrue(os.path.exists( os.path.join(self.temp_dir, 'path', 'to', 'test.file'))) self.storage.delete('path/to/test.file') def test_save_doesnt_close(self): with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file: file.write(b'1') file.seek(0) self.assertFalse(file.closed) self.storage.save('path/to/test.file', file) self.assertFalse(file.closed) self.assertFalse(file.file.closed) file = InMemoryUploadedFile(six.StringIO('1'), '', 'test', 'text/plain', 1, 'utf8') with file: self.assertFalse(file.closed) self.storage.save('path/to/test.file', file) self.assertFalse(file.closed) self.assertFalse(file.file.closed) def test_file_path(self): """ File storage returns the full path of a file """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name)) self.storage.delete(f_name) def test_file_url(self): """ File storage returns a url to access a given file from the Web. 
""" self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file') # should encode special chars except ~!*()' # like encodeURIComponent() JavaScript function do self.assertEqual( self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"), "/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file" ) self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c") # should translate os path separator(s) to the url path separator self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file") # #25905: remove leading slashes from file names to prevent unsafe url output self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com") self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com") self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com") self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com") self.assertEqual(self.storage.url(None), "/test_media_url/") def test_base_url(self): """ File storage returns a url even when its base_url is unset or modified. """ self.storage.base_url = None with self.assertRaises(ValueError): self.storage.url('test.file') # #22717: missing ending slash in base_url should be auto-corrected storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash') self.assertEqual( storage.url('test.file'), '%s%s' % (storage.base_url, 'test.file') ) def test_listdir(self): """ File storage returns a tuple containing directories and files. """ self.assertFalse(self.storage.exists('storage_test_1')) self.assertFalse(self.storage.exists('storage_test_2')) self.assertFalse(self.storage.exists('storage_dir_1')) self.storage.save('storage_test_1', ContentFile('custom content')) self.storage.save('storage_test_2', ContentFile('custom content')) os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1')) dirs, files = self.storage.listdir('') self.assertEqual(set(dirs), {'storage_dir_1'}) self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'}) self.storage.delete('storage_test_1') self.storage.delete('storage_test_2') os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1')) def test_file_storage_prevents_directory_traversal(self): """ File storage prevents directory traversal (files can only be accessed if they're below the storage location). """ with self.assertRaises(SuspiciousOperation): self.storage.exists('..') with self.assertRaises(SuspiciousOperation): self.storage.exists('/etc/passwd') def test_file_storage_preserves_filename_case(self): """The storage backend should preserve case of filenames.""" # Create a storage backend associated with the mixed case name # directory. other_temp_storage = self.storage_class(location=self.temp_dir2) # Ask that storage backend to store a file with a mixed case filename. mixed_case = 'CaSe_SeNsItIvE' file = other_temp_storage.open(mixed_case, 'w') file.write('storage contents') file.close() self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case)) other_temp_storage.delete(mixed_case) def test_makedirs_race_handling(self): """ File storage should be robust against directory creation race conditions. """ real_makedirs = os.makedirs # Monkey-patch os.makedirs, to simulate a normal call, a raced call, # and an error. 
def fake_makedirs(path): if path == os.path.join(self.temp_dir, 'normal'): real_makedirs(path) elif path == os.path.join(self.temp_dir, 'raced'): real_makedirs(path) raise OSError(errno.EEXIST, 'simulated EEXIST') elif path == os.path.join(self.temp_dir, 'error'): raise OSError(errno.EACCES, 'simulated EACCES') else: self.fail('unexpected argument %r' % path) try: os.makedirs = fake_makedirs self.storage.save('normal/test.file', ContentFile('saved normally')) with self.storage.open('normal/test.file') as f: self.assertEqual(f.read(), b'saved normally') self.storage.save('raced/test.file', ContentFile('saved with race')) with self.storage.open('raced/test.file') as f: self.assertEqual(f.read(), b'saved with race') # Check that OSErrors aside from EEXIST are still raised. with self.assertRaises(OSError): self.storage.save('error/test.file', ContentFile('not saved')) finally: os.makedirs = real_makedirs def test_remove_race_handling(self): """ File storage should be robust against file removal race conditions. """ real_remove = os.remove # Monkey-patch os.remove, to simulate a normal call, a raced call, # and an error. def fake_remove(path): if path == os.path.join(self.temp_dir, 'normal.file'): real_remove(path) elif path == os.path.join(self.temp_dir, 'raced.file'): real_remove(path) raise OSError(errno.ENOENT, 'simulated ENOENT') elif path == os.path.join(self.temp_dir, 'error.file'): raise OSError(errno.EACCES, 'simulated EACCES') else: self.fail('unexpected argument %r' % path) try: os.remove = fake_remove self.storage.save('normal.file', ContentFile('delete normally')) self.storage.delete('normal.file') self.assertFalse(self.storage.exists('normal.file')) self.storage.save('raced.file', ContentFile('delete with race')) self.storage.delete('raced.file') self.assertFalse(self.storage.exists('normal.file')) # Check that OSErrors aside from ENOENT are still raised. self.storage.save('error.file', ContentFile('delete with error')) with self.assertRaises(OSError): self.storage.delete('error.file') finally: os.remove = real_remove def test_file_chunks_error(self): """ Test behavior when file.chunks() is raising an error """ f1 = ContentFile('chunks fails') def failing_chunks(): raise IOError f1.chunks = failing_chunks with self.assertRaises(IOError): self.storage.save('error.file', f1) def test_delete_no_name(self): """ Calling delete with an empty name should not try to remove the base storage directory, but fail loudly (#20660). """ with self.assertRaises(AssertionError): self.storage.delete('') @override_settings( MEDIA_ROOT='media_root', MEDIA_URL='media_url/', FILE_UPLOAD_PERMISSIONS=0o777, FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777, ) def test_setting_changed(self): """ Properties using settings values as defaults should be updated on referenced settings change while specified values should be unchanged. 
""" storage = self.storage_class( location='explicit_location', base_url='explicit_base_url/', file_permissions_mode=0o666, directory_permissions_mode=0o666, ) defaults_storage = self.storage_class() settings = { 'MEDIA_ROOT': 'overriden_media_root', 'MEDIA_URL': 'overriden_media_url/', 'FILE_UPLOAD_PERMISSIONS': 0o333, 'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333, } with self.settings(**settings): self.assertEqual(storage.base_location, 'explicit_location') self.assertIn('explicit_location', storage.location) self.assertEqual(storage.base_url, 'explicit_base_url/') self.assertEqual(storage.file_permissions_mode, 0o666) self.assertEqual(storage.directory_permissions_mode, 0o666) self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT']) self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location) self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL']) self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS']) self.assertEqual( defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS'] ) class CustomStorage(FileSystemStorage): def get_available_name(self, name, max_length=None): """ Append numbers to duplicate files rather than underscores, like Trac. """ parts = name.split('.') basename, ext = parts[0], parts[1:] number = 2 while self.exists(name): name = '.'.join([basename, str(number)] + ext) number += 1 return name class CustomStorageTests(FileStorageTests): storage_class = CustomStorage def test_custom_get_available_name(self): first = self.storage.save('custom_storage', ContentFile('custom contents')) self.assertEqual(first, 'custom_storage') second = self.storage.save('custom_storage', ContentFile('more contents')) self.assertEqual(second, 'custom_storage.2') self.storage.delete(first) self.storage.delete(second) class CustomStorageLegacyDatetimeHandling(FileSystemStorage): # Use the legacy accessed_time() et al from FileSystemStorage and the # shim get_accessed_time() et al from the Storage baseclass. Both of those # raise warnings, so the testcase class ignores them all. def get_accessed_time(self, name): return super(FileSystemStorage, self).get_accessed_time(name) def get_created_time(self, name): return super(FileSystemStorage, self).get_created_time(name) def get_modified_time(self, name): return super(FileSystemStorage, self).get_modified_time(name) @ignore_warnings(category=RemovedInDjango20Warning) class CustomStorageLegacyDatetimeHandlingTests(FileStorageTests): storage_class = CustomStorageLegacyDatetimeHandling class DiscardingFalseContentStorage(FileSystemStorage): def _save(self, name, content): if content: return super(DiscardingFalseContentStorage, self)._save(name, content) return '' class DiscardingFalseContentStorageTests(FileStorageTests): storage_class = DiscardingFalseContentStorage def test_custom_storage_discarding_empty_content(self): """ When Storage.save() wraps a file-like object in File, it should include the name argument so that bool(file) evaluates to True (#26495). """ output = six.StringIO('content') self.storage.save('tests/stringio', output) self.assertTrue(self.storage.exists('tests/stringio')) with self.storage.open('tests/stringio') as f: self.assertEqual(f.read(), b'content') class FileFieldStorageTests(TestCase): def tearDown(self): shutil.rmtree(temp_storage_location) def _storage_max_filename_length(self, storage): """ Query filesystem for maximum filename length (e.g. AUFS has 242). 
""" dir_to_test = storage.location while not os.path.exists(dir_to_test): dir_to_test = os.path.dirname(dir_to_test) try: return os.pathconf(dir_to_test, 'PC_NAME_MAX') except Exception: return 255 # Should be safe on most backends def test_files(self): self.assertIsInstance(Storage.normal, FileDescriptor) # An object without a file has limited functionality. obj1 = Storage() self.assertEqual(obj1.normal.name, "") with self.assertRaises(ValueError): obj1.normal.size # Saving a file enables full functionality. obj1.normal.save("django_test.txt", ContentFile("content")) self.assertEqual(obj1.normal.name, "tests/django_test.txt") self.assertEqual(obj1.normal.size, 7) self.assertEqual(obj1.normal.read(), b"content") obj1.normal.close() # File objects can be assigned to FileField attributes, but shouldn't # get committed until the model it's attached to is saved. obj1.normal = SimpleUploadedFile("assignment.txt", b"content") dirs, files = temp_storage.listdir("tests") self.assertEqual(dirs, []) self.assertNotIn("assignment.txt", files) obj1.save() dirs, files = temp_storage.listdir("tests") self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"]) # Save another file with the same name. obj2 = Storage() obj2.normal.save("django_test.txt", ContentFile("more content")) obj2_name = obj2.normal.name six.assertRegex(self, obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX) self.assertEqual(obj2.normal.size, 12) obj2.normal.close() # Deleting an object does not delete the file it uses. obj2.delete() obj2.normal.save("django_test.txt", ContentFile("more content")) self.assertNotEqual(obj2_name, obj2.normal.name) six.assertRegex(self, obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX) obj2.normal.close() def test_filefield_read(self): # Files can be read in a little at a time, if necessary. obj = Storage.objects.create( normal=SimpleUploadedFile("assignment.txt", b"content")) obj.normal.open() self.assertEqual(obj.normal.read(3), b"con") self.assertEqual(obj.normal.read(), b"tent") self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"]) obj.normal.close() def test_filefield_write(self): # Files can be written to. obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content')) with obj.normal as normal: normal.open('wb') normal.write(b'updated') obj.refresh_from_db() self.assertEqual(obj.normal.read(), b'updated') obj.normal.close() def test_duplicate_filename(self): # Multiple files with the same name get _(7 random chars) appended to them. objs = [Storage() for i in range(2)] for o in objs: o.normal.save("multiple_files.txt", ContentFile("Same Content")) try: names = [o.normal.name for o in objs] self.assertEqual(names[0], "tests/multiple_files.txt") six.assertRegex(self, names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX) finally: for o in objs: o.delete() def test_file_truncation(self): # Given the max_length is limited, when multiple files get uploaded # under the same name, then the filename get truncated in order to fit # in _(7 random chars). When most of the max_length is taken by # dirname + extension and there are not enough characters in the # filename to truncate, an exception should be raised. objs = [Storage() for i in range(2)] filename = 'filename.ext' for o in objs: o.limited_length.save(filename, ContentFile('Same Content')) try: # Testing truncation. 
names = [o.limited_length.name for o in objs] self.assertEqual(names[0], 'tests/%s' % filename) six.assertRegex(self, names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX) # Testing exception is raised when filename is too short to truncate. filename = 'short.longext' objs[0].limited_length.save(filename, ContentFile('Same Content')) with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'): objs[1].limited_length.save(*(filename, ContentFile('Same Content'))) finally: for o in objs: o.delete() @unittest.skipIf( sys.platform.startswith('win'), "Windows supports at most 260 characters in a path.", ) def test_extended_length_storage(self): # Testing FileField with max_length > 255. Most systems have filename # length limitation of 255. Path takes extra chars. filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension. obj = Storage() obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content')) self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename) self.assertEqual(obj.extended_length.read(), b'Same Content') obj.extended_length.close() def test_filefield_default(self): # Default values allow an object to access a single file. temp_storage.save('tests/default.txt', ContentFile('default content')) obj = Storage.objects.create() self.assertEqual(obj.default.name, "tests/default.txt") self.assertEqual(obj.default.read(), b"default content") obj.default.close() # But it shouldn't be deleted, even if there are no more objects using # it. obj.delete() obj = Storage() self.assertEqual(obj.default.read(), b"default content") obj.default.close() def test_empty_upload_to(self): # upload_to can be empty, meaning it does not use subdirectory. obj = Storage() obj.empty.save('django_test.txt', ContentFile('more content')) self.assertEqual(obj.empty.name, "django_test.txt") self.assertEqual(obj.empty.read(), b"more content") obj.empty.close() def test_random_upload_to(self): # Verify the fix for #5655, making sure the directory is only # determined once. obj = Storage() obj.random.save("random_file", ContentFile("random content")) self.assertTrue(obj.random.name.endswith("/random_file")) obj.random.close() def test_custom_valid_name_callable_upload_to(self): """ Storage.get_valid_name() should be called when upload_to is a callable. 
""" obj = Storage() obj.custom_valid_name.save("random_file", ContentFile("random content")) # CustomValidNameStorage.get_valid_name() appends '_valid' to the name self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid")) obj.custom_valid_name.close() def test_filefield_pickling(self): # Push an object into the cache to make sure it pickles properly obj = Storage() obj.normal.save("django_test.txt", ContentFile("more content")) obj.normal.close() cache.set("obj", obj) self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt") def test_file_object(self): # Create sample file temp_storage.save('tests/example.txt', ContentFile('some content')) # Load it as python file object with open(temp_storage.path('tests/example.txt')) as file_obj: # Save it using storage and read its content temp_storage.save('tests/file_obj', file_obj) self.assertTrue(temp_storage.exists('tests/file_obj')) with temp_storage.open('tests/file_obj') as f: self.assertEqual(f.read(), b'some content') def test_stringio(self): # Test passing StringIO instance as content argument to save output = six.StringIO() output.write('content') output.seek(0) # Save it and read written file temp_storage.save('tests/stringio', output) self.assertTrue(temp_storage.exists('tests/stringio')) with temp_storage.open('tests/stringio') as f: self.assertEqual(f.read(), b'content') # Tests for a race condition on file saving (#4948). # This is written in such a way that it'll always pass on platforms # without threading. class SlowFile(ContentFile): def chunks(self): time.sleep(1) return super(ContentFile, self).chunks() class FileSaveRaceConditionTest(unittest.TestCase): def setUp(self): self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) self.thread = threading.Thread(target=self.save_file, args=['conflict']) def tearDown(self): shutil.rmtree(self.storage_dir) def save_file(self, name): name = self.storage.save(name, SlowFile(b"Data")) def test_race_condition(self): self.thread.start() self.save_file('conflict') self.thread.join() files = sorted(os.listdir(self.storage_dir)) self.assertEqual(files[0], 'conflict') six.assertRegex(self, files[1], 'conflict_%s' % FILE_SUFFIX_REGEX) @unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.") class FileStoragePermissions(unittest.TestCase): def setUp(self): self.umask = 0o027 self.old_umask = os.umask(self.umask) self.storage_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.storage_dir) os.umask(self.old_umask) @override_settings(FILE_UPLOAD_PERMISSIONS=0o654) def test_file_upload_permissions(self): self.storage = FileSystemStorage(self.storage_dir) name = self.storage.save("the_file", ContentFile("data")) actual_mode = os.stat(self.storage.path(name))[0] & 0o777 self.assertEqual(actual_mode, 0o654) @override_settings(FILE_UPLOAD_PERMISSIONS=None) def test_file_upload_default_permissions(self): self.storage = FileSystemStorage(self.storage_dir) fname = self.storage.save("some_file", ContentFile("data")) mode = os.stat(self.storage.path(fname))[0] & 0o777 self.assertEqual(mode, 0o666 & ~self.umask) @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765) def test_file_upload_directory_permissions(self): self.storage = FileSystemStorage(self.storage_dir) name = self.storage.save("the_directory/the_file", ContentFile("data")) dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777 self.assertEqual(dir_mode, 0o765) 
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None) def test_file_upload_directory_default_permissions(self): self.storage = FileSystemStorage(self.storage_dir) name = self.storage.save("the_directory/the_file", ContentFile("data")) dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777 self.assertEqual(dir_mode, 0o777 & ~self.umask) class FileStoragePathParsing(unittest.TestCase): def setUp(self): self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) def tearDown(self): shutil.rmtree(self.storage_dir) def test_directory_with_dot(self): """Regression test for #9610. If the directory name contains a dot and the file name doesn't, make sure we still mangle the file name instead of the directory name. """ self.storage.save('dotted.path/test', ContentFile("1")) self.storage.save('dotted.path/test', ContentFile("2")) files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path'))) self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path'))) self.assertEqual(files[0], 'test') six.assertRegex(self, files[1], 'test_%s' % FILE_SUFFIX_REGEX) def test_first_character_dot(self): """ File names with a dot as their first character don't have an extension, and the underscore should get added to the end. """ self.storage.save('dotted.path/.test', ContentFile("1")) self.storage.save('dotted.path/.test', ContentFile("2")) files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path'))) self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path'))) self.assertEqual(files[0], '.test') six.assertRegex(self, files[1], '.test_%s' % FILE_SUFFIX_REGEX) class ContentFileStorageTestCase(unittest.TestCase): def setUp(self): self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) def tearDown(self): shutil.rmtree(self.storage_dir) def test_content_saving(self): """ Test that ContentFile can be saved correctly with the filesystem storage, both if it was initialized with string or unicode content""" self.storage.save('bytes.txt', ContentFile(b"content")) self.storage.save('unicode.txt', ContentFile("español")) @override_settings(ROOT_URLCONF='file_storage.urls') class FileLikeObjectTestCase(LiveServerTestCase): """ Test file-like objects (#15644). """ available_apps = [] def setUp(self): self.temp_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(location=self.temp_dir) def tearDown(self): shutil.rmtree(self.temp_dir) def test_urllib2_urlopen(self): """ Test the File storage API with a file like object coming from urllib2.urlopen() """ file_like_object = urlopen(self.live_server_url + '/') f = File(file_like_object) stored_filename = self.storage.save("remote_file.html", f) remote_file = urlopen(self.live_server_url + '/') with self.storage.open(stored_filename) as stored_file: self.assertEqual(stored_file.read(), remote_file.read())
server.py
import sched import threading import time import requests from requests.compat import urljoin from flask import Flask, request, abort, Response, jsonify from flask_cors import CORS from models import DownloadSpec, DownloadRequest from sources_manager import SourcesManager REQUEST_KEEPING_TIMEOUT_SEC = 60 * 60 * 12 # 12 hours app = Flask(__name__) app.url_map.strict_slashes = False CORS(app) request_id_to_agent = {} scheduler = sched.scheduler(time.time, time.sleep) sources_manager = SourcesManager() def _build_url(host, port, path, protocol='http'): return urljoin(f'{protocol}://{host}:{port}', path) def _build_new_response_from_agent_response(agent_response): return Response(response=agent_response.content, status=agent_response.status_code, headers=dict(agent_response.headers)) def _forward_request_by_id(request_id): if request_id not in request_id_to_agent: abort(404) agent_host, agent_port = request_id_to_agent[request_id] agent_url = _build_url(agent_host, agent_port, request.path) agent_response = requests.request(request.method, agent_url, headers=request.headers, data=request.data) return _build_new_response_from_agent_response(agent_response) def _delete_request(request_id): del request_id_to_agent[request_id] def _schedule_request_deletion(request_id): scheduler.enter(REQUEST_KEEPING_TIMEOUT_SEC, 1, _delete_request, (request_id,)) scheduler.run() def _get_agent_host_port_by_spec(spec: DownloadSpec): try: if not spec: abort(400, 'Download spec must be specified.') agent_key = sources_manager.get_agent_key_for_spec(spec) except ValueError: abort(400, 'No agent can satisfy the given spec. Head over to /sources to list supported sources.') return sources_manager.get_host_port_for_agent(agent_key) @app.route('/search', methods=['POST']) def search(): spec = DownloadSpec.from_dict(request.json) agent_host, agent_port = _get_agent_host_port_by_spec(spec) agent_url = _build_url(agent_host, agent_port, request.path) agent_response = requests.request(request.method, agent_url, headers=request.headers, data=request.data) return _build_new_response_from_agent_response(agent_response) @app.route('/download', methods=['POST']) def submit_download_request(): dl_req = DownloadRequest.from_dict(request.json) agent_host, agent_port = _get_agent_host_port_by_spec(dl_req.spec) agent_url = _build_url(agent_host, agent_port, request.path) agent_response = requests.request(request.method, agent_url, headers=request.headers, data=request.data) resp_json = agent_response.json() returned_dl_req = DownloadRequest.from_dict(resp_json) request_id_to_agent[returned_dl_req.id] = (agent_host, agent_port) job_thread = threading.Thread(target=_schedule_request_deletion, args=(returned_dl_req.id,)) job_thread.start() return _build_new_response_from_agent_response(agent_response) @app.route('/download/<string:request_id>', methods=['GET']) def get_download_request(request_id): return _forward_request_by_id(request_id) @app.route('/download/<string:request_id>/files', methods=['GET']) def get_download_files(request_id): response = _forward_request_by_id(request_id) response.headers['Access-Control-Expose-Headers'] = 'Content-Disposition' return response @app.route('/sources', methods=['GET']) def get_supported_sources(): return jsonify(sources_manager.get_supported_sources()) if __name__ == '__main__': app.run(host='0.0.0.0', port=8080)
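# --- Illustrative client sketch for the gateway above. Assumptions: the
# --- gateway runs on localhost:8080 (as in __main__ above), and the JSON
# --- field names used in the request bodies are placeholders -- the real
# --- shapes are defined by DownloadSpec/DownloadRequest in the `models`
# --- module, which is not shown in this file.
import requests

BASE = 'http://localhost:8080'

# List the sources the configured agents support.
print(requests.get(BASE + '/sources').json())

# Submit a download request. The gateway picks an agent that can satisfy the
# spec, forwards the call, and remembers which agent owns the returned id.
payload = {'spec': {'source': 'example-source', 'query': 'some item'}}  # placeholder fields
resp = requests.post(BASE + '/download', json=payload)
request_id = resp.json()['id']  # assumes the serialized DownloadRequest exposes an 'id'

# Subsequent lookups with that id are forwarded to the same agent until the
# 12-hour REQUEST_KEEPING_TIMEOUT_SEC mapping expires.
status = requests.get(BASE + '/download/' + request_id).json()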
Camera.py
#!/usr/bin/env python3 """ Programmer: Chris Blanks Last Edited: March 2019 Project: Automated Self-Serving System Purpose: This script defines the Camera class. It inherits the basic attributes of the peripheral device class, so that it can have a standard interface that a MainApp instance can use. Also it uses UtilityFuncs functions in order to apply filters to frames captured by a PiCamera. """ #Built-Ins import os import time import threading import datetime import random import tkinter as tk #3rd Party import imutils import cv2 from PIL import Image, ImageTk from imutils.video import VideoStream #My Modules import UtilityFuncs as UF from PeripheralDevice import PeripheralDevice class Camera(PeripheralDevice): ALIAS_NAME = "UF" #concatenated with strings of functions def __init__(self,main_app_instance): super().__init__(main_app_instance) #PeripheralDevice info self.name = "camera" self.state = "off" #not on until threading starts self.pin_number = None self.buffer = None #will pass images through this variable self.buffer_data_type = "ImageTk's PhotoImages" self.face_xml_path = "{}/haarcascade_frontalface_default.xml".format(self.main_app.CASCADES_PATH) self.eyes_xml_path = "{}/haarcascade_eye.xml".format(self.main_app.CASCADES_PATH) self.face_cascade = cv2.CascadeClassifier(self.face_xml_path) #takes about 0.2 seconds to complete self.eyes_cascade = cv2.CascadeClassifier(self.eyes_xml_path) #another performance hit.. oh no! def startThreading(self,video_tk_label): """Starts thread for reading camera input. Takes video_tk_label arg for continuously updating a Tk label's image parameter in order to show the Picamera's video output.""" self.vs = VideoStream(usePiCamera=1).start() #video stream object self.state = "enabled" self.MAX_WIDTH = 500 #sets max width of frames self.frame = None self.panel = None self.thread = None #video will have to run a separate thread draw_funcs = [] #will store name of functions as strings format_str = "{}.{}(self.frame,self.buffer,self.face_cascade)" format_str2 = "{}.{}(self.frame,self.buffer,self.face_cascade,self.eyes_cascade)" [draw_funcs.append( dir(UF)[j] ) for j in range(0, len(dir(UF))-1) if "draw" in dir(UF)[j] ] #searches for any items in UtilityFuncs that has "draw" in its name face_funcs = [format_str.format(self.ALIAS_NAME,item) for item in draw_funcs if "Eyes" not in item] eye_funcs = [format_str2.format(self.ALIAS_NAME,item) for item in draw_funcs if "Eyes" in item] all_funcs = face_funcs + eye_funcs #after doing separate formatting, select one for the current filter selection = random.randint(0,len(all_funcs)-1) self.current_filter = all_funcs[selection] print("\nFilter function to execute is: {}\n".format(self.current_filter)) self.stopEvent = threading.Event() #controls exit behavior of GUI self.thread = threading.Thread(target=self.updateFrame,kwargs={'tk_label': video_tk_label}) self.thread.start() #starts a separate thread to avoid conflicts w/ GUI def updateFrame(self,tk_label): """Starts an infinite loop in the thread that will read each frame from the Picamera and display it in the GUI application.""" self.panel = tk_label time.sleep(2.0) #allow the camera to warm up try: initialFrame = True while not self.stopEvent.is_set() : self.frame = self.vs.read() self.frame = imutils.resize(self.frame,width=self.MAX_WIDTH) #cv2 makes images use BGR color space by default, but #need RGB for Image objects self.buffer = cv2.cvtColor(self.frame,cv2.COLOR_BGR2RGB) exec(self.current_filter) #executes the function in the string self.buffer = 
Image.fromarray(self.buffer) #converts the Mat array to a PIL Image
                self.buffer = ImageTk.PhotoImage(self.buffer) #image is now Tk compatible

                if initialFrame:
                    self.panel.configure(image=self.buffer)
                    self.panel.image = self.buffer
                    self.panel.pack(padx=10,pady=10)
                    initialFrame = False
                else:
                    self.panel.configure(image=self.buffer)
                    self.panel.image = self.buffer #needed so the image will be displayed

        except RuntimeError:
            print("Runtime error!")

    def onExit(self):
        """Performs the exit behavior."""
        print("Exiting...")
        self.stopEvent.set() #when set, the continuous loop in updateFrame stops
        self.vs.stop()       #stop the video stream
        self.state = "off"
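
# --- Added usage sketch (not part of the original Camera.py) -----------------
# A minimal way to exercise the class above, assuming the MainApp instance only
# needs to expose CASCADES_PATH and that OpenCV's haarcascade XML files live at
# the path shown; _DemoApp and that path are assumptions made for illustration.
if __name__ == "__main__":
    class _DemoApp:
        CASCADES_PATH = "/usr/share/opencv4/haarcascades"   # assumed install path

    root = tk.Tk()
    video_label = tk.Label(root)
    cam = Camera(_DemoApp())
    cam.startThreading(video_label)   # spawns updateFrame() on a worker thread
    root.protocol("WM_DELETE_WINDOW", lambda: (cam.onExit(), root.destroy()))
    root.mainloop()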
threads.py
"""Threads: When you use time.sleep() your program cannot do anything else, unless you use threads. A single-thread program is like placing one finger on a line of code, then moving to the next. A multi-threaded program has multiple "fingers" """ import threading, time def pause(): time.sleep(5) print('Wake up! ' + threading.currentThread().name) print('Program start') # main A = threading.Thread(target=pause) # second A.start() B = threading.Thread(target=pause, name='B') # third B.start() print('Program end') # main # Program start # Program end # Wake up! Thread-1 # Wake up! B
thumbnail_maker.py
# thumbnail_maker.py
import time
import os
import logging
from urllib.parse import urlparse
from urllib.request import urlretrieve
from queue import Queue, Empty
from threading import Thread

import PIL
from PIL import Image

FORMAT = "[%(threadName)s, %(asctime)s, %(levelname)s] %(message)s"
logging.basicConfig(filename='logfile.log', level=logging.DEBUG, format=FORMAT)


class ThumbnailMakerService(object):
    def __init__(self, home_dir='.'):
        self.home_dir = home_dir
        self.input_dir = self.home_dir + os.path.sep + 'incoming'
        self.output_dir = self.home_dir + os.path.sep + 'outgoing'
        self.dl_queue = Queue()
        self.img_queue = Queue()

    def download_image(self):
        while not self.dl_queue.empty():
            try:
                url = self.dl_queue.get(block=False)
                img_filename = urlparse(url).path.split('/')[-1]
                urlretrieve(url, self.input_dir + os.path.sep + img_filename)
                self.img_queue.put(img_filename)

                self.dl_queue.task_done()
            except Empty:
                # queue.Empty is raised by get(block=False) when the queue is drained
                logging.info("Queue empty")

    # def download_images(self, img_url_list):
    #     # validate inputs
    #     if not img_url_list:
    #         return
    #     os.makedirs(self.input_dir, exist_ok=True)
    #
    #     logging.info("beginning image downloads")
    #
    #     start = time.perf_counter()
    #     for url in img_url_list:
    #         # download each image and save to the input dir
    #         img_filename = urlparse(url).path.split('/')[-1]
    #         urlretrieve(url, self.input_dir + os.path.sep + img_filename)
    #         self.img_queue.put(img_filename)
    #     end = time.perf_counter()
    #     self.img_queue.put(None)
    #
    #     logging.info("downloaded {} images in {} seconds".format(len(img_url_list), end - start))

    def perform_resizing(self):
        # validate inputs
        os.makedirs(self.output_dir, exist_ok=True)

        logging.info("beginning image resizing")
        target_sizes = [32, 64, 200]
        num_images = len(os.listdir(self.input_dir))

        start = time.perf_counter()
        while True:
            filename = self.img_queue.get()
            if filename:
                logging.info("resizing image {}".format(filename))
                orig_img = Image.open(self.input_dir + os.path.sep + filename)
                for basewidth in target_sizes:
                    img = orig_img
                    # calculate target height of the resized image to maintain the aspect ratio
                    wpercent = (basewidth / float(img.size[0]))
                    hsize = int((float(img.size[1]) * float(wpercent)))
                    # perform resizing
                    img = img.resize((basewidth, hsize), PIL.Image.LANCZOS)

                    # save the resized image to the output dir with a modified file name
                    new_filename = os.path.splitext(filename)[0] + \
                        '_' + str(basewidth) + os.path.splitext(filename)[1]
                    img.save(self.output_dir + os.path.sep + new_filename)

                os.remove(self.input_dir + os.path.sep + filename)
                logging.info("done resizing")
                self.img_queue.task_done()
            else:
                # a None sentinel signals that downloading has finished
                self.img_queue.task_done()
                break
        end = time.perf_counter()

        logging.info("created {} thumbnails in {} seconds".format(num_images, end - start))

    def make_thumbnails(self, img_url_list):
        logging.info("START make_thumbnails")
        start = time.perf_counter()

        # make sure the download target directory exists before the worker threads start
        os.makedirs(self.input_dir, exist_ok=True)

        for img_url in img_url_list:
            self.dl_queue.put(img_url)

        num_dl_threads = 4
        for _ in range(num_dl_threads):
            t = Thread(target=self.download_image)
            t.start()

        t2 = Thread(target=self.perform_resizing)
        t2.start()

        self.dl_queue.join()
        self.img_queue.put(None)
        t2.join()

        end = time.perf_counter()
        logging.info("END make_thumbnails in {} seconds".format(end - start))
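
# Usage sketch (added as an example; the URLs are placeholders, not real image
# locations): make_thumbnails() queues the URLs, downloads them on four worker
# threads and resizes them on a fifth, which is the producer/consumer split
# implemented above.
if __name__ == "__main__":
    demo_urls = [
        "https://example.com/images/photo1.jpg",   # placeholder URL
        "https://example.com/images/photo2.jpg",   # placeholder URL
    ]
    ThumbnailMakerService(home_dir='.').make_thumbnails(demo_urls)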
mixins.py
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins # Copyright (c) 2018-2019 Ben Nuttall <ben@bennuttall.com> # Copyright (c) 2016-2019 Dave Jones <dave@waveform.org.uk> # Copyright (c) 2016 Andrew Scheller <github@loowis.durge.org> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import ( unicode_literals, print_function, absolute_import, division, ) nstr = str str = type('') import inspect import weakref from functools import wraps, partial from threading import Event from collections import deque try: from statistics import median except ImportError: from .compat import median import warnings from .threads import GPIOThread from .exc import ( BadEventHandler, BadWaitTime, BadQueueLen, DeviceClosed, CallbackSetToNone, ) callback_warning = ( 'The callback was set to None. This may have been unintentional ' 'e.g. btn.when_pressed = pressed() instead of btn.when_pressed = pressed' ) class ValuesMixin(object): """ Adds a :attr:`values` property to the class which returns an infinite generator of readings from the :attr:`~Device.value` property. There is rarely a need to use this mixin directly as all base classes in GPIO Zero include it. .. note:: Use this mixin *first* in the parent class list. """ @property def values(self): """ An infinite iterator of values read from :attr:`value`. """ while True: try: yield self.value except DeviceClosed: break class SourceMixin(object): """ Adds a :attr:`source` property to the class which, given an iterable or a :class:`ValuesMixin` descendent, sets :attr:`~Device.value` to each member of that iterable until it is exhausted. This mixin is generally included in novel output devices to allow their state to be driven from another device. .. note:: Use this mixin *first* in the parent class list. 
""" def __init__(self, *args, **kwargs): self._source = None self._source_thread = None self._source_delay = 0.01 super(SourceMixin, self).__init__(*args, **kwargs) def close(self): self.source = None super(SourceMixin, self).close() def _copy_values(self, source): for v in source: self.value = v if self._source_thread.stopping.wait(self._source_delay): break @property def source_delay(self): """ The delay (measured in seconds) in the loop used to read values from :attr:`source`. Defaults to 0.01 seconds which is generally sufficient to keep CPU usage to a minimum while providing adequate responsiveness. """ return self._source_delay @source_delay.setter def source_delay(self, value): if value < 0: raise BadWaitTime('source_delay must be 0 or greater') self._source_delay = float(value) @property def source(self): """ The iterable to use as a source of values for :attr:`value`. """ return self._source @source.setter def source(self, value): if getattr(self, '_source_thread', None): self._source_thread.stop() self._source_thread = None if isinstance(value, ValuesMixin): value = value.values self._source = value if value is not None: self._source_thread = GPIOThread(target=self._copy_values, args=(value,)) self._source_thread.start() class SharedMixin(object): """ This mixin marks a class as "shared". In this case, the meta-class (GPIOMeta) will use :meth:`_shared_key` to convert the constructor arguments to an immutable key, and will check whether any existing instances match that key. If they do, they will be returned by the constructor instead of a new instance. An internal reference counter is used to determine how many times an instance has been "constructed" in this way. When :meth:`~Device.close` is called, an internal reference counter will be decremented and the instance will only close when it reaches zero. """ _instances = {} def __del__(self): self._refs = 0 super(SharedMixin, self).__del__() @classmethod def _shared_key(cls, *args, **kwargs): """ Given the constructor arguments, returns an immutable key representing the instance. The default simply assumes all positional arguments are immutable. """ return args class EventsMixin(object): """ Adds edge-detected :meth:`when_activated` and :meth:`when_deactivated` events to a device based on changes to the :attr:`~Device.is_active` property common to all devices. Also adds :meth:`wait_for_active` and :meth:`wait_for_inactive` methods for level-waiting. .. note:: Note that this mixin provides no means of actually firing its events; call :meth:`_fire_events` in sub-classes when device state changes to trigger the events. This should also be called once at the end of initialization to set initial states. """ def __init__(self, *args, **kwargs): super(EventsMixin, self).__init__(*args, **kwargs) self._active_event = Event() self._inactive_event = Event() self._when_activated = None self._when_deactivated = None self._last_active = None self._last_changed = self.pin_factory.ticks() def wait_for_active(self, timeout=None): """ Pause the script until the device is activated, or the timeout is reached. :type timeout: float or None :param timeout: Number of seconds to wait before proceeding. If this is :data:`None` (the default), then wait indefinitely until the device is active. """ return self._active_event.wait(timeout) def wait_for_inactive(self, timeout=None): """ Pause the script until the device is deactivated, or the timeout is reached. :type timeout: float or None :param timeout: Number of seconds to wait before proceeding. 
If this is :data:`None` (the default), then wait indefinitely until the device is inactive. """ return self._inactive_event.wait(timeout) @property def when_activated(self): """ The function to run when the device changes state from inactive to active. This can be set to a function which accepts no (mandatory) parameters, or a Python function which accepts a single mandatory parameter (with as many optional parameters as you like). If the function accepts a single mandatory parameter, the device that activated will be passed as that parameter. Set this property to :data:`None` (the default) to disable the event. """ return self._when_activated @when_activated.setter def when_activated(self, value): if self.when_activated is None and value is None: warnings.warn(CallbackSetToNone(callback_warning)) self._when_activated = self._wrap_callback(value) @property def when_deactivated(self): """ The function to run when the device changes state from active to inactive. This can be set to a function which accepts no (mandatory) parameters, or a Python function which accepts a single mandatory parameter (with as many optional parameters as you like). If the function accepts a single mandatory parameter, the device that deactivated will be passed as that parameter. Set this property to :data:`None` (the default) to disable the event. """ return self._when_deactivated @when_deactivated.setter def when_deactivated(self, value): if self.when_deactivated is None and value is None: warnings.warn(CallbackSetToNone(callback_warning)) self._when_deactivated = self._wrap_callback(value) @property def active_time(self): """ The length of time (in seconds) that the device has been active for. When the device is inactive, this is :data:`None`. """ if self._active_event.is_set(): return self.pin_factory.ticks_diff(self.pin_factory.ticks(), self._last_changed) else: return None @property def inactive_time(self): """ The length of time (in seconds) that the device has been inactive for. When the device is active, this is :data:`None`. """ if self._inactive_event.is_set(): return self.pin_factory.ticks_diff(self.pin_factory.ticks(), self._last_changed) else: return None def _wrap_callback(self, fn): if fn is None: return None elif not callable(fn): raise BadEventHandler('value must be None or a callable') # If fn is wrapped with partial (i.e. partial, partialmethod, or wraps # has been used to produce it) we need to dig out the "real" function # that's been wrapped along with all the mandatory positional args # used in the wrapper so we can test the binding args = () wrapped_fn = fn while isinstance(wrapped_fn, partial): args = wrapped_fn.args + args wrapped_fn = wrapped_fn.func if inspect.isbuiltin(wrapped_fn): # We can't introspect the prototype of builtins. In this case we # assume that the builtin has no (mandatory) parameters; this is # the most reasonable assumption on the basis that pre-existing # builtins have no knowledge of gpiozero, and the sole parameter # we would pass is a gpiozero object return fn else: # Try binding ourselves to the argspec of the provided callable. # If this works, assume the function is capable of accepting no # parameters try: inspect.getcallargs(wrapped_fn, *args) return fn except TypeError: try: # If the above fails, try binding with a single parameter # (ourselves). 
If this works, wrap the specified callback
                    inspect.getcallargs(wrapped_fn, *(args + (self,)))
                    @wraps(fn)
                    def wrapper():
                        return fn(self)
                    return wrapper
                except TypeError:
                    raise BadEventHandler(
                        'value must be a callable which accepts up to one '
                        'mandatory parameter')

    def _fire_activated(self):
        # These methods are largely here to be overridden by descendents
        if self.when_activated:
            self.when_activated()

    def _fire_deactivated(self):
        # These methods are largely here to be overridden by descendents
        if self.when_deactivated:
            self.when_deactivated()

    def _fire_events(self, ticks, new_active):
        # NOTE: in contrast to the pin when_changed event, this method takes
        # ticks and *is_active* (i.e. the device's .is_active) as opposed to a
        # pin's *state*.
        old_active, self._last_active = self._last_active, new_active
        if old_active is None:
            # Initial "indeterminate" state; set events but don't fire
            # callbacks as there's not necessarily an edge
            if new_active:
                self._active_event.set()
            else:
                self._inactive_event.set()
        elif old_active != new_active:
            self._last_changed = ticks
            if new_active:
                self._inactive_event.clear()
                self._active_event.set()
                self._fire_activated()
            else:
                self._active_event.clear()
                self._inactive_event.set()
                self._fire_deactivated()


class HoldMixin(EventsMixin):
    """
    Extends :class:`EventsMixin` to add the :attr:`when_held` event and the
    machinery to fire that event repeatedly (when :attr:`hold_repeat` is
    :data:`True`) at intervals defined by :attr:`hold_time`.
    """
    def __init__(self, *args, **kwargs):
        self._hold_thread = None
        super(HoldMixin, self).__init__(*args, **kwargs)
        self._when_held = None
        self._held_from = None
        self._hold_time = 1
        self._hold_repeat = False
        self._hold_thread = HoldThread(self)

    def close(self):
        if self._hold_thread is not None:
            self._hold_thread.stop()
        self._hold_thread = None
        super(HoldMixin, self).close()

    def _fire_activated(self):
        if self._hold_thread is not None:
            super(HoldMixin, self)._fire_activated()
            self._hold_thread.holding.set()

    def _fire_deactivated(self):
        self._held_from = None
        super(HoldMixin, self)._fire_deactivated()

    def _fire_held(self):
        if self.when_held:
            self.when_held()

    @property
    def when_held(self):
        """
        The function to run when the device has remained active for
        :attr:`hold_time` seconds.

        This can be set to a function which accepts no (mandatory) parameters,
        or a Python function which accepts a single mandatory parameter (with
        as many optional parameters as you like). If the function accepts a
        single mandatory parameter, the device that activated will be passed
        as that parameter.

        Set this property to :data:`None` (the default) to disable the event.
        """
        return self._when_held

    @when_held.setter
    def when_held(self, value):
        self._when_held = self._wrap_callback(value)

    @property
    def hold_time(self):
        """
        The length of time (in seconds) to wait after the device is activated,
        until executing the :attr:`when_held` handler. If :attr:`hold_repeat`
        is True, this is also the length of time between invocations of
        :attr:`when_held`.
        """
        return self._hold_time

    @hold_time.setter
    def hold_time(self, value):
        if value < 0:
            raise BadWaitTime('hold_time must be 0 or greater')
        self._hold_time = float(value)

    @property
    def hold_repeat(self):
        """
        If :data:`True`, :attr:`when_held` will be executed repeatedly with
        :attr:`hold_time` seconds between each invocation.
""" return self._hold_repeat @hold_repeat.setter def hold_repeat(self, value): self._hold_repeat = bool(value) @property def is_held(self): """ When :data:`True`, the device has been active for at least :attr:`hold_time` seconds. """ return self._held_from is not None @property def held_time(self): """ The length of time (in seconds) that the device has been held for. This is counted from the first execution of the :attr:`when_held` event rather than when the device activated, in contrast to :attr:`~EventsMixin.active_time`. If the device is not currently held, this is :data:`None`. """ if self._held_from is not None: return self.pin_factory.ticks_diff(self.pin_factory.ticks(), self._held_from) else: return None class HoldThread(GPIOThread): """ Extends :class:`GPIOThread`. Provides a background thread that repeatedly fires the :attr:`HoldMixin.when_held` event as long as the owning device is active. """ def __init__(self, parent): super(HoldThread, self).__init__( target=self.held, args=(weakref.proxy(parent),)) self.holding = Event() self.start() def held(self, parent): try: while not self.stopping.is_set(): if self.holding.wait(0.1): self.holding.clear() while not ( self.stopping.is_set() or parent._inactive_event.wait(parent.hold_time) ): if parent._held_from is None: parent._held_from = parent.pin_factory.ticks() parent._fire_held() if not parent.hold_repeat: break except ReferenceError: # Parent is dead; time to die! pass class GPIOQueue(GPIOThread): """ Extends :class:`GPIOThread`. Provides a background thread that monitors a device's values and provides a running *average* (defaults to median) of those values. If the *parent* device includes the :class:`EventsMixin` in its ancestry, the thread automatically calls :meth:`~EventsMixin._fire_events`. """ def __init__( self, parent, queue_len=5, sample_wait=0.0, partial=False, average=median, ignore=None): assert callable(average) super(GPIOQueue, self).__init__(target=self.fill) if queue_len < 1: raise BadQueueLen('queue_len must be at least one') if sample_wait < 0: raise BadWaitTime('sample_wait must be 0 or greater') if ignore is None: ignore = set() self.queue = deque(maxlen=queue_len) self.partial = bool(partial) self.sample_wait = float(sample_wait) self.full = Event() self.parent = weakref.proxy(parent) self.average = average self.ignore = ignore @property def value(self): if not self.partial: self.full.wait() try: return self.average(self.queue) except (ZeroDivisionError, ValueError): # No data == inactive value return 0.0 def fill(self): try: while not self.stopping.wait(self.sample_wait): value = self.parent._read() if value not in self.ignore: self.queue.append(value) if not self.full.is_set() and len(self.queue) >= self.queue.maxlen: self.full.set() if (self.partial or self.full.is_set()) and isinstance(self.parent, EventsMixin): self.parent._fire_events(self.parent.pin_factory.ticks(), self.parent.is_active) except ReferenceError: # Parent is dead; time to die! pass
demo.py
#!/usr/bin/python ''' Demo for depth display - for Siggraph E-Tech submission David Dunn Feb 2017 - created www.qenops.com ''' __author__ = ('David Dunn') __version__ = '1.0' import dDisplay as dd import dDisplay.varifocal as vf import dGraph as dg import dGraph.test.test2 as test import dGraph.ui as ui import multiprocessing as mp import numpy as np import time, math TIMING = False #TIMING = True #SIDE = 'Right' SIDE = 'Left' WINDOWS = [ { #"name": 'HMD Left', "name": 'HMD Right', #"location": (0, 0), #"location": (3266, 1936), # px coordinates of the startup screen for window location "location": (2640, 1936), # px coordinates of the startup screen for window location "size": (830, 800), # px size of the startup screen for centering "center": (290,216), # center of the display "refresh_rate": 60, # refreshrate of the display for precise time measuring "px_size_mm": 0.09766, # px size of the display in mm "distance_cm": 20, # distance from the viewer in cm, #"is_hmd": False, #"warp_path": 'data/calibration/newRight/', }, ] if SIDE == 'Left': WINDOWS[0]['name'] = 'HMD Left' WINDOWS[0]['location'] = (3266, 1936) START = 0. DEPTHS = [20,50,700] def setupDisplay(): led = False camera = 0 points = (( 0, 332.8, 229.4), ( 20, 314.2, 301.8), (100, 306.0, 323.8), (900, 290.1, 362.1), ) #dist = [p[0] for p in points] #ledX = [p[1] for p in points] #ledY = [p[2] for p in points] #dist = ( 0, 25, 33, 50, 66, 100, 150, 200,)# 500, 700) dist = None signal=None #ledX = (313.9,298.1,297.4,296.3,294.5,293.9,292.7,292.5,)# 292.7,292.7) #ledY = (227.9,279.7,281.2,283.2,286.9,287.9,290.0,290.5,)# 290.0,290.0) ledX = None ledY = None bounds = (0,1000,0,1000) minArea = 50 maxArea = 390 minThreshold=24 maxThreshold=200 thresholdStep=10 minDistance = 10 #minRepeatablilty=2 #circularity=True #minCircularity=80 port = '/dev/ttyACM0' display = vf.VarifocalDisplay(port=port,led=led,camera=camera,dist=dist,signal=signal,ledX=ledX,ledY=ledY,bounds=bounds,minArea=minArea,maxArea=maxArea,minThreshold=minThreshold,maxThreshold=maxThreshold, thresholdStep=thresholdStep) return display def runDisplay(display, requestedDepth, stillRunning): currentDepth = 20 signal = np.array([[ 0, 18,19.5], [-300, 0,10.5], [-300,-11, 0]]) while stillRunning.value: if requestedDepth.value != currentDepth: period = signal[DEPTHS.index(currentDepth),DEPTHS.index(requestedDepth.value)] currentDepth = requestedDepth.value display.sendSignal(period) display.close() def animate(renderStack): now = time.time()-START y = math.sin(now*math.pi) x = math.cos(now*math.pi*2)/4 #offset = np.array((x,y,0))*4 #rotate = np.array((5.,-15.,0.)) + offset #renderStack.objects['teapot'].setRotate(*rotate) renderStack.objects['teapot'].rotate += np.array((x,y,0.)) def addInput(requestedDepth,renderStack): ui.add_key_callback(switchDepths, ui.KEY_1, value=20, requestedDepth=requestedDepth,renderStack=renderStack) ui.add_key_callback(switchDepths, ui.KEY_2, value=50, requestedDepth=requestedDepth,renderStack=renderStack) ui.add_key_callback(switchDepths, ui.KEY_3, value=700, requestedDepth=requestedDepth,renderStack=renderStack) def switchDepths(window,value,requestedDepth,renderStack): print "Requesting Depth: %s"%value requestedDepth.value = value signal = np.array([[-.02,.02 , -2.], [-.05,.1052, -5.], [-.205,.53 ,-20.], #[-.70,1.945,-70.], ]) '''signal = np.array([[-.02,.02 , -2.], [-.02,.0295, -2.], [-.02,.031 , -2.], ])''' scaler = np.array([[.035,.035,.035], [.029,.0255,.029], [.0278,.0238,.0278], ]) trans = signal[DEPTHS.index(value)] scale = 
scaler[DEPTHS.index(value)] renderStack.objects['teapot'].setTranslate(*trans) #renderStack.objects['teapot'].setScale(*scale) def setup(): ui.init() windows = [] idx = 0 winData = WINDOWS[idx] renderStack = ui.RenderStack() renderStack.display = dd.Display(resolution=winData['size']) share = None if idx == 0 else windows[0] window = renderStack.addWindow(ui.open_window(winData['name'], winData['location'][0], winData['location'][1], renderStack.display.width, renderStack.display.height, share=share)) if not window: ui.terminate() exit(1) ui.make_context_current(window) dg.initGL() windows.append(window) ui.add_key_callback(ui.close_window, ui.KEY_ESCAPE) scene = test.loadScene(renderStack) renderStack.graphicsCardInit() return renderStack, scene, windows def runLoop(renderStack, windows, requestedDepth, stillRunning): # Print message to console, and kick off the loop to get it rolling. print("Hit ESC key to quit.") index = 0 while not ui.window_should_close(windows[0]): animate(renderStack) ui.make_context_current(windows[0]) test.drawScene(renderStack) ui.swap_buffers(windows[0]) if TIMING: now = time.time() if (now % 5) < .01: index = int(((now % 15)/5))%len(DEPTHS) switchDepths(windows[0],DEPTHS[index],requestedDepth,renderStack) ui.poll_events() #ui.wait_events() stillRunning.value=False ui.terminate() exit(0) def runDemo(): global START now = time.time() while now % 4 > .0001: now = time.time() START = time.time() renderStack, scene, windows = setup() if SIDE == 'Left': display = setupDisplay() requestedDepth = mp.Value('d',20) addInput(requestedDepth,renderStack) stillRunning = mp.Value('b', True) processes = [] if SIDE == 'Left': processes.append(mp.Process(target=runDisplay, args=(display, requestedDepth,stillRunning))) for p in processes: p.start() runLoop(renderStack,windows,requestedDepth, stillRunning) stillRunning.value=False for p in processes: p.join() if __name__ == '__main__': runDemo()
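
# Side-note sketch (added for illustration): the demo shares state between the
# render loop and the display process through multiprocessing.Value, which is
# the same pattern in miniature:
#
#   import multiprocessing as mp
#
#   def worker(flag):
#       while flag.value:        # child polls the shared boolean
#           pass
#
#   running = mp.Value('b', True)
#   p = mp.Process(target=worker, args=(running,))
#   p.start()
#   running.value = False        # parent flips the flag to stop the child
#   p.join()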
qactabase.py
import copy import datetime import json import os import re import sys import threading import time import uuid import pandas as pd import pymongo import requests from qaenv import (eventmq_amqp, eventmq_ip, eventmq_password, eventmq_port, eventmq_username, mongo_ip, mongo_uri) from QAPUBSUB.consumer import subscriber, subscriber_routing, subscriber_topic from QAPUBSUB.producer import publisher_routing, publisher_topic import QUANTAXIS as QA from QAStrategy.util import QA_data_futuremin_resample from QIFIAccount import ORDER_DIRECTION, QIFI_Account from QUANTAXIS.QAARP import QA_Risk, QA_User from QUANTAXIS.QAEngine.QAThreadEngine import QA_Thread from QUANTAXIS.QAUtil.QAParameter import MARKET_TYPE, RUNNING_ENVIRONMENT class QAStrategyBase(): def __init__(self, code='rb2005', frequence='1min', strategy_id='QA_STRATEGY', risk_check_gap=1, portfolio='default', start='2020-01-01', end='2020-05-21', init_cash=1000000, send_wx=False, data_host=eventmq_ip, data_port=eventmq_port, data_user=eventmq_username, data_password=eventmq_password, trade_host=eventmq_ip, trade_port=eventmq_port, trade_user=eventmq_username, trade_password=eventmq_password, taskid=None, mongo_ip=mongo_ip, model='py'): """ code 可以传入单个标的 也可以传入一组标的(list) 会自动基于code来判断是什么市场 TODO: 支持多个市场同时存在 self.trade_host 交易所在的eventmq的ip [挂ORDER_ROUTER的] / """ self.username = 'admin' self.password = 'admin' self.trade_host = trade_host self.code = code self.frequence = frequence self.strategy_id = strategy_id self.portfolio = portfolio self.data_host = data_host self.data_port = data_port self.data_user = data_user self.data_password = data_password self.trade_host = trade_host self.trade_port = trade_port self.trade_user = trade_user self.trade_password = trade_password self.start = start self.end = end self.init_cash = init_cash self.taskid = taskid self.running_time = '' self.market_preset = QA.QAARP.MARKET_PRESET() self._market_data = [] self.risk_check_gap = risk_check_gap self.latest_price = {} self.isupdate = False self.model = model self.new_data = {} self._systemvar = {} self._signal = [] self.send_wx = send_wx if isinstance(self.code, str): self.last_order_towards = {self.code: {'BUY': '', 'SELL': ''}} else: self.last_order_towards = dict( zip(self.code, [{'BUY': '', 'SELL': ''} for i in range(len(self.code))])) self.dt = '' if isinstance(self.code, str): self.market_type = MARKET_TYPE.STOCK_CN else: # self.market_type = MARKET_TYPE.FUTURE_CN if re.search( # r'[a-zA-z]+', self.code[0]) else MARKET_TYPE.STOCK_CN self.market_type = MARKET_TYPE.STOCK_CN self.bar_order = {'BUY_OPEN': 0, 'SELL_OPEN': 0, 'BUY_CLOSE': 0, 'SELL_CLOSE': 0} self._num_cached = 120 self._cached_data = [] self.user_init() @property def bar_id(self): return len(self._market_data) @property def BarsSinceEntryLong(self): return self.bar_id - self.bar_order.get('BUY_OPEN', self.bar_id) @property def BarsSinceEntryShort(self): return self.bar_id - self.bar_order.get('SELL_OPEN', self.bar_id) @property def EntryPriceLong(self): code = self.get_code() return self.get_positions(code).open_price_long @property def EntryPriceShort(self): code = self.get_code() return self.get_positions(code).open_price_short def on_sync(self): if self.running_mode != 'backtest': self.pubacc.pub(json.dumps(self.acc.message), routing_key=self.strategy_id) def _debug_sim(self): self.running_mode = 'sim' if self.frequence.endswith('min'): if isinstance(self.code, str): self._old_data = QA.QA_fetch_get_future_min('tdx', self.code.upper(), QA.QA_util_get_last_day( 
QA.QA_util_get_real_date(str(datetime.date.today()))), str(datetime.datetime.now()), self.frequence)[:-1].set_index(['datetime', 'code']) self._old_data = self._old_data.assign(volume=self._old_data.trade).loc[:, [ 'open', 'high', 'low', 'close', 'volume']] else: self._old_data = pd.concat([QA.QA_fetch_get_future_min('tdx', item.upper(), QA.QA_util_get_last_day( QA.QA_util_get_real_date(str(datetime.date.today()))), str(datetime.datetime.now()), self.frequence)[:-1].set_index(['datetime', 'code']) for item in self.code], sort=False) self._old_data = self._old_data.assign(volume=self._old_data.trade).loc[:, [ 'open', 'high', 'low', 'close', 'volume']] else: self._old_data = pd.DataFrame() self.database = pymongo.MongoClient(mongo_ip).QAREALTIME self.client = self.database.account self.subscriber_client = self.database.subscribe self.acc = QIFI_Account( username=self.strategy_id, password=self.strategy_id, trade_host=mongo_ip, init_cash=self.init_cash) self.acc.initial() self.acc.on_sync = self.on_sync self.pub = publisher_routing(exchange='QAORDER_ROUTER', host=self.trade_host, port=self.trade_port, user=self.trade_user, password=self.trade_password) self.pubacc = publisher_topic(exchange='QAAccount', host=self.trade_host, port=self.trade_port, user=self.trade_user, password=self.trade_password) if isinstance(self.code, str): self.subscribe_data(self.code.lower(), self.frequence, self.data_host, self.data_port, self.data_user, self.data_password, self.model) else: self.subscribe_multi(self.code, self.frequence, self.data_host, self.data_port, self.data_user, self.data_password, self.model) print('account {} start sim'.format(self.strategy_id)) self.database.strategy_schedule.job_control.update( {'strategy_id': self.strategy_id}, {'strategy_id': self.strategy_id, 'taskid': self.taskid, 'filepath': os.path.abspath(__file__), 'status': 200}, upsert=True) def debug_sim(self): self._debug_sim() threading.Thread(target=self.sub.start, daemon=True).start() def run_sim(self): self._debug_sim() self.sub.start() def run_backtest(self): self.debug() self.acc.save() risk = QA_Risk(self.acc) risk.save() try: """add rank flow if exist QARank是我们内部用于评价策略ELO的库 此处并不影响正常使用 """ from QARank import QA_Rank QA_Rank(self.acc).send() except: pass def user_init(self): """ 用户自定义的init过程 """ pass def debug(self): raise NotImplementedError # self.running_mode = 'backtest' # self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS # user = QA_User(username=self.username, password=self.password) # port = user.new_portfolio(self.portfolio) # self.acc = port.new_accountpro( # account_cookie=self.strategy_id, init_cash=self.init_cash, market_type=self.market_type, frequence=self.frequence) # self.positions = self.acc.get_position(self.code) # # print(self.acc) # print(self.acc.market_type) # data = QA.QA_quotation(self.code.upper(), self.start, self.end, source=QA.DATASOURCE.MONGO, # frequence=self.frequence, market=self.market_type, output=QA.OUTPUT_FORMAT.DATASTRUCT) # # data.data.apply(self.x1, axis=1) def x1(self, item): raise NotImplementedError # self.latest_price[item.name[1]] = item['close'] # if str(item.name[0])[0:10] != str(self.running_time)[0:10]: # self.on_dailyclose() # self.on_dailyopen() # if self.market_type == QA.MARKET_TYPE.STOCK_CN: # # print('backtest: Settle!') # self.acc.settle() # self._on_1min_bar() # self._market_data.append(item) # self.running_time = str(item.name[0]) # self.on_bar(item) def debug_t0(self): self.running_mode = 'backtest' self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS user 
= QA_User(username=self.username, password=self.password) port = user.new_portfolio(self.portfolio) self.acc = port.new_accountpro( account_cookie=self.strategy_id, init_cash=self.init_cash, init_hold={ self.code: 100000}, market_type=self.market_type, running_environment=RUNNING_ENVIRONMENT.TZERO) self.positions = self.acc.get_position(self.code) data = QA.QA_quotation(self.code.upper(), self.start, self.end, source=QA.DATASOURCE.MONGO, frequence=self.frequence, market=self.market_type, output=QA.OUTPUT_FORMAT.DATASTRUCT) def x1(item): self.latest_price[item.name[1]] = item['close'] if str(item.name[0])[0:10] != str(self.running_time)[0:10]: self.on_dailyclose() for order in self.acc.close_positions_order: order.trade('closebySys', order.price, order.amount, order.datetime) self.on_dailyopen() if self.market_type == QA.MARKET_TYPE.STOCK_CN: print('backtest: Settle!') self.acc.settle() self._on_1min_bar() self._market_data.append(item) self.running_time = str(item.name[0]) self.on_bar(item) data.data.apply(x1, axis=1) def debug_currenttick(self, freq): data = QA.QA_fetch_get_future_transaction_realtime( 'tdx', self.code.upper()) self.running_mode = 'backtest' self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS user = QA_User(username=self.username, password=self.password) port = user.new_portfolio(self.portfolio) self.strategy_id = self.strategy_id + \ 'currenttick_{}_{}'.format(str(datetime.date.today()), freq) self.acc = port.new_accountpro( account_cookie=self.strategy_id, init_cash=self.init_cash, market_type=self.market_type) self.positions = self.acc.get_position(self.code) data = data.assign(price=data.price/1000).loc[:, ['code', 'price', 'volume']].resample( freq).apply({'code': 'last', 'price': 'ohlc', 'volume': 'sum'}).dropna() data.columns = data.columns.droplevel(0) data = data.reset_index().set_index(['datetime', 'code']) def x1(item): self.latest_price[item.name[1]] = item['close'] if str(item.name[0])[0:10] != str(self.running_time)[0:10]: self.on_dailyclose() self.on_dailyopen() self._on_1min_bar() self._market_data.append(item) self.running_time = str(item.name[0]) self.on_bar(item) data.apply(x1, axis=1) def debug_histick(self, freq): data = QA.QA_fetch_get_future_transaction( 'tdx', self.code.upper(), self.start, self.end) self.running_mode = 'backtest' self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS user = QA_User(username=self.username, password=self.password) port = user.new_portfolio(self.portfolio) self.strategy_id = self.strategy_id + \ 'histick_{}_{}_{}'.format(self.start, self.end, freq) self.acc = port.new_accountpro( account_cookie=self.strategy_id, init_cash=self.init_cash, market_type=self.market_type) self.positions = self.acc.get_position(self.code) data = data.assign(price=data.price/1000).loc[:, ['code', 'price', 'volume']].resample( freq).apply({'code': 'last', 'price': 'ohlc', 'volume': 'sum'}).dropna() data.columns = data.columns.droplevel(0) data = data.reset_index().set_index(['datetime', 'code']) def x1(item): self.latest_price[item.name[1]] = item['close'] if str(item.name[0])[0:10] != str(self.running_time)[0:10]: self.on_dailyclose() self.on_dailyopen() self._on_1min_bar() self._market_data.append(item) self.running_time = str(item.name[0]) self.on_bar(item) data.apply(x1, axis=1) def subscribe_data(self, code, frequence, data_host, data_port, data_user, data_password, model='py'): raise NotImplementedError # """[summary] # # Arguments: # code {[type]} -- [description] # frequence {[type]} -- [description] # """ # # if 
frequence.endswith('min'): # if model == 'py': # self.sub = subscriber(exchange='realtime_{}_{}'.format( # frequence, code), host=data_host, port=data_port, user=data_user, password=data_password) # elif model == 'rust': # self.sub = subscriber_routing(exchange='realtime_{}'.format( # code), routing_key=frequence, host=data_host, port=data_port, user=data_user, password=data_password) # self.sub.callback = self.callback # elif frequence.endswith('s'): # # import re # self._num_cached = 2*int(re.findall(r'\d+', self.frequence)[0]) # self.sub = subscriber_routing( # exchange='CTPX', routing_key=code, host=data_host, port=data_port, user=data_user, password=data_password) # self.sub.callback = self.second_callback # elif frequence.endswith('tick'): # self._num_cached = 1 # self.sub = subscriber_routing( # exchange='CTPX', routing_key=code, host=data_host, port=data_port, user=data_user, password=data_password) # self.sub.callback = self.tick_callback def subscribe_multi(self, codelist, frequence, data_host, data_port, data_user, data_password, model='py'): if frequence.endswith('min'): if model == 'rust': self.sub = subscriber_routing(exchange='realtime_{}'.format( codelist[0]), routing_key=frequence, host=data_host, port=data_port, user=data_user, password=data_password) for item in codelist[1:]: self.sub.add_sub(exchange='realtime_{}'.format( item), routing_key=frequence) elif model == 'py': self.sub = subscriber_routing(exchange='realtime_{}'.format( codelist[0].lower()), routing_key=frequence, host=data_host, port=data_port, user=data_user, password=data_password) for item in codelist[1:]: self.sub.add_sub(exchange='realtime_{}'.format( item.lower()), routing_key=frequence) self.sub.callback = self.callback elif frequence.endswith('tick'): self._num_cached = 1 self.sub = subscriber_routing(exchange='CTPX', routing_key=codelist[0].lower( ), host=data_host, port=data_port, user=data_user, password=data_password) for item in codelist[1:]: self.sub.add_sub(exchange='CTPX', routing_key=item.lower()) self.sub.callback = self.tick_callback @property def old_data(self): return self._old_data def update(self): """ 此处是切换bar的时候的节点 """ self._old_data = self._market_data self._on_1min_bar() @property def market_datetime(self): """计算的market时间点 此api慎用 因为会惰性计算全市场的值 Returns: [type] -- [description] """ return self.market_data.index.levels[0] @property def market_data(self): if self.running_mode == 'sim': return self._market_data elif self.running_mode == 'backtest': return pd.concat(self._market_data[-100:], axis=1, sort=False).T def force_close(self): # 强平 if self.positions.volume_long > 0: self.send_order('SELL', 'CLOSE', price=self.positions.last_price, volume=self.positions.volume_long) if self.positions.volume_short > 0: self.send_order('BUY', 'CLOSE', price=self.positions.last_price, volume=self.positions.volume_short) def upcoming_data(self, new_bar): """upcoming_bar : 在这一步中, 我们主要进行的是 1. 更新self._market_data 2. 更新账户 3. 更新持仓 4. 
通知on_bar Arguments: new_bar {pd.DataFrame} -- [description] """ code = new_bar.index.levels[1][0] if len(self._old_data) > 0: self._market_data = pd.concat( [self._old_data, new_bar], sort=False) else: self._market_data = new_bar # QA.QA_util_log_info(self._market_data) if self.isupdate: self.update() self.isupdate = False self.update_account() if isinstance(self.code, str): self.positions.on_price_change(float(self.latest_price[code])) else: for item in self.code: self.acc.get_position(item).on_price_change( float(self.latest_price[code])) self.on_bar(json.loads( new_bar.reset_index().to_json(orient='records'))[0]) def ind2str(self, ind, ind_type): z = ind.tail(1).reset_index().to_dict(orient='records')[0] return json.dumps({'topic': ind_type, 'code': self.code, 'type': self.frequence, 'data': z}) def second_callback(self, a, b, c, body): raise NotImplementedError # """在strategy的callback中,我们需要的是 # # 1. 更新数据 # 2. 更新bar # 3. 更新策略状态 # 4. 推送事件 # # Arguments: # a {[type]} -- [description] # b {[type]} -- [description] # c {[type]} -- [description] # body {[type]} -- [description] # # second ==> 2*second tick # # b'{"ask_price_1": 4145.0, "ask_price_2": 0, "ask_price_3": 0, "ask_price_4": 0, "ask_price_5": 0, # "ask_volume_1": 69, "ask_volume_2": 0, "ask_volume_3": 0, "ask_volume_4": 0, "ask_volume_5": 0, # "average_price": 61958.14258714826, # "bid_price_1": 4143.0, "bid_price_2": 0, "bid_price_3": 0, "bid_price_4": 0, "bid_price_5": 0, # "bid_volume_1": 30, "bid_volume_2": 0, "bid_volume_3": 0, "bid_volume_4": 0, "bid_volume_5": 0, # "datetime": "2019-11-20 01:57:08", "exchange": "SHFE", "gateway_name": "ctp", # "high_price": 4152.0, "last_price": 4144.0, "last_volume": 0, # "limit_down": 3872.0, "limit_up": 4367.0, "local_symbol": "ag1912.SHFE", # "low_price": 4105.0, "name": "", "open_interest": 277912.0, "open_price": 4140.0, # "preSettlementPrice": 4120.0, "pre_close": 4155.0, # "symbol": "ag1912", # "volume": 114288}' # # # tick 会基于热数据的量 self._num_cached 来判断更新/重采样 # # """ # # self.new_data = json.loads(str(body, encoding='utf-8')) # # self._cached_data.append(self.new_data) # self.latest_price[self.new_data['symbol'] # ] = self.new_data['last_price'] # # # if len(self._cached_data) == self._num_cached: # # self.isupdate = True # # # if len(self._cached_data) > 3*self._num_cached: # # 控制缓存数据量 # self._cached_data = self._cached_data[self._num_cached:] # # data = pd.DataFrame(self._cached_data).loc[:, [ # 'datetime', 'last_price', 'volume']] # data = data.assign(datetime=pd.to_datetime(data.datetime)).set_index('datetime').resample( # self.frequence).apply({'last_price': 'ohlc', 'volume': 'last'}).dropna() # data.columns = data.columns.droplevel(0) # # data = data.assign(volume=data.volume.diff(), # code=self.new_data['symbol']) # data = data.reset_index().set_index(['datetime', 'code']) # # self.acc.on_price_change( # self.new_data['symbol'], self.latest_price[self.new_data['symbol']]) # # .loc[:, ['open', 'high', 'low', 'close', 'volume', 'tradetime']] # now = datetime.datetime.now() # if now.hour == 20 and now.minute == 59 and now.second < 10: # self.daily_func() # time.sleep(10) # # self.running_time = self.new_data['datetime'] # # print(data.iloc[-1].index[0]) # if self.dt != data.index[-1][0]: # self.isupdate = True # self.dt = data.index[-1][0] # self.upcoming_data(data.tail(1)) def tick_callback(self, a, b, c, body): self.new_data = json.loads(str(body, encoding='utf-8')) self.latest_price[self.new_data['symbol'] ] = self.new_data['last_price'] self.running_time = 
self.new_data['datetime'] self.on_tick(self.new_data) def get_code_marketdata(self, code): return self.market_data.loc[(slice(None), code), :] def get_current_marketdata(self): return self.market_data.loc[(self.running_time, slice(None)), :] def callback(self, a, b, c, body): raise NotImplementedError # """在strategy的callback中,我们需要的是 # # 1. 更新数据 # 2. 更新bar # 3. 更新策略状态 # 4. 推送事件 # # Arguments: # a {[type]} -- [description] # b {[type]} -- [description] # c {[type]} -- [description] # body {[type]} -- [description] # """ # # self.new_data = json.loads(str(body, encoding='utf-8')) # self.latest_price[self.new_data['code']] = self.new_data['close'] # # if self.dt != str(self.new_data['datetime'])[0:16]: # # [0:16]是分钟线位数 # self.dt = str(self.new_data['datetime'])[0:16] # self.isupdate = True # # self.acc.on_price_change(self.new_data['code'], self.new_data['close']) # # .loc[:, ['open', 'high', 'low', 'close', 'volume', 'tradetime']] # # bar = pd.DataFrame([self.new_data]).set_index(['datetime', 'code']) # # now = datetime.datetime.now() # if now.hour == 20 and now.minute == 59 and now.second < 10: # self.daily_func() # time.sleep(10) # # # res = self.job_control.find_one( # # {'strategy_id': self.strategy_id, 'strategy_id': self.strategy_id}) # # self.control_status(res) # self.running_time = self.new_data['datetime'] # self.upcoming_data(bar) def control_status(self, res): print(res) def add_subscriber(self, qaproid): """Add a subscriber 增加订阅者的QAPRO_ID """ self.subscriber_client.insert_one( {'strategy_id': self.strategy_id, 'user_id': qaproid}) @property def subscriber_list(self): """订阅者 Returns: [type] -- [description] """ return list(set([item['user_id'] for item in self.subscriber_client.find({'strategy_id': self.strategy_id})])) def load_strategy(self): raise NotImplementedError def on_dailyopen(self): pass def on_dailyclose(self): pass def on_bar(self, bar): raise NotImplementedError def on_tick(self, tick): raise NotImplementedError def _on_1min_bar(self): #raise NotImplementedError if len(self._systemvar.keys()) > 0: self._signal.append(copy.deepcopy(self._systemvar)) try: self.on_1min_bar() except: pass def on_deal(self, order): """ order is a dict type """ print('------this is on deal message ------') print(order) def on_1min_bar(self): raise NotImplementedError def on_5min_bar(self): raise NotImplementedError def on_15min_bar(self): raise NotImplementedError def on_30min_bar(self): raise NotImplementedError def order_handler(self): self._orders = {} def daily_func(self): QA.QA_util_log_info('DAILY FUNC') def risk_check(self): pass def plot(self, name, data, format): """ plot是可以存储你的临时信息的接口, 后期会接入可视化 Arguments: name {[type]} -- [description] data {[type]} -- [description] format {[type]} -- [description] """ self._systemvar[name] = {'datetime': copy.deepcopy(str( self.running_time)), 'value': data, 'format': format} def get_code(self): if isinstance(self.code, str): return self.code else: return self.code[0] def check_order(self, direction, offset, code=None): """[summary] 同方向不开仓 只对期货市场做限制 buy - open sell - close """ if code == None: code = self.get_code() if self.market_type == QA.MARKET_TYPE.FUTURE_CN: if self.last_order_towards[code][direction] == str(offset): return False else: return True else: return True def on_ordererror(self, direction, offset, price, volume): print('order Error ') def receive_simpledeal(self, code: str, trade_time, trade_amount, direction, offset, trade_price, message='sell_open'): self.send_order(direction=direction, offset=offset, volume=trade_amount, 
price=trade_price, order_id=QA.QA_util_random_with_topic(self.strategy_id)) def send_order(self, direction='BUY', offset='OPEN', price=3925, volume=10, order_id='', code=None): raise NotImplementedError # if code == None: # code = self.get_code() # # towards = eval('ORDER_DIRECTION.{}_{}'.format(direction, offset)) # order_id = str(uuid.uuid4()) if order_id == '' else order_id # # if isinstance(price, float): # pass # elif isinstance(price, pd.Series): # price = price.values[0] # # if self.running_mode == 'sim': # # 在此处拦截无法下单的订单 # if (direction == 'BUY' and self.latest_price[code] <= price) or (direction == 'SELL' and self.latest_price[code] >= price): # QA.QA_util_log_info( # '============ {} SEND ORDER =================='.format(order_id)) # QA.QA_util_log_info('direction{} offset {} price{} volume{}'.format( # direction, offset, price, volume)) # # if self.check_order(direction, offset, code=code): # #self.last_order_towards = {'BUY': '', 'SELL': ''} # self.last_order_towards[code][direction] = offset # now = str(datetime.datetime.now()) # # order = self.acc.send_order( # code=code, towards=towards, price=price, amount=volume, order_id=order_id) # print(order) # order['topic'] = 'send_order' # self.pub.pub( # json.dumps(order), routing_key=self.strategy_id) # # self.acc.make_deal(order) # self.on_deal(order) # self.bar_order['{}_{}'.format( # direction, offset)] = self.bar_id # if self.send_wx: # for user in self.subscriber_list: # QA.QA_util_log_info(self.subscriber_list) # try: # requests.post('http://www.yutiansut.com/signal?user_id={}&template={}&strategy_id={}&realaccount={}&code={}&order_direction={}&order_offset={}&price={}&volume={}&order_time={}'.format( # user, "xiadan_report", self.strategy_id, self.acc.user_id, code.lower(), direction, offset, price, volume, now)) # except Exception as e: # QA.QA_util_log_info(e) # # else: # QA.QA_util_log_info('failed in ORDER_CHECK') # else: # self.on_ordererror(direction, offset, price, volume) # elif self.running_mode == 'backtest': # # self.bar_order['{}_{}'.format(direction, offset)] = self.bar_id # # if self.market_type == 'stock_cn': # order = self.acc.send_order( # code=code, amount=volume, time=self.running_time, towards=towards, price=price) # order.trade(order.order_id, order.price, # order.amount, order.datetime) # self.on_deal(order.to_dict()) # else: # self.acc.receive_simpledeal( # code=code, trade_time=self.running_time, trade_towards=towards, trade_amount=volume, trade_price=price, order_id=order_id, realorder_id=order_id, trade_id=order_id) # # self.on_deal({ # 'code': code, # 'trade_time': self.running_time, # 'trade_towards': towards, # 'trade_amount': volume, # 'trade_price': price, # 'order_id': order_id, # 'realorder_id': order_id, # 'trade_id': order_id # }) # self.positions = self.acc.get_position(code) def update_account(self): raise NotImplementedError # if self.running_mode == 'sim': # QA.QA_util_log_info('{} UPDATE ACCOUNT'.format( # str(datetime.datetime.now()))) # # self.accounts = self.acc.account_msg # self.orders = self.acc.orders # if isinstance(self.code, str): # self.positions = self.acc.get_position(self.code) # else: # pass # self.trades = self.acc.trades # self.updatetime = self.acc.dtstr # # self.on_sync() # elif self.running_mode == 'backtest': # if isinstance(self.code, str): # self.positions = self.acc.get_position(self.code) # else: # pass def get_exchange(self, code): return self.market_preset.get_exchange(code) def get_positions(self, code): if self.running_mode == 'sim': self.update_account() 
return self.acc.get_position(code) elif self.running_mode == 'backtest': return self.acc.get_position(code) def get_cash(self): if self.running_mode == 'sim': self.update_account() return self.accounts.get('available', '') elif self.running_mode == 'backtest': return self.acc.cash_available def run(self): while True: time.sleep(self.risk_check_gap) self.risk_check() if __name__ == '__main__': QAStrategyBase(code='rb2005').run()
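
# Minimal subclass sketch (added as an example): QAStrategyBase leaves on_bar()
# abstract, so a concrete strategy overrides it. The contract code and the
# trading rule below are placeholders, and send_order() is declared above but
# left as NotImplementedError in this copy, so only the intended call shape is
# illustrated.
#
#   class MyStrategy(QAStrategyBase):
#       def on_bar(self, bar):
#           # bar is a dict-like record with open/high/low/close/volume fields
#           if bar['close'] > bar['open']:
#               self.send_order(direction='BUY', offset='OPEN',
#                               price=bar['close'], volume=1)
#
#   MyStrategy(code='rb2005', frequence='1min').run()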
websockets.py
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import queue
import threading, os, json, time
from collections import defaultdict

try:
    from SimpleWebSocketServer import WebSocket, SimpleSSLWebSocketServer
except ImportError:
    import sys
    sys.exit("install SimpleWebSocketServer")

from . import util

request_queue = queue.Queue()


class ElectrumWebSocket(WebSocket):

    def handleMessage(self):
        assert self.data[0:3] == 'id:'
        util.print_error("message received", self.data)
        request_id = self.data[3:]
        request_queue.put((self, request_id))

    def handleConnected(self):
        util.print_error("connected", self.address)

    def handleClose(self):
        util.print_error("closed", self.address)


class WsClientThread(util.DaemonThread):

    def __init__(self, config, network):
        util.DaemonThread.__init__(self)
        self.network = network
        self.config = config
        self.response_queue = queue.Queue()
        self.subscriptions = defaultdict(list)

    def make_request(self, request_id):
        # read json file
        rdir = self.config.get('requests_dir')
        n = os.path.join(rdir, 'req', request_id[0], request_id[1], request_id, request_id + '.json')
        with open(n) as f:
            s = f.read()
        d = json.loads(s)
        addr = d.get('address')
        amount = d.get('amount')
        return addr, amount

    def reading_thread(self):
        while self.is_running():
            try:
                ws, request_id = request_queue.get()
            except queue.Empty:
                continue
            try:
                addr, amount = self.make_request(request_id)
            except:
                continue
            l = self.subscriptions.get(addr, [])
            l.append((ws, amount))
            self.subscriptions[addr] = l
            h = self.network.addr_to_scripthash(addr)
            self.network.send([('blockchain.scripthash.subscribe', [h])], self.response_queue.put)

    def run(self):
        threading.Thread(target=self.reading_thread).start()
        while self.is_running():
            try:
                r = self.response_queue.get(timeout=0.1)
            except queue.Empty:
                continue
            util.print_error('response', r)
            method = r.get('method')
            params = r.get('params')
            result = r.get('result')
            if result is None:
                continue
            if method == 'blockchain.scripthash.subscribe':
                self.network.send([('blockchain.scripthash.get_balance', params)], self.response_queue.put)
            elif method == 'blockchain.scripthash.get_balance':
                h = params[0]
                addr = self.network.h2addr.get(h, None)
                if addr is None:
                    util.print_error("can't find address for scripthash: %s" % h)
                l = self.subscriptions.get(addr, [])
                for ws, amount in l:
                    if not ws.closed:
                        if sum(result.values()) >= amount:
                            ws.sendMessage('paid')


class WebSocketServer(threading.Thread):

    def __init__(self, config,
ns): threading.Thread.__init__(self) self.config = config self.net_server = ns self.daemon = True def run(self): t = WsClientThread(self.config, self.net_server) t.start() host = self.config.get('websocket_server') port = self.config.get('websocket_port', 9999) certfile = self.config.get('ssl_chain') keyfile = self.config.get('ssl_privkey') self.server = SimpleSSLWebSocketServer(host, port, ElectrumWebSocket, certfile, keyfile) self.server.serveforever()
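
# Client-side sketch (added for illustration, assuming the third-party
# websocket-client package): handleMessage() above expects the peer to send the
# literal string 'id:<request_id>' and later pushes 'paid' once the watched
# address has received the requested amount. The port 9999 default comes from
# WebSocketServer.run().
#
#   import websocket                  # pip install websocket-client (assumed)
#
#   ws = websocket.create_connection("wss://localhost:9999/")
#   ws.send("id:" + request_id)       # request_id comes from the payment request
#   print(ws.recv())                  # prints 'paid' when the invoice is funded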
worker.py
# -*- coding:utf-8 -*- from Queue import Queue, Empty from threading import Thread from os import path, makedirs from logging import getLogger from threading import Lock logger = getLogger(__name__) fail_logger = getLogger('migrate_tool.fail_file') class Worker(object): def __init__(self, work_dir, file_filter, input_service, output_service, threads_num=5, max_size=30): self._input_service = input_service self._output_service = output_service self._filter = file_filter self._work_dir = work_dir self._threads_num = threads_num self._threads_pool = [] self._queue = Queue(maxsize=max_size) self._stop = False self._succ = 0 self._fail = 0 self._lock = Lock() def __work_thread(self): while not self._stop: # logger.info("worker stop: " + str(self._stop)) try: # logger.debug("try to get task") task = self._queue.get_nowait() # logger.debug("get task succeefully") self._queue.task_done() except Empty: logger.debug("Empty queue" + str(self._stop)) if self._stop: break else: import time time.sleep(1) continue task_path = task.key if task_path.startswith('/'): task_path = task_path[1:] if isinstance(task_path, str): task_path = task_path.decode('utf-8') import uuid localpath = unicode(path.join(self._work_dir, uuid.uuid1().hex)) try: try: makedirs(path.dirname(localpath)) except OSError as e: # directory is exists logger.debug(str(e)) try: ret = self._input_service.exists(task) if ret: logger.info("{file_path} exists".format(file_path=task_path.encode('utf-8'))) with self._lock: self._succ += 1 self._filter.add(task_path) continue except Exception as e: logger.exception("exists failed") try: self._output_service.download(task, localpath) except Exception as e: logger.exception("download failed") self._fail += 1 fail_logger.error(task_path) continue try: self._input_service.upload(task, localpath) except Exception: logger.exception("upload {} failed".format(task_path.encode('utf-8'))) self._fail += 1 fail_logger.error(task_path) continue try: import os if isinstance(localpath, unicode): localpath = localpath.encode('utf-8') os.remove(localpath) try: os.removedirs(path.dirname(localpath)) except OSError: pass except Exception as e: logger.exception(str(e)) continue if isinstance(task_path, unicode): logger.info("inc succ with {}".format(task_path.encode('utf-8'))) else: logger.info("inc succ with {}".format(task_path.encode('utf-8'))) with self._lock: self._succ += 1 self._filter.add(task_path) except Exception: logger.exception("try except for deleting file") finally: import os if isinstance(localpath, unicode): localpath = localpath.encode('utf-8') try: os.remove(localpath) os.removedirs(path.dirname(localpath)) except OSError: pass def add_task(self, task): # blocking self._queue.put(task) def start(self): self._threads_pool = [Thread(target=self.__work_thread) for _ in range(self._threads_num)] for t in self._threads_pool: t.start() def stop(self): self._queue.join() self.term() def term(self): self._stop = True logger.info("try to stop migrate process.") # while any([t.is_alive() for t in self._threads_pool]): # map(lambda i: i.join(5), filter(lambda j: j.is_alive(), self._threads_pool)) # print filter(lambda j: j.is_alive(), self._threads_pool) map(lambda i: i.join(), self._threads_pool) @property def success_num(self): return self._succ @property def failure_num(self): return self._fail
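
# Usage sketch (added as an example; the service objects are placeholders): the
# real input_service/output_service must provide exists(), upload() and
# download(), file_filter must provide add(), and each task must expose a .key
# path attribute, exactly as __work_thread() uses them above.
#
#   w = Worker(work_dir='/tmp/migrate', file_filter=my_filter,
#              input_service=dest_service, output_service=src_service,
#              threads_num=5)
#   w.start()
#   for task in src_service.list():   # hypothetical iterator of task objects
#       w.add_task(task)              # blocks when the bounded queue is full
#   w.stop()                          # drains the queue, then stops the threads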
test_runserver_main.py
import asyncio
import json
import os
import signal
import time
from multiprocessing import Process
from unittest import mock

import aiohttp
import pytest
from aiohttp.web import Application
from pytest_toolbox import mktree

from aiohttp_devtools.runserver import run_app, runserver
from aiohttp_devtools.runserver.config import Config
from aiohttp_devtools.runserver.serve import create_auxiliary_app, modify_main_app, src_reload, start_main_app

from .conftest import SIMPLE_APP, get_if_boxed, get_slow

slow = get_slow(pytest)
if_boxed = get_if_boxed(pytest)


async def check_server_running(loop, check_callback):
    port_open = False
    async with aiohttp.ClientSession(loop=loop, conn_timeout=1) as session:
        for i in range(50):
            try:
                async with session.get('http://localhost:8000/'):
                    pass
            except OSError:
                await asyncio.sleep(0.1, loop=loop)
            else:
                port_open = True
                break
        assert port_open
        await check_callback(session)


@if_boxed
@slow
def test_start_runserver(tmpworkdir, smart_caplog):
    mktree(tmpworkdir, {
        'app.py': """\
from aiohttp import web


async def hello(request):
    return web.Response(text='<h1>hello world</h1>', content_type='text/html')


async def has_error(request):
    raise ValueError()


def create_app(loop):
    app = web.Application()
    app.router.add_get('/', hello)
    app.router.add_get('/error', has_error)
    return app""",
        'static_dir/foo.js': 'var bar=1;',
    })
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    aux_app, aux_port, _ = runserver(app_path='app.py', static_path='static_dir')
    assert isinstance(aux_app, aiohttp.web.Application)
    assert aux_port == 8001
    for startup in aux_app.on_startup:
        loop.run_until_complete(startup(aux_app))

    async def check_callback(session):
        async with session.get('http://localhost:8000/') as r:
            assert r.status == 200
            assert r.headers['content-type'].startswith('text/html')
            text = await r.text()
            assert '<h1>hello world</h1>' in text
            assert '<script src="http://localhost:8001/livereload.js"></script>' in text

        async with session.get('http://localhost:8000/error') as r:
            assert r.status == 500
            assert 'raise ValueError()' in (await r.text())

    try:
        loop.run_until_complete(check_server_running(loop, check_callback))
    finally:
        for shutdown in aux_app.on_shutdown:
            loop.run_until_complete(shutdown(aux_app))
    assert (
        'adev.server.dft INFO: Starting aux server at http://localhost:8001 ◆\n'
        'adev.server.dft INFO: serving static files from ./static_dir/ at http://localhost:8001/static/\n'
        'adev.server.dft INFO: Starting dev server at http://localhost:8000 ●\n'
    ) in smart_caplog


@if_boxed
@slow
def test_start_runserver_app_instance(tmpworkdir, loop):
    mktree(tmpworkdir, {
        'app.py': """\
from aiohttp import web


async def hello(request):
    return web.Response(text='<h1>hello world</h1>', content_type='text/html')


app = web.Application()
app.router.add_get('/', hello)
"""
    })
    asyncio.set_event_loop(loop)
    aux_app, aux_port, _ = runserver(app_path='app.py', host='foobar.com')
    assert isinstance(aux_app, aiohttp.web.Application)
    assert aux_port == 8001
    assert len(aux_app.on_startup) == 2
    assert len(aux_app.on_shutdown) == 2


def kill_parent_soon(pid):
    time.sleep(0.2)
    os.kill(pid, signal.SIGINT)


@if_boxed
@slow
def test_run_app(loop, aiohttp_unused_port):
    app = Application()
    port = aiohttp_unused_port()
    Process(target=kill_parent_soon, args=(os.getpid(),)).start()
    run_app(app, port, loop)


@if_boxed
async def test_run_app_aiohttp_client(tmpworkdir, aiohttp_client):
    mktree(tmpworkdir, SIMPLE_APP)
    config = Config(app_path='app.py')
    app_factory = config.import_app_factory()
    app = app_factory()
    modify_main_app(app, config)
    assert isinstance(app, aiohttp.web.Application)
    cli = await aiohttp_client(app)
    r = await cli.get('/')
    assert r.status == 200
    text = await r.text()
    assert text == 'hello world'


async def test_aux_app(tmpworkdir, aiohttp_client):
    mktree(tmpworkdir, {
        'test.txt': 'test value',
    })
    app = create_auxiliary_app(static_path='.')
    cli = await aiohttp_client(app)
    r = await cli.get('/test.txt')
    assert r.status == 200
    text = await r.text()
    assert text == 'test value'


@if_boxed
@slow
async def test_serve_main_app(tmpworkdir, loop, mocker):
    asyncio.set_event_loop(loop)
    mktree(tmpworkdir, SIMPLE_APP)
    mock_modify_main_app = mocker.patch('aiohttp_devtools.runserver.serve.modify_main_app')
    loop.call_later(0.5, loop.stop)
    config = Config(app_path='app.py')
    await start_main_app(config, loop)
    mock_modify_main_app.assert_called_with(mock.ANY, config)


@if_boxed
@slow
async def test_start_main_app_app_instance(tmpworkdir, loop, mocker):
    mktree(tmpworkdir, {
        'app.py': """\
from aiohttp import web


async def hello(request):
    return web.Response(text='<h1>hello world</h1>', content_type='text/html')


app = web.Application()
app.router.add_get('/', hello)
"""
    })
    mock_modify_main_app = mocker.patch('aiohttp_devtools.runserver.serve.modify_main_app')
    config = Config(app_path='app.py')
    await start_main_app(config, loop)
    mock_modify_main_app.assert_called_with(mock.ANY, config)


@pytest.yield_fixture
def aux_cli(aiohttp_client, loop):
    app = create_auxiliary_app(static_path='.')
    cli = loop.run_until_complete(aiohttp_client(app))
    yield cli
    loop.run_until_complete(cli.close())


async def test_websocket_hello(aux_cli, smart_caplog):
    async with aux_cli.session.ws_connect(aux_cli.make_url('/livereload')) as ws:
        await ws.send_json({'command': 'hello', 'protocols': ['http://livereload.com/protocols/official-7']})
        async for msg in ws:
            assert msg.type == aiohttp.WSMsgType.text
            data = json.loads(msg.data)
            assert data == {
                'serverName': 'livereload-aiohttp',
                'command': 'hello',
                'protocols': ['http://livereload.com/protocols/official-7']
            }
            break  # noqa
    assert 'adev.server.aux WARNING: browser disconnected, appears no websocket connection was made' in smart_caplog


async def test_websocket_info(aux_cli, loop):
    assert len(aux_cli.server.app['websockets']) == 0
    ws = await aux_cli.session.ws_connect(aux_cli.make_url('/livereload'))
    try:
        await ws.send_json({'command': 'info', 'url': 'foobar', 'plugins': 'bang'})
        await asyncio.sleep(0.05, loop=loop)
        assert len(aux_cli.server.app['websockets']) == 1
    finally:
        await ws.close()


async def test_websocket_bad(aux_cli, smart_caplog):
    async with aux_cli.session.ws_connect(aux_cli.make_url('/livereload')) as ws:
        await ws.send_str('not json')
    async with aux_cli.session.ws_connect(aux_cli.make_url('/livereload')) as ws:
        await ws.send_json({'command': 'hello', 'protocols': ['not official-7']})
    async with aux_cli.session.ws_connect(aux_cli.make_url('/livereload')) as ws:
        await ws.send_json({'command': 'boom', 'url': 'foobar', 'plugins': 'bang'})
    async with aux_cli.session.ws_connect(aux_cli.make_url('/livereload')) as ws:
        await ws.send_bytes(b'this is bytes')
    assert 'adev.server.aux ERROR: live reload protocol 7 not supported' in smart_caplog.log
    assert 'adev.server.aux ERROR: JSON decode error' in smart_caplog.log
    assert 'adev.server.aux ERROR: Unknown ws message' in smart_caplog.log
    assert "adev.server.aux ERROR: unknown websocket message type binary, data: b'this is bytes'" in smart_caplog


async def test_websocket_reload(aux_cli, loop):
    reloads = await src_reload(aux_cli.server.app, 'foobar')
    assert reloads == 0
    ws = await aux_cli.session.ws_connect(aux_cli.make_url('/livereload'))
    try:
        await ws.send_json({
            'command': 'info',
            'url': 'foobar',
            'plugins': 'bang',
        })
        await asyncio.sleep(0.05, loop=loop)
        reloads = await src_reload(aux_cli.server.app, 'foobar')
        assert reloads == 1
    finally:
        await ws.close()
main.py
#!/usr/bin/python3
from threading import Thread
import datetime as dt
import math
import time


# functions
def thread_function(time):
    btime = dt.datetime.now()
    while dt.datetime.now() < btime + dt.timedelta(seconds=time):
        value = math.sin(dt.datetime.now().second)
    return


# main
print('Start thread function alone (time=3.0)', flush=True)
thread_function(3.0)

print('Sleep 3.0 seconds', flush=True)
time.sleep(3.0)

n = 4
print('Start %i threads' % n, flush=True)
ths = [Thread(target=thread_function, args=[10.0]) for i in range(n)]
for th in ths:
    th.start()

print('Join threads...', flush=True)
for th in ths:
    th.join()

print('END', flush=True)
# END
preproc_train2.py
import load_brats
import numpy as np
import pickle
import random
import itertools
import multiprocessing as mp
import os
from argparse import ArgumentParser
from directories import *


def create_input(data, start_idx, end_idx):
    z3 = start_idx[0]
    x3 = start_idx[1]
    y3 = start_idx[2]
    z4 = data.shape[1] - end_idx[0]
    x4 = data.shape[2] - end_idx[1]
    y4 = data.shape[3] - end_idx[2]
    p = abs(min(0, z3, x3, y3, z4, x4, y4))
    data_pad = np.zeros(np.add(data.shape, [0, 2*p, 2*p, 2*p]))
    if p != 0:
        data_pad[:, p:-p, p:-p, p:-p] = data
    else:
        data_pad = data
    z3 = start_idx[0] + p
    x3 = start_idx[1] + p
    y3 = start_idx[2] + p
    z4 = end_idx[0] + p
    x4 = end_idx[1] + p
    y4 = end_idx[2] + p
    output = data_pad[:, z3:z4, x3:x4, y3:y4]
    assert output.shape == (6, 124, 124, 124), "Wrong size"
    return output


def create_label_input_random(set, n):
    data = set['data']
    bbox_tumor = set['bbox_tumor']
    bbox_brain = set['bbox_brain']
    shape_pred = np.array([36, 36, 36])
    shape_inputs = np.array([124, 124, 124])
    d_shape = (np.divide((shape_inputs - shape_pred), 2)).astype(np.int16)
    t = bbox_tumor
    bbox_start = np.array([t[0], t[2], t[4]])
    bbox_size = np.array([t[1] - t[0] - shape_pred[0],
                          t[3] - t[2] - shape_pred[1],
                          t[5] - t[4] - shape_pred[2]])
    # the prediction will now be 100% in the tumor bounding box
    # add a little bit of padding
    pad = 20
    bbox_start = bbox_start - pad
    bbox_size = bbox_size + 2 * pad
    # absolute position in cropped image
    t2 = bbox_brain
    start_pred = bbox_start - np.array([t2[0], t2[2], t2[4]])

    inputs = []
    labels = []
    for i in range(n):
        # choose random box in bounding box
        if bbox_size[0] > 0:
            i1 = random.randint(0, bbox_size[0])
        else:
            i1 = 0
        if bbox_size[1] > 0:
            i2 = random.randint(0, bbox_size[1])
        else:
            i2 = 0
        if bbox_size[2] > 0:
            i3 = random.randint(0, bbox_size[2])
        else:
            i3 = 0
        z1 = start_pred[0] + i1
        x1 = start_pred[1] + i2
        y1 = start_pred[2] + i3
        z2 = z1 + shape_pred[0]
        x2 = x1 + shape_pred[1]
        y2 = y1 + shape_pred[2]
        z3 = z1 - d_shape[0]
        x3 = x1 - d_shape[0]
        y3 = y1 - d_shape[0]
        z4 = z2 + d_shape[0]
        x4 = x2 + d_shape[0]
        y4 = y2 + d_shape[0]
        input = create_input(data, [z3, x3, y3], [z4, x4, y4])
        label = input[5, 44:-44, 44:-44, 44:-44]
        label = np.expand_dims(label, 0)
        input = input[:5, :, :, :]
        input = np.expand_dims(input, axis=0)
        input = np.float32(input)
        label = np.int64(label)
        label = np.eye(4)[label]
        label = np.moveaxis(label, -1, 1)
        label = np.float32(label)
        inputs.append(input)
        labels.append(label)
    return inputs, labels


def create_label_input_complete(set):
    data = set['data']
    bbox_tumor = set['bbox_tumor']
    bbox_brain = set['bbox_brain']
    shape_pred = np.array([36, 36, 36])
    shape_inputs = np.array([124, 124, 124])
    d_shape = (np.divide((shape_inputs - shape_pred), 2)).astype(np.int16)
    t = bbox_tumor
    bbox_start = np.array([t[0], t[2], t[4]])
    bbox_size = np.array([t[1] - t[0] - shape_pred[0],
                          t[3] - t[2] - shape_pred[1],
                          t[5] - t[4] - shape_pred[2]])
    # the prediction will now be 100% in the tumor bounding box
    # add a little bit of padding
    pad = 0
    bbox_start = bbox_start - pad
    bbox_size = bbox_size + 2 * pad
    # absolute position in cropped image
    t2 = bbox_brain
    start_pred = bbox_start - np.array([t2[0], t2[2], t2[4]])
    bbox_size[bbox_size < 0] = 0

    labels = []
    inputs = []
    dbf = np.zeros(data.shape[1:])
    if not np.array_equal(np.unique(data[5, :, :, :]), [0., 1., 2., 3.]):
        print("NOT ALL CLASSES In %s" % set['name'])
        return inputs, labels

    # for stepsize in [20,18,12,10,8,5]:
    stepsize1 = max(np.int(np.rint((bbox_size[0] + 36) / 4.)), 1)
    stepsize2 = max(np.int(np.rint((bbox_size[1] + 36) / 4.)), 1)
    stepsize3 = max(np.int(np.rint((bbox_size[2] + 36) / 4.)), 1)
    # walk a grid over all three axes of the (zero-clipped) tumor bounding box
    for (i1, i2, i3) in itertools.product(range(0, bbox_size[0] + 36, stepsize1),
                                          range(0, bbox_size[1] + 36, stepsize2),
                                          range(0, bbox_size[2] + 36, stepsize3)):
        z1 = start_pred[0] + i1
        x1 = start_pred[1] + i2
        y1 = start_pred[2] + i3
        z2 = z1 + shape_pred[0]
        x2 = x1 + shape_pred[1]
        y2 = y1 + shape_pred[2]
        z3 = z1 - d_shape[0]
        x3 = x1 - d_shape[0]
        y3 = y1 - d_shape[0]
        z4 = z2 + d_shape[0]
        x4 = x2 + d_shape[0]
        y4 = y2 + d_shape[0]
        input = create_input(data, [z3, x3, y3], [z4, x4, y4])
        dbf[z3+44:z4-44, x3+44:x4-44, y3+44:y4-44] = 1
        label = input[5, 44:-44, 44:-44, 44:-44]
        # Check if data is adequate for training
        uniques, counts = np.unique(label, return_counts=True)
        if not np.array_equal(uniques, [0., 1., 2., 3.]):
            continue
        if not np.all(np.greater(counts, [320, 320, 320, 320])):
            # 1 Patch = 32^3 = 32768 => 0.1% = 32.8
            continue
        input = input[:5, :, :, :]
        input = np.expand_dims(input, axis=0)
        input = np.float32(input)
        label = np.expand_dims(label, 0)
        label = np.int32(label)
        label = np.eye(4)[label]
        label = np.moveaxis(label, -1, 1)
        label = np.float32(label)
        inputs.append(input)
        labels.append(label)
    print("%i datasets from %s, stepsize=%i, %i, %i"
          % (labels.__len__(), set['name'], stepsize1, stepsize2, stepsize3))
    return inputs, labels


def get_trainset(small_training=False):
    input_pickles = []
    label_pickles = []
    for s in os.listdir(savedir_preproc_train2):
        if s.startswith('input'):
            input_pickles.append(s)
        elif s.startswith('label'):
            label_pickles.append(s)
        else:
            print("wrong file!")
            break
    input_pickles.sort()
    label_pickles.sort()
    if small_training == True:
        input_pickles = input_pickles[:2]
        label_pickles = label_pickles[:2]
    inputs = []
    labels = []
    for input, label in zip(input_pickles, label_pickles):
        cur_inputs = pickle.load(open(opj(savedir_preproc_train2, input), "rb"))
        cur_labels = pickle.load(open(opj(savedir_preproc_train2, label), "rb"))
        inputs.extend(cur_inputs)
        labels.extend(cur_labels)
    return inputs, labels


def main_random_sp(train_sets, n=20):
    for train_set in train_sets:
        inputs, labels = create_label_input_random(train_set, n)
        pickle.dump(inputs, open(opj(savedir_preproc_train2, 'inputs_' + train_set['name'] + '.p'), 'wb'))
        pickle.dump(labels, open(opj(savedir_preproc_train2, 'labels_' + train_set['name'] + '.p'), 'wb'))
        print("random processed: " + train_set['name'])
    return 0


def main_patches(train_sets):
    for train_set in train_sets:
        inputs, labels = create_label_input_complete(train_set)
        pickle.dump(inputs, open(opj(savedir_preproc_train2, 'inputs_' + train_set['name'] + '.p'), 'wb'))
        pickle.dump(labels, open(opj(savedir_preproc_train2, 'labels_' + train_set['name'] + '.p'), 'wb'))
        print("Patch-based processed: " + train_set['name'])
    return 0


def main(num_processes=4, mode='random'):
    train_sets = load_brats.load_normalized_data(dir=savedir_preproc_train1)
    list_of_lists = []
    for i in range(num_processes):
        tmp_list = train_sets[i::num_processes]
        list_of_lists.append(tmp_list)
    if mode == 'random':
        target = main_random_sp
        processes = [mp.Process(target=target, args=([mylist])) for mylist in list_of_lists]
    elif mode == 'patches':
        target = main_patches
        processes = [mp.Process(target=target, args=([mylist])) for mylist in list_of_lists]
    else:
        print("Wrong mode!")
        return
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    print("all finished!")


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("-n", "--num_procs", dest="num_processes", type=int, default=4)
    parser.add_argument("-m", "--mode", dest="mode", default='random',
                        help='random or patches - how the training patches are chosen from the tumor region: '
                             'either random sampling or in a grid fashion')
    args = parser.parse_args()
    main(num_processes=args.num_processes, mode=args.mode)
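A small, self-contained sanity check (not part of preproc_train2.py) of the patch geometry both samplers above rely on; the 124 and 36 constants are copied from shape_inputs and shape_pred, and the 44-voxel result is why labels are cropped with [44:-44, 44:-44, 44:-44].

# Sketch: a 124^3 network input leaves a 44-voxel context margin on each side
# of the 36^3 prediction window (the d_shape offset used above).
import numpy as np

shape_inputs = np.array([124, 124, 124])
shape_pred = np.array([36, 36, 36])
d_shape = ((shape_inputs - shape_pred) // 2).astype(np.int16)

assert (d_shape == 44).all()                              # margin per side
assert (2 * d_shape + shape_pred == shape_inputs).all()   # margins + window = input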
self.py
# -*- coding: utf-8 -*- import PEPEN from PEPEN.lib.curve.ttypes import * from datetime import datetime import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, ctypes, urllib, urllib2, urllib3, wikipedia, tempfile from bs4 import BeautifulSoup from urllib import urlopen import requests from io import StringIO from threading import Thread #from gtts import gTTS from googletrans import Translator #JANGAN LUPA => sudo pip install bs4 => sudo pip install BeautifulSoup => sudo pip install urllib kr = PEPEN.LINE() #kr.login(qr=True) kr.login(token='EpO34R9D9YhD2nEU9OTa.y8oR0MsJAblb0TnVHZHSwG.kfTBf7Uidg/WE+6ECsZSPnTHL2R/xpuoikneDdfXS7Y=')#chery kr.loginResult() print "Udh Kocak Loginnyaaa :v" reload(sys) sys.setdefaultencoding('utf-8') helpmsg ="""℘℮℘єn вσtѕ =========================== line://ti/p/~pepenlagiboker ~~~~~~ cσmmαnd ~~~~~~ ❂➣google (text) ❂➣playstore (text) ❂➣instagram (username) ❂➣wikipedia (text) ❂➣idline (text) ❂➣time ❂➣image (text) ❂➣runtime ❂➣Restart ❂➣lirik (text) ❂➣Nah/tagall (mention) ❂➣cctv on/off (lurk) ❂➣toong (lurker) ❂➣protect on/off ❂➣Qr on/off ❂➣invite on/off ❂➣cancel on/off ❂➣Simisimi on/off ❂➣Read on/off ❂➣getinfo @ ❂➣getcontact @ ❂➣Cium @ ❂➣Sp/Speed ❂➣Friendlist ❂➣id@en ❂➣en@id ❂➣id@jp ❂➣Mode օղ/օƒƒ ❂➣keybot """ keymsg =""" ~~~~~~~ command key ~~~~~~ ❂➣ƙɛყ℘γơ ❂➣ƙɛყʂɛιʄ ❂➣ƙɛყɠγų℘ ❂➣ƙɛყʂɛɬ ❂➣ƙɛყɬγąŋ ❂➣ɱơɖɛ ơŋ/ơʄʄ ~~~~~~~~~~~~~~~~~~~~~~~~~~ """ helppro =""" =========================== line://ti/p/~pepenlagiboker ~~~~~~~ command pro ~~~~~~ ❂➣мσ∂є σи/σff ❂➣ρяσтє¢т σи/σff ❂➣qя σи/σff ❂➣ιиνιтє σи/σff ❂➣¢αи¢єℓ σи/σff ~~~~~~~~~~~~~~~~~~~~~~~~~~ """ helpself =""" =========================== line://ti/p/~pepenlagiboker ~~~~~~~ command pro ~~~~~~ ❂➣Me ❂➣ʍʏռǟʍɛ: ❂➣ʍʏɮɨօ: ❂➣ʍʏքɨċt ❂➣ʍʏċօʋɛʀ ❂➣ʍʏ ċօքʏ @ ❂➣my ɮǟċkʊք ❂➣ɢɛtɢʀօʊք ɨʍǟɢɛ ❂➣ɢɛtʍɨɖ @ ❂➣getprofile @ ❂➣getinfo @ ❂➣getname @ ❂➣getbio @ ❂➣getpict @ ❂➣Getcover @ ❂➣nah (ʍɛռtɨօռ) ❂➣ċċtʋ օռ/օʄʄ (ʟʊʀkɨռɢ) ❂➣ɨռtɨք/tօօռɢ (ʟʊʀkɛʀֆ) ❂➣ʍɨċǟɖɖ @ ❂➣micdel @ ❂➣mimic օռ/օʄʄ ❂➣ʍɨċʟɨֆt ~~~~~~~~~~~~~~~~~~~~~~~~~~ """ keyset =""" =========================== line://ti/p/~pepenlagiboker ======== Keyset ======== ❂➣contact on/off ❂➣autojoin on/off ❂➣auto leave on/off ❂➣autoadd on/off ❂➣like friend ❂➣link on ❂➣respon on/off ❂➣read on/off ❂➣simisimi on/off ❂➣Sambut on/off ❂➣Pergi on/off ❂➣Respontag on/off ❂➣Kicktag on/off """ helpgrup =""" =========================== line://ti/p/~pepenlagiboker ======== KeyGruP ====== ❂➣Link on ❂➣Url ❂➣Cancel ❂➣Gcreator ❂➣Kick @ ❂➣Cium @ ❂➣Gname: ❂➣Gbroadcast: ❂➣Cbroadcast: ❂➣Infogrup ❂➣Gruplist ❂➣Friendlist ❂➣Blacklist ❂➣Ban @ ❂➣Unban @ ❂➣Clearban ❂➣Banlist ❂➣Contact ban ❂➣Midban """ helptranslate =""" =========================== line://ti/p/~pepenlagiboker ======= KeyTran ===== ❂➣Id@en ❂➣En@id ❂➣Id@jp ❂➣Jp@id ❂➣Id@th ❂➣Th@id ❂➣Id@ar ❂➣Ar@id ❂➣Id@ko ❂➣Ko@id ❂➣Say-id ❂➣Say-en ❂➣Say-jp """ KAC=[kr] mid = kr.getProfile().mid Bots=[mid] owner=["u63c3abd264a36812c1d2d36f87386c5a",mid] admin=["u63c3abd264a36812c1d2d36f87386c5a",mid] baby=["u63c3abd264a36812c1d2d36f87386c5a"]#chery/barby/ranita wait = { 'likeOn':False, 'alwayRead':False, 'detectMention':True, 'kickMention':False, 'steal':True, 'pap':{}, 'invite':{}, 'spam':{}, 'contact':False, 'autoJoin':True, 'autoCancel':{"on":False,"members":5}, 'leaveRoom':True, 'timeline':False, 'autoAdd':True, 'message':"""Makasih Dah Add Saya Ya Kakak ^_^""", "lang":"JP", "comment":"👉ąµţ๏ℓɨЌ€ By Pepen", "commentOn":False, "commentBlack":{}, "wblack":False, "dblack":False, "clock":False, "cNames":"", "cNames":"", "Wc":False, "Lv":False, 
"MENTION":True, "blacklist":{}, "wblacklist":False, "dblacklist":False, "protect":False, "cancelprotect":False, "inviteprotect":False, "linkprotect":False, } wait2 = { 'readPoint':{}, 'readMember':{}, 'setTime':{}, 'ROM':{} } mimic = { "copy":False, "copy2":False, "status":False, "target":{} } settings = { "simiSimi":{} } setTime = {} setTime = wait2['setTime'] mulai = time.time() contact = kr.getProfile() mybackup = kr.getProfile() mybackup.displayName = contact.displayName mybackup.statusMessage = contact.statusMessage mybackup.pictureStatus = contact.pictureStatus contact = kr.getProfile() backup = kr.getProfile() backup.displayName = contact.displayName backup.statusMessage = contact.statusMessage backup.pictureStatus = contact.pictureStatus contact = kr.getProfile() profile = kr.getProfile() profile.displayName = contact.displayName profile.statusMessage = contact.statusMessage profile.pictureStatus = contact.pictureStatus mulai = time.time() agent = {'User-Agent' : "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"} def translate(to_translate, to_language="auto", language="auto"): bahasa_awal = "auto" bahasa_tujuan = to_language kata = to_translate url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] return result def download_page(url): version = (3,0) cur_version = sys.version_info if cur_version >= version: #If the Current Version of Python is 3.0 or above import urllib,request #urllib library for Extracting web pages try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36" req = urllib,request.Request(url, headers = headers) resp = urllib,request.urlopen(req) respData = str(resp.read()) return respData except Exception as e: print(str(e)) else: #If the Current Version of Python is 2.x import urllib2 try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17" req = urllib2.Request(url, headers = headers) response = urllib2.urlopen(req) page = response.read() return page except: return"Page Not found" #Finding 'Next Image' from the given raw page def _images_get_next_item(s): start_line = s.find('rg_di') if start_line == -1: #If no links are found then give an error! 
end_quote = 0 link = "no_links" return link, end_quote else: start_line = s.find('"class="rg_meta"') start_content = s.find('"ou"',start_line+90) end_content = s.find(',"ow"',start_content-90) content_raw = str(s[start_content+6:end_content-1]) return content_raw, end_content #Getting all links with the help of '_images_get_next_image' def _images_get_all_items(page): items = [] while True: item, end_content = _images_get_next_item(page) if item == "no_links": break else: items.append(item) #Append all the links in the list named 'Links' time.sleep(0.1) #Timer could be used to slow down the request for image downloads page = page[end_content:] return items #def autolike(): # for zx in range(0,100): # hasil = kr.activity(limit=100) # if hasil['result']['posts'][zx]['postInfo']['liked'] == False: # try: # kr.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002) # kr.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By TobyBots!!\nID LINE : line://ti/p/~tobyg74\nIG : instagram.com/tobygaming74") # print "DiLike" # except: # pass # else: # print "Sudah DiLike" # time.sleep(500) #thread2 = threading.Thread(target=autolike) #thread2.daemon = True #thread2.start() #def autolike(): # for zx in range(0,100): # hasil = kr.activity(limit=100) # if hasil['result']['posts'][zx]['postInfo']['liked'] == False: # try: # kr.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002) # kr.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««") # print "Like" # except: # pass # else: # print "Already Liked" #time.sleep(500) #thread2 = threading.Thread(target=autolike) #thread2.daemon = True #thread2.start() def yt(query): with requests.session() as s: isi = [] if query == "": query = "S1B tanysyz" s.headers['user-agent'] = 'Mozilla/5.0' url = 'http://www.youtube.com/results' params = {'search_query': query} r = s.get(url, params=params) soup = BeautifulSoup(r.content, 'html5lib') for a in soup.select('.yt-lockup-title > a[title]'): if '&list=' not in a['href']: if 'watch?v' in a['href']: b = a['href'].replace('watch?v=', '') isi += ['youtu.be' + b] return isi def waktu(secs): mins, secs = divmod(secs,60) hours, mins = divmod(mins,60) return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs) def upload_tempimage(client): ''' Upload a picture of a kitten. We don't ship one, so get creative! ''' config = { 'album': album, 'name': 'bot auto upload', 'title': 'bot auto upload', 'description': 'bot auto upload' } print("Uploading image... 
") image = client.upload_from_path(image_path, config=config, anon=False) print("Done") print() return image def sendMessage(to, text, contentMetadata={}, contentType=0): mes = Message() mes.to, mes.from_ = to, profile.mid mes.text = text mes.contentType, mes.contentMetadata = contentType, contentMetadata if to not in messageReq: messageReq[to] = -1 messageReq[to] += 1 def sendMessage(to, text, contentMetadata={}, contentType=0): mes = Message() mes.to, mes.from_ = to, profile.mid mes.text = text mes.contentType, mes.contentMetadata = contentType, contentMetadata if to not in messageReq: messageReq[to] = -1 messageReq[to] += 1 def sendImage(self, to_, path): M = Message(to=to_,contentType = 1) M.contentMetadata = None M.contentPreview = None M_id = self.Talk.client.sendMessage(0,M).id files = { 'file': open(path, 'rb'), } params = { 'name': 'media', 'oid': M_id, 'size': len(open(path, 'rb').read()), 'type': 'image', 'ver': '1.0', } data = { 'params': json.dumps(params) } r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files) if r.status_code != 201: raise Exception('Upload image failure.') return True def sendImageWithURL(self, to_, url): path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9)) r = requests.get(url, stream=True) if r.status_code == 200: with open(path, 'w') as f: shutil.copyfileobj(r.raw, f) else: raise Exception('Download image failure.') try: self.sendImage(to_, path) except Exception as e: raise e def restart_program(): python = sys.executable os.execl(python, python, * sys.argv) def post_content(self, urls, data=None, files=None): return self._session.post(urls, headers=self._headers, data=data, files=files) def sendMessage(to, text, contentMetadata={}, contentType=0): mes = Message() mes.to, mes.from_ = to, profile.mid mes.text = text mes.contentType, mes.contentMetadata = contentType, contentMetadata if to not in messageReq: messageReq[to] = -1 messageReq[to] += 1 def NOTIFIED_READ_MESSAGE(op): try: if op.param1 in wait2['readPoint']: Name = kr.getContact(op.param2).displayName if Name in wait2['readMember'][op.param1]: pass else: wait2['readMember'][op.param1] += "\n9§9" + Name wait2['ROM'][op.param1][op.param2] = "9§9" + Name else: pass except: pass def sendAudio(self, to_, path): M = Message(to=to_, text=None, contentType = 3) M_id = self.Talk.client.sendMessage(0,M).id files = { 'file': open(path, 'rb'), } params = { 'name': 'media', 'oid': M_id, 'size': len(open(path, 'rb').read()), 'type': 'audio', 'ver': '1.0', } data = { 'params': json.dumps(params) } r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files) print r if r.status_code != 201: raise Exception('Upload audio failure.') def sendAudioWithURL(self, to_, url): path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9)) r = requests.get(url, stream=True) if r.status_code == 200: with open(path, 'w') as f: shutil.copyfileobj(r.raw, f) else: raise Exception('Download audio failure.') try: self.sendAudio(to_, path) except Exception as e: raise e def sendVoice(self, to_, path): M = Message(to=to_, text=None, contentType = 3) M.contentPreview = None M_id = self._client.sendMessage(0,M).id files = { 'file': open(path, 'rb'), } params = { 'name': 'voice_message', 'oid': M_id, 'size': len(open(path, 'rb').read()), 'type': 'audio', 'ver': '1.0', } data = { 'params': json.dumps(params) } r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files) if r.status_code != 201: raise 
Exception('Upload voice failure.') return True def mention(to,nama): aa = "" bb = "" strt = int(12) akh = int(12) nm = nama #print nm for mm in nm: akh = akh + 2 aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},""" strt = strt + 6 akh = akh + 4 bb += "► @c \n" aa = (aa[:int(len(aa)-1)]) msg = Message() msg.to = to msg.text = "「Mention」\n"+bb msg.contentMetadata = {'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'} #print msg try: kr.sendMessage(msg) except Exception as error: print error def removeAllMessages(self, lastMessageId): return self._client.removeAllMessages(0, lastMessageId) def summon(to, nama): aa = "" bb = "" strt = int(14) akh = int(14) nm = nama for mm in nm: akh = akh + 2 aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},""" strt = strt + 6 akh = akh + 4 bb += "\xe2\x95\xa0 @x \n" aa = (aa[:int(len(aa)-1)]) msg = Message() msg.to = to msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90" msg.contentMetadata ={"MENTION":'{"MENTIONEES":['+aa+']}','EMTVER':'4'} print "[Command] Tag All" try: kr.sendMessage(msg) except Exception as error: print error def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX... tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"] for texX in tex: for command in commands: if string ==command: return True return False def sendMessage(to, text, contentMetadata={}, contentType=0): mes = Message() mes.to, mes.from_ = to, profile.mid mes.text = text mes.contentType, mes.contentMetadata = contentType, contentMetadata if to not in messageReq: messageReq[to] = -1 messageReq[to] += 1 def bot(op): try: if op.type == 0: return if op.type == 5: if wait['autoAdd'] == True: kr.findAndAddContactsByMid(op.param1) if (wait['message'] in [""," ","\n",None]): pass else: kr.sendText(op.param1,str(wait['message'])) if op.type == 26: msg = op.message if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True: text = msg.text if text is not None: kr.sendText(msg.to,text) if op.type == 13: print op.param3 if op.param3 in mid: if op.param2 in admin: kr.acceptGroupInvitation(op.param1) if op.type == 13: if mid in op.param3: if wait['autoJoin'] == True: if op.param2 in Bots or admin: kr.acceptGroupInvitation(op.param1) else: kr.rejectGroupInvitation(op.param1) else: print "autoJoin is Off" if op.type == 19: if op.param3 in admin: kr.kickoutFromGroup(op.param1,[op.param2]) kr.inviteIntoGroup(op.param1,admin) kr.inviteIntoGroup(op.param1,[op.param3]) else: pass if op.type == 19: if op.param3 in baby: kr.kickoutFromGroup(op.param1,[op.param2]) kr.inviteIntoGroup(op.param1,baby) kr.inviteIntoGroup(op.param1,[op.param3]) else: pass if op.type == 19: if op.param3 in baby: if op.param2 in baby: kr.inviteIntoGroup(op.param1,baby) kr.inviteIntoGroup(op.param1,[op.param3]) if op.type == 19: if mid in op.param3: wait["blacklist"][op.param2] = True if op.type == 22: if wait['leaveRoom'] == True: kr.leaveRoom(op.param1) if op.type == 24: if wait['leaveRoom'] == True: kr.leaveRoom(op.param1) if op.type == 26: msg = op.message if msg.toType == 0: msg.to = msg.from_ if msg.from_ == mid: if "join:" in msg.text: list_ = msg.text.split(":") try: kr.acceptGroupInvitationByTicket(list_[1],list_[2]) 
G = kr.getGroup(list_[1]) G.preventJoinByTicket = True kr.updateGroup(G) except: kr.sendText(msg.to,"error") if msg.toType == 1: if wait['leaveRoom'] == True: kr.leaveRoom(msg.to) if msg.contentType == 16: url = msg.contentMetadata["postEndUrl"] kr.like(url[25:58], url[66:], likeType=1001) if op.type == 26: msg = op.message if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True: text = msg.text if text is not None: kr.sendText(msg.to,text) if op.type == 26: msg = op.message if msg.to in settings["simiSimi"]: if settings["simiSimi"][msg.to] == True: if msg.text is not None: text = msg.text r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt") data = r.text data = json.loads(data) if data["status"] == 200: if data['result']['result'] == 100: kr.sendText(msg.to, "[From Simi]\n" + data['result']['response'].encode('utf-8')) if "MENTION" in msg.contentMetadata.keys() != None: if wait['detectMention'] == True: contact = kr.getContact(msg.from_) cName = contact.displayName balas = ["Don't Tag Me! iam Bussy!, ",cName + "Ada perlu apa, ?",cName + " pc aja klo urgent! sedang sibuk,", "kenapa, ", cName + " kangen?","kangen bilang gak usah tag tag, " + cName, "knp?, " + cName, "apasi?, " + cName + "?", "pulang gih, " + cName + "?","aya naon, ?" + cName + "Tersangkut -_-"] ret_ = "." + random.choice(balas) name = re.findall(r'@(\w+)', msg.text) mention = ast.literal_eval(msg.contentMetadata["MENTION"]) mentionees = mention['MENTIONEES'] for mention in mentionees: if mention['M'] in Bots: kr.sendText(msg.to,ret_) break if "MENTION" in msg.contentMetadata.keys() != None: if wait['kickMention'] == True: contact = kr.getContact(msg.from_) cName = contact.displayName balas = ["Dont Tag Me!! Im Busy, ",cName + " Ngapain Ngetag?, ",cName + " Nggak Usah Tag-Tag! 
Kalo Penting Langsung Pc Aja, ", "-_-, ","Pepen lagi off, ", cName + " Kenapa Tag saya?, ","SPAM PC aja, " + cName, "Jangan Suka Tag gua, " + cName, "Kamu siapa, " + cName + "?", "Ada Perlu apa, " + cName + "?","Tag doang tidak perlu., "] ret_ = "[Auto Respond] " + random.choice(balas) name = re.findall(r'@(\w+)', msg.text) mention = ast.literal_eval(msg.contentMetadata["MENTION"]) mentionees = mention['MENTIONEES'] for mention in mentionees: if mention['M'] in Bots: kr.sendText(msg.to,ret_) kr.kickoutFromGroup(msg.to,[msg.from_]) break if msg.contentType == 13: if wait['invite'] == True: _name = msg.contentMetadata["displayName"] invite = msg.contentMetadata["mid"] groups = kr.getGroup(msg.to) pending = groups.invitee targets = [] for s in groups.members: if _name in s.displayName: kr.sendText(msg.to, _name + " Berada DiGrup Ini") else: targets.append(invite) if targets == []: pass else: for target in targets: try: kr.findAndAddContactsByMid(target) kr.inviteIntoGroup(msg.to,[target]) kr.sendText(msg.to,"Invite " + _name) wait['invite'] = False break except: kr.sendText(msg.to,"Error") wait['invite'] = False break #if msg.contentType == 13: # if wait['steal'] == True: # _name = msg.contentMetadata["displayName"] # copy = msg.contentMetadata["mid"] # groups = kr.getGroup(msg.to) # pending = groups.invitee # targets = [] # for s in groups.members: # if _name in s.displayName: # print "[Target] Stealed" # break # else: # targets.append(copy) # if targets == []: # pass # else: # for target in targets: # try: # kr.findAndAddContactsByMid(target) # contact = kr.getContact(target) # cu = kr.channel.getCover(target) # path = str(cu) # image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus # kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage) # kr.sendText(msg.to,"Profile Picture " + contact.displayName) # kr.sendImageWithURL(msg.to,image) # kr.sendText(msg.to,"Cover " + contact.displayName) # kr.sendImageWithURL(msg.to,path) # wait['steal'] = False # break # except: # pass if wait['alwayRead'] == True: if msg.toType == 0: kr.sendChatChecked(msg.from_,msg.id) else: kr.sendChatChecked(msg.to,msg.id) if op.type == 25: msg = op.message if msg.contentType == 13: if wait["wblack"] == True: if msg.contentMetadata["mid"] in wait["commentBlack"]: kr.sendText(msg.to,"In Blacklist") wait["wblack"] = False else: wait["commentBlack"][msg.contentMetadata["mid"]] = True wait["wblack"] = False kr.sendText(msg.to,"Nothing") elif wait["dblack"] == True: if msg.contentMetadata["mid"] in wait["commentBlack"]: del wait["commentBlack"][msg.contentMetadata["mid"]] kr.sendText(msg.to,"Done") wait["dblack"] = False else: wait["dblack"] = False kr.sendText(msg.to,"Not in Blacklist") elif wait["wblacklist"] == True: if msg.contentMetadata["mid"] in wait["blacklist"]: kr.sendText(msg.to,"In Blacklist") wait["wblacklist"] = False else: wait["blacklist"][msg.contentMetadata["mid"]] = True wait["wblacklist"] = False kr.sendText(msg.to,"Done") elif wait["dblacklist"] == True: if msg.contentMetadata["mid"] in wait["blacklist"]: del wait["blacklist"][msg.contentMetadata["mid"]] kr.sendText(msg.to,"Done") wait["dblacklist"] = False else: wait["dblacklist"] = False kr.sendText(msg.to,"Done") elif wait['contact'] == True: msg.contentType = 0 kr.sendText(msg.to,msg.contentMetadata["mid"]) if 'displayName' in msg.contentMetadata: contact = kr.getContact(msg.contentMetadata["mid"]) try: cu = kr.channel.getCover(msg.contentMetadata["mid"]) 
except: cu = "" kr.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu)) else: contact = kr.getContact(msg.contentMetadata["mid"]) try: cu = kr.channel.getCover(msg.contentMetadata["mid"]) except: cu = "" kr.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu)) elif msg.contentType == 16: if wait['timeline'] == True: msg.contentType = 0 if wait["lang"] == "JP": msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"] else: msg.text = msg.contentMetadata["postEndUrl"] kr.sendText(msg.to,msg.text) elif msg.text is None: return elif msg.text.lower() == 'help': if wait["lang"] == "JP": kr.sendText(msg.to,helpmsg) else: kr.sendText(msg.to,helpmsg) elif msg.text.lower() == 'keybot': if wait["lang"] == "JP": kr.sendText(msg.to,keymsg) else: kr.sendText(msg.to,keymsg) elif msg.text.lower() == 'keypro': if wait["lang"] == "JP": kr.sendText(msg.to,helppro) else: kr.sendText(msg.to,helppro) elif msg.text.lower() == 'keyself': if wait["lang"] == "JP": kr.sendText(msg.to,helpself) else: kr.sendText(msg.to,helpself) elif msg.text.lower() == 'keygrup': if wait["lang"] == "JP": kr.sendText(msg.to,helpgrup) else: kr.sendText(msg.to,helpgrup) elif msg.text.lower() == 'keyset': if wait["lang"] == "JP": kr.sendText(msg.to,helpset) else: kr.sendText(msg.to,helpset) elif msg.text.lower() == 'keytran': if wait["lang"] == "JP": kr.sendText(msg.to,helptranslate) else: kr.sendText(msg.to,helptranslate) elif msg.text in ["Sp","Speed","speed"]: start = time.time() kr.sendText(msg.to, "❂➣Proses.....") elapsed_time = time.time() - start kr.sendText(msg.to, "%sseconds" % (elapsed_time)) elif msg.text.lower() == 'crash': msg.contentType = 13 msg.contentMetadata = {'mid': "u1f41296217e740650e0448b96851a3e2',"} kr.sendMessage(msg) kr.sendMessage(msg) elif msg.text.lower() == 'me': msg.contentType = 13 msg.contentMetadata = {'mid': mid} kr.sendMessage(msg) elif ".fb" in msg.text: a = msg.text.replace(".fb","") b = urllib.quote(a) kr.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Proses") kr.sendText(msg.to, "https://www.facebook.com" + b) kr.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Sukses") #======================== FOR COMMAND MODE ON STARTING ==========================# elif msg.text.lower() == 'mode on': if wait["protect"] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"Protecion Already On") else: kr.sendText(msg.to,"Protecion Already On") else: wait["protect"] = True if wait["lang"] == "JP": kr.sendText(msg.to,"Protecion Already On") else: kr.sendText(msg.to,"Protecion Already On") if wait["linkprotect"] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Qr already On") else: kr.sendText(msg.to,"Protection Qr already On") else: wait["linkprotect"] = True if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Qr already On") else: kr.sendText(msg.to,"Protection Qr already On") if wait["inviteprotect"] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Invite already On") else: kr.sendText(msg.to,"Protection Invite already On") else: wait["inviteprotect"] = True if wait["lang"] == "JP": kr.sendText(msg.to,"ρяσтє¢тισи ιиνιтє 
ѕєт тσ σи") else: kr.sendText(msg.to,"ρяσтє¢тισи ιиνιтє αℓяєα∂у σи") if wait["cancelprotect"] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи") else: kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи") else: wait["cancelprotect"] = True if wait["lang"] == "JP": kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи") else: kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи") #======================== FOR COMMAND MODE OFF STARTING ==========================# elif msg.text.lower() == 'mode off': if wait["protect"] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection already Off") else: kr.sendText(msg.to,"Protection already Off") else: wait["protect"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"ρяσтє¢тισи ѕєт тσ σff") else: kr.sendText(msg.to,"ρяσтє¢тισи αℓяєα∂у σff") if wait["linkprotect"] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Qr already off") else: kr.sendText(msg.to,"Protection Qr already off") else: wait["linkprotect"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Qr already Off") else: kr.sendText(msg.to,"Protection Qr already Off") if wait["inviteprotect"] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Invite already Off") else: kr.sendText(msg.to,"Protection Invite already Off") else: wait["inviteprotect"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Invite already Off") else: kr.sendText(msg.to,"Protection Invite already Off") if wait["cancelprotect"] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Cancel already Off") else: kr.sendText(msg.to,"Protection Cancel already Off") else: wait["cancelprotect"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Cancel already Off") else: kr.sendText(msg.to,"Protection Cancel already Off") #========================== FOR COMMAND BOT STARTING =============================# elif msg.text.lower() == 'contact on': if wait['contact'] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") else: kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") else: wait['contact'] = True if wait["lang"] == "JP": kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") else: kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") elif msg.text.lower() == 'contact off': if wait['contact'] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ") else: kr.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ") else: wait['contact'] = False if wait["lang"] == "JP": kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ") else: kr.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ") elif msg.text.lower() == 'protect on': if wait["protect"] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"Protecion Already On") else: kr.sendText(msg.to,"Protecion Already On") else: wait["protect"] = True if wait["lang"] == "JP": kr.sendText(msg.to,"Protecion Already On") else: kr.sendText(msg.to,"Protecion Already On") elif msg.text.lower() == 'qr on': if wait["linkprotect"] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Qr already On") else: kr.sendText(msg.to,"Protection Qr already On") else: wait["linkprotect"] = True if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Qr already On") else: kr.sendText(msg.to,"Protection Qr already On") elif msg.text.lower() == 'invite on': if wait["inviteprotect"] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Invite already On") else: kr.sendText(msg.to,"Protection Invite already On") else: wait["inviteprotect"] = True if wait["lang"] == "JP": kr.sendText(msg.to,"ρяσтє¢тισи ιиνιтє ѕєт тσ σи") else: 
kr.sendText(msg.to,"ρяσтє¢тισи ιиνιтє αℓяєα∂у σи") elif msg.text.lower() == 'cancel on': if wait["cancelprotect"] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи") else: kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи") else: wait["cancelprotect"] = True if wait["lang"] == "JP": kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи") else: kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи") elif msg.text.lower() == 'autojoin on': if wait['autoJoin'] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи") else: kr.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи") else: wait['autoJoin'] = True if wait["lang"] == "JP": kr.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи") else: kr.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи") elif msg.text.lower() == 'autojoin off': if wait['autoJoin'] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff") else: kr.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff") else: wait['autoJoin'] = False if wait["lang"] == "JP": kr.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff") else: kr.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff") elif msg.text.lower() == 'protect off': if wait["protect"] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection already Off") else: kr.sendText(msg.to,"Protection already Off") else: wait["protect"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"ρяσтє¢тισи ѕєт тσ σff") else: kr.sendText(msg.to,"ρяσтє¢тισи αℓяєα∂у σff") elif msg.text.lower() == 'qr off': if wait["linkprotect"] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Qr already off") else: kr.sendText(msg.to,"Protection Qr already off") else: wait["linkprotect"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Qr already Off") else: kr.sendText(msg.to,"Protection Qr already Off") elif msg.text.lower() == 'invit off': if wait["inviteprotect"] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Invite already Off") else: kr.sendText(msg.to,"Protection Invite already Off") else: wait["inviteprotect"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Invite already Off") else: kr.sendText(msg.to,"Protection Invite already Off") elif msg.text.lower() == 'cancel off': if wait["cancelprotect"] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Cancel already Off") else: kr.sendText(msg.to,"Protection Cancel already Off") else: wait["cancelprotect"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"Protection Cancel already Off") else: kr.sendText(msg.to,"Protection Cancel already Off") elif "Grup cancel:" in msg.text: try: strnum = msg.text.replace("Grup cancel:","") if strnum == "off": wait['autoCancel']["on"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"Itu off undangan ditolak??\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan") else: kr.sendText(msg.to,"Off undangan ditolak??Sebutkan jumlah terbuka ketika Anda ingin mengirim") else: num = int(strnum) wait['autoCancel']["on"] = True if wait["lang"] == "JP": kr.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis") else: kr.sendText(msg.to,strnum + "The team declined to create the following automatic invitation") except: if wait["lang"] == "JP": kr.sendText(msg.to,"Nilai tidak benar") else: kr.sendText(msg.to,"Weird value") elif msg.text.lower() == 'autoleave on': if wait['leaveRoom'] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"Auto Leave room set to on") else: kr.sendText(msg.to,"Auto Leave room already on") else: wait['leaveRoom'] = True if wait["lang"] == 
"JP": kr.sendText(msg.to,"Auto Leave room set to on") else: kr.sendText(msg.to,"Auto Leave room already on") elif msg.text.lower() == 'autoleave off': if wait['leaveRoom'] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Auto Leave room set to off") else: kr.sendText(msg.to,"Auto Leave room already off") else: wait['leaveRoom'] = False if wait["lang"] == "JP": kr.sendText(msg.to,"Auto Leave room set to off") else: kr.sendText(msg.to,"Auto Leave room already off") elif msg.text.lower() == 'share on': if wait['timeline'] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"Share set to on") else: kr.sendText(msg.to,"Share already on") else: wait['timeline'] = True if wait["lang"] == "JP": kr.sendText(msg.to,"Share set to on") else: kr.sendText(msg.to,"Share already on") elif msg.text.lower() == 'share off': if wait['timeline'] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Share set to off") else: kr.sendText(msg.to,"Share already off") else: wait['timeline'] = False if wait["lang"] == "JP": kr.sendText(msg.to,"Share set to off") else: kr.sendText(msg.to,"Share already off") elif msg.text.lower() == "status": md = """╔═════════════\n""" if wait['contact'] == True: md+="╠❂➣Contact:on [✅]\n" else: md+="╠❂➣Contact:off [❌]\n" if wait['autoJoin'] == True: md+="╠❂➣Auto Join:on [✅]\n" else: md +="╠❂➣Auto Join:off [❌]\n" if wait['autoCancel']["on"] == True:md+="╠❂➣Auto cancel:" + str(wait['autoCancel']["members"]) + "[✅]\n" else: md+= "╠❂➣Group cancel:off [❌]\n" if wait['leaveRoom'] == True: md+="╠❂➣Auto leave:on [✅]\n" else: md+="╠❂➣Auto leave:off [❌]\n" if wait['timeline'] == True: md+="╠❂➣Share:on [✅]\n" else:md+="╠❂➣Share:off [❌]\n" if wait['autoAdd'] == True: md+="╠❂➣Auto add:on [✅]\n" else:md+="╠❂➣Auto add:off [❌]\n" if wait["protect"] == True: md+="╠❂➣Protect:on [✅]\n" else:md+="╠❂➣Protect:off [❌]\n" if wait["linkprotect"] == True: md+="╠❂➣Link Protect:on [✅]\n" else:md+="╠❂➣Link Protect:off [❌]\n" if wait["inviteprotect"] == True: md+="╠❂➣Invitation Protect:on [✅]\n" else:md+="╠❂➣Invitation Protect:off [❌]\n" if wait["cancelprotect"] == True: md+="╠❂➣Cancel Protect:on [✅]\n" else:md+="╠❂➣Cancel Protect:off [❌]\n╚═════════════" kr.sendText(msg.to,md) msg.contentType = 13 msg.contentMetadata = {'mid': "u63c3abd264a36812c1d2d36f87386c5a"} kr.sendMessage(msg) elif cms(msg.text,["creator","Creator"]): msg.contentType = 13 msg.contentMetadata = {'mid': "u63c3abd264a36812c1d2d36f87386c5a"} kr.sendMessage(msg) kr.sendText(msg.to,'❂➣ Creator yang manis kalem 􀜁􀄯􏿿') elif msg.text.lower() == 'autoadd on': if wait['autoAdd'] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"Auto add set to on") else: kr.sendText(msg.to,"Auto add already on") else: wait['autoAdd'] = True if wait["lang"] == "JP": kr.sendText(msg.to,"Auto add set to on") else: kr.sendText(msg.to,"Auto add already on") elif msg.text.lower() == 'autoadd off': if wait['autoAdd'] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Auto add set to off") else: kr.sendText(msg.to,"Auto add already off") else: wait['autoAdd'] = False if wait["lang"] == "JP": kr.sendText(msg.to,"Auto add set to off") else: kr.sendText(msg.to,"Auto add already off") elif "Pesan set:" in msg.text: wait['message'] = msg.text.replace("Pesan set:","") kr.sendText(msg.to,"We changed the message") elif msg.text.lower() == 'pesan cek': if wait["lang"] == "JP": kr.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait['message']) else: kr.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + 
wait['message']) elif "Come Set:" in msg.text: c = msg.text.replace("Come Set:","") if c in [""," ","\n",None]: kr.sendText(msg.to,"Merupakan string yang tidak bisa diubah") else: wait["comment"] = c kr.sendText(msg.to,"Ini telah diubah\n\n" + c) elif msg.text in ["Com on","Com:on","Comment on"]: if wait["commentOn"] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"Aku berada di") else: kr.sendText(msg.to,"To open") else: wait["commentOn"] = True if wait["lang"] == "JP": kr.sendText(msg.to,"Comment Actived") else: kr.sendText(msg.to,"Comment Has Been Active") elif msg.text in ["Come off"]: if wait["commentOn"] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"Hal ini sudah off") else: kr.sendText(msg.to,"It is already turned off") else: wait["commentOn"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"Off") else: kr.sendText(msg.to,"To turn off") elif msg.text in ["Com","Comment"]: kr.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:??\n\n" + str(wait["comment"])) elif msg.text in ["Com Bl"]: wait["wblack"] = True kr.sendText(msg.to,"Please send contacts from the person you want to add to the blacklist") elif msg.text in ["Com hapus Bl"]: wait["dblack"] = True kr.sendText(msg.to,"Please send contacts from the person you want to add from the blacklist") elif msg.text in ["Com Bl cek"]: if wait["commentBlack"] == {}: kr.sendText(msg.to,"Nothing in the blacklist") else: kr.sendText(msg.to,"The following is a blacklist") mc = "" for mi_d in wait["commentBlack"]: mc += "・" +kr.getContact(mi_d).displayName + "\n" kr.sendText(msg.to,mc) elif msg.text.lower() == 'jam on': if wait["clock"] == True: kr.sendText(msg.to,"Jam already on") else: wait["clock"] = True now2 = datetime.now() nowT = datetime.strftime(now2,"?%H:%M?") profile = kr.getProfile() profile.displayName = wait["cName"] + nowT kr.updateProfile(profile) kr.sendText(msg.to,"Jam set on") elif msg.text.lower() == 'jam off': if wait["clock"] == False: kr.sendText(msg.to,"Jam already off") else: wait["clock"] = False kr.sendText(msg.to,"Jam set off") elif "Jam say:" in msg.text: n = msg.text.replace("Jam say:","") if len(n.decode("utf-8")) > 30: kr.sendText(msg.to,"terlalu lama") else: wait["cName"] = n kr.sendText(msg.to,"Nama Jam Berubah menjadi:" + n) elif msg.text.lower() == 'update': if wait["clock"] == True: now2 = datetime.now() nowT = datetime.strftime(now2,"?%H:%M?") profile = kr.getProfile() profile.displayName = wait["cName"] + nowT kr.updateProfile(profile) kr.sendText(msg.to,"Diperbarui") else: kr.sendText(msg.to,"Silahkan Aktifkan Jam") elif "Image " in msg.text: search = msg.text.replace("Image ","") url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search raw_html = (download_page(url)) items = [] items = items + (_images_get_all_items(raw_html)) path = random.choice(items) print path try: kr.sendImageWithURL(msg.to,path) except: pass #========================== FOR COMMAND BOT FINISHED =============================# elif "Spam change:" in msg.text: if msg.toType == 2: wait['spam'] = msg.text.replace("Spam change:","") kr.sendText(msg.to,"spam changed") elif "Spam add:" in msg.text: if msg.toType == 2: wait['spam'] = msg.text.replace("Spam add:","") if wait["lang"] == "JP": kr.sendText(msg.to,"spam changed") else: kr.sendText(msg.to,"Done") elif "Spam:" in msg.text: if msg.toType == 2: strnum = msg.text.replace("Spam:","") num = int(strnum) for var in range(0,num): kr.sendText(msg.to, wait['spam']) 
#=====================================
        elif "Spam " in msg.text:
            if msg.toType == 2:
                bctxt = msg.text.replace("Spam ", "")
                # the original fetched all contact ids into t and immediately overwrote it
                # with 500; the dead call is dropped here
                t = 500
                while(t):
                    kr.sendText(msg.to, (bctxt))
                    t -= 1
#==============================================
        elif "Spamcontact @" in msg.text:
            _name = msg.text.replace("Spamcontact @","")
            _nametarget = _name.rstrip(' ')
            gs = kr.getGroup(msg.to)
            for g in gs.members:
                if _nametarget == g.displayName:
                    # the original repeated this sendText call verbatim (roughly 160 copies);
                    # collapsed into an equivalent loop, count approximate
                    for _ in range(160):
                        kr.sendText(g.mid,'spam')
                    kr.sendText(g.mid,'mampus')
                    kr.sendText(msg.to, "Done")
                    print " Spammed !"
#==============================================================================#
        elif msg.text in ["Invite"]:
            wait["invite"] = True
            kr.sendText(msg.to,"Send Contact")
        elif msg.text in ["Steal contact"]:
            wait['contact'] = True
            kr.sendText(msg.to,"Send Contact")
        elif msg.text in ["Like:me","Like me"]: #Semua Bot Ngelike Status Akun Utama
            print "[Command]Like executed"
            kr.sendText(msg.to,"Like Status Owner")
            try:
                likeme()
            except:
                pass
        elif msg.text in ["Like:friend","Like friend"]: #Semua Bot Ngelike Status Teman
            print "[Command]Like executed"
            kr.sendText(msg.to,"Like Status Teman")
            try:
                likefriend()
            except:
                pass
        elif msg.text in ["Like:on","Like on"]:
            if wait['likeOn'] == True:
                if wait["lang"] == "JP":
                    kr.sendText(msg.to,"Done")
            else:
                wait['likeOn'] = True
                if wait["lang"] == "JP":
                    kr.sendText(msg.to,"Already")
        elif msg.text in ["Like off","Like:off"]:
            if wait['likeOn'] == False:
                if wait["lang"] == "JP":
                    kr.sendText(msg.to,"Done")
            else:
                wait['likeOn'] = False
                if wait["lang"] == "JP":
                    kr.sendText(msg.to,"Already")
        elif msg.text in ["Simisimi on","Simisimi:on"]:
            settings["simiSimi"][msg.to] = True
            kr.sendText(msg.to,"Simi mode On")
        elif msg.text in ["Simisimi off","Simisimi:off"]:
            settings["simiSimi"][msg.to] = False
            kr.sendText(msg.to,"Simi mode Off")
        elif msg.text in ["Autoread on","Read:on"]:
            wait['alwayRead'] = True
            kr.sendText(msg.to,"Auto read On")
        elif msg.text in ["Autoread off","Read:off"]:
            wait['alwayRead'] = False
            kr.sendText(msg.to,"Auto read Off")
        elif msg.text in ["Respontag on","Autorespon:on","Respon on","Respon:on"]:
            wait['detectMention'] = True
            kr.sendText(msg.to,"Auto respon tag On")
        elif msg.text in ["Respontag off","Autorespon:off","Respon off","Respon:off"]:
            wait['detectMention'] = False
            kr.sendText(msg.to,"Auto respon tag Off")
        elif msg.text in ["Kicktag on","Autokick:on","Responkick on","Responkick:on"]:
            wait['kickMention'] = True
            kr.sendText(msg.to,"Auto Kick tag ON")
        elif msg.text in ["Kicktag off","Autokick:off","Responkick off","Responkick:off"]:
            wait['kickMention'] = False
            kr.sendText(msg.to,"Auto Kick tag OFF")
        elif "Time" in msg.text:
            if msg.toType == 2:
                kr.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
#==============================================================================#
        elif msg.text in ["Sambut on","sambut on"]:
            if wait["Wc"] == True:
                if wait["lang"] == "JP":
                    kr.sendText(msg.to,"noтιғ yg joιn on")
            else:
                wait["Wc"] = True
                if wait["lang"] == "JP":
                    kr.sendText(msg.to,"already on")
        elif msg.text in ["Sambut off","sambut off"]:
            if wait["Wc"] == False:
                if
wait["lang"] == "JP": kr.sendText(msg.to,"noтιғ yg joιn oғғ") else: wait["Wc"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"already oғғ") #==============================================================================# elif msg.text in ["Pergi on","pergi on"]: if wait["Lv"] == True: if wait["lang"] == "JP": kr.sendText(msg.to,"noтιғ yg leave on") else: wait["Lv"] = True if wait["lang"] == "JP": kr.sendText(msg.to,"already on") elif msg.text in ["Pergi off","pergi off"]: if wait["Lv"] == False: if wait["lang"] == "JP": kr.sendText(msg.to,"noтιғ yg leave oғғ") else: wait["Lv"] = False if wait["lang"] == "JP": kr.sendText(msg.to,"already oғғ") #==============================================================================# elif ":V" in msg.text: if msg.toType == 2: if msg.toType == 2: print "ok" _name = msg.text.replace(":V","") gs = kr.getGroup(msg.to) gs = kr.getGroup(msg.to) gs = kr.getGroup(msg.to) kr.sendText(msg.to,":>") kr.sendText(msg.to,":D") targets = [] for g in gs.members: if _name in g.displayName: targets.append(g.mid) if targets == []: kr.sendText(msg.to,"Not found.") kr.sendText(msg.to,"Not found.") else: for target in targets: try: klist=[kr,kr,kr] kicker=random.choice(klist) kicker.kickoutFromGroup(msg.to,[target]) print (msg.to,[g.mid]) except: kr.sendText(msg.to,"Njir") kr.sendText(msg.to,":V") elif msg.text in ["Salam1"]: kr.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ") kr.sendText(msg.to,"Assalamu'alaikum") elif msg.text in ["Salam2"]: kr.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُرَحْمَةُ اللهِ وَبَرَكَاتُهُ") kr.sendText(msg.to,"Wa'alaikumsallam.Wr,Wb") elif "Salam3" in msg.text: if msg.from_ in owner: kr.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ") kr.sendText(msg.to,"Assalamu'alaikum") kr.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ") kr.sendText(msg.to,"Wa'alaikumsallam.Wr,Wb") if msg.toType == 2: print "ok" _name = msg.text.replace("Salam3","") gs = kr.getGroup(msg.to) kr.sendText(msg.to,"maaf kalo gak sopan") kr.sendText(msg.to,"Qo salamnya gak ada yang jawab ya..!!") kr.sendText(msg.to,"hehehhehe") targets = [] for g in gs.members: if _name in g.displayName: targets.append(g.mid) if targets == []: kr.sendText(msg.to,"Not found") else: for target in targets: if target not in admin: try: klist=[kr] kicker=random.choice(klist) kicker.kickoutFromGroup(msg.to,[target]) print (msg.to,[g.mid]) except: kr.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ") kr.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ") kr.sendText(msg.to,"Nah salamnya jawab sendiri dah") elif ("Kick " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"] [0] ["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: kr.kickoutFromGroup(msg.to,[target]) except: kr.sendText(msg.to,"Error") elif ("Cium " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"] [0] ["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: kr.kickoutFromGroup(msg.to,[target]) kr.inviteIntoGroup(msg.to,[target]) kr.cancelGroupInvitation(msg.to,[target]) except: kr.sendText(msg.to,"Error") elif "Kick: " in msg.text: midd = msg.text.replace("Kick: ","") kr.kickoutFromGroup(msg.to,[midd]) elif 'invite ' in msg.text.lower(): key = msg.text[-33:] kr.findAndAddContactsByMid(key) kr.inviteIntoGroup(msg.to, [key]) contact = kr.getContact(key) elif msg.text.lower() == 'cancel': if 
msg.toType == 2: group = kr.getGroup(msg.to) if group.invitee is not None: gInviMids = [contact.mid for contact in group.invitee] kr.cancelGroupInvitation(msg.to, gInviMids) else: if wait["lang"] == "JP": kr.sendText(msg.to,"Tidak ada undangan") else: kr.sendText(msg.to,"Invitan tidak ada") else: if wait["lang"] == "JP": kr.sendText(msg.to,"Tidak ada undangan :V") else: kr.sendText(msg.to,"Invitan tidak ada :V") elif msg.text.lower() == 'link on': if msg.toType == 2: group = kr.getGroup(msg.to) group.preventJoinByTicket = False kr.updateGroup(group) if wait["lang"] == "JP": kr.sendText(msg.to,"URL open") else: kr.sendText(msg.to,"URL open") else: if wait["lang"] == "JP": kr.sendText(msg.to,"It can not be used outside the group") else: kr.sendText(msg.to,"Can not be used for groups other than") elif msg.text.lower() == 'link off': if msg.toType == 2: group = kr.getGroup(msg.to) group.preventJoinByTicket = True kr.updateGroup(group) if wait["lang"] == "JP": kr.sendText(msg.to,"URL close") else: kr.sendText(msg.to,"URL close") else: if wait["lang"] == "JP": kr.sendText(msg.to,"It can not be used outside the group") else: kr.sendText(msg.to,"Can not be used for groups other than") elif msg.text in ["Url","Gurl"]: if msg.toType == 2: g = kr.getGroup(msg.to) if g.preventJoinByTicket == True: g.preventJoinByTicket = False kr.updateGroup(g) gurl = kr.reissueGroupTicket(msg.to) kr.sendText(msg.to,"line://ti/g/" + gurl) elif "Gcreator" == msg.text: try: group = kr.getGroup(msg.to) GS = group.creator.mid M = Message() M.to = msg.to M.contentType = 13 M.contentMetadata = {'mid': GS} kr.sendMessage(M) except: W = group.members[0].mid M = Message() M.to = msg.to M.contentType = 13 M.contentMetadata = {'mid': W} kr.sendMessage(M) kr.sendText(msg.to,"Creator Grup") elif msg.text.lower() == 'invite:gcreator': if msg.toType == 2: ginfo = kr.getGroup(msg.to) try: gcmid = ginfo.creator.mid except: gcmid = "Error" if wait["lang"] == "JP": kr.inviteIntoGroup(msg.to,[gcmid]) else: kr.inviteIntoGroup(msg.to,[gcmid]) elif ("Gn " in msg.text): if msg.toType == 2: X = kr.getGroup(msg.to) X.name = msg.text.replace("Gn ","") kr.updateGroup(X) elif msg.text.lower() == 'infogrup': group = kr.getGroup(msg.to) try: gCreator = group.creator.displayName except: gCreator = "Error" md = "[Nama Grup : ]\n" + group.name + "\n\n[Id Grup : ]\n" + group.id + "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan" else: md += "\n\nKode Url : Diblokir" if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang" else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang" kr.sendText(msg.to,md) elif msg.text.lower() == 'grup id': gid = kr.getGroupIdsJoined() h = "" for i in gid: h += "[%s]:%s\n" % (kr.getGroup(i).name,i) kr.sendText(msg.to,h) #==============================================================================# elif msg.text in ["Glist"]: gid = kr.getGroupIdsJoined() h = "" for i in gid: h += "%s\n" % (kr.getGroup(i).name +" ? 
["+str(len(kr.getGroup(i).members))+"]") kr.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]") elif msg.text.lower() == 'gcancel': gid = kr.getGroupIdsInvited() for i in gid: kr.rejectGroupInvitation(i) if wait["lang"] == "JP": kr.sendText(msg.to,"Aku menolak semua undangan") else: kr.sendText(msg.to,"He declined all invitations") elif "Auto add" in msg.text: thisgroup = kr.getGroups([msg.to]) Mids = [contact.mid for contact in thisgroup[0].members] mi_d = Mids[:33] kr.findAndAddContactsByMids(mi_d) kr.sendText(msg.to,"Success Add all") #==============================================================================# elif "Tagall" == msg.text.lower(): group = kr.getGroup(msg.to) nama = [contact.mid for contact in group.members] nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama) if jml <= 100: summon(msg.to, nama) if jml > 100 and jml < 200: for i in range(0, 99): nm1 += [nama[i]] summon(msg.to, nm1) for j in range(100, len(nama)-1): nm2 += [nama[j]] summon(msg.to, nm2) if jml > 200 and jml < 500: for i in range(0, 99): nm1 += [nama[i]] summon(msg.to, nm1) for j in range(100, 199): nm2 += [nama[j]] summon(msg.to, nm2) for k in range(200, 299): nm3 += [nama[k]] summon(msg.to, nm3) for l in range(300, 399): nm4 += [nama[l]] summon(msg.to, nm4) for m in range(400, len(nama)-1): nm5 += [nama[m]] summon(msg.to, nm5) if jml > 500: print "Terlalu Banyak Men 500+" cnt = Message() cnt.text = "Jumlah:\👉n" +str(jml) + "👈 Members" cnt.to = msg.to kr.sendMessage(cnt) elif "cctv on" == msg.text.lower(): if msg.to in wait2['readPoint']: try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] del wait2['setTime'][msg.to] except: pass wait2['readPoint'][msg.to] = msg.id wait2['readMember'][msg.to] = "" wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S') wait2['ROM'][msg.to] = {} with open('sider.json', 'w') as fp: json.dump(wait2, fp, sort_keys=True, indent=4) kr.sendText(msg.to,"Setpoint already on") else: try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] del wait2['setTime'][msg.to] except: pass wait2['readPoint'][msg.to] = msg.id wait2['readMember'][msg.to] = "" wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S') wait2['ROM'][msg.to] = {} with open('sider.json', 'w') as fp: json.dump(wait2, fp, sort_keys=True, indent=4) kr.sendText(msg.to, "Set reading point:\n" + datetime.now().strftime('%H:%M:%S')) print wait2 elif "cctv off" == msg.text.lower(): if msg.to not in wait2['readPoint']: kr.sendText(msg.to,"Setpoint already off") else: try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] del wait2['setTime'][msg.to] except: pass kr.sendText(msg.to, "Delete reading point:\n" + datetime.now().strftime('%H:%M:%S')) elif msg.text in ["toong","Toong"]: if msg.toType == 2: print "\nRead aktif..." if msg.to in wait2['readPoint']: if wait2['ROM'][msg.to].items() == []: chiya = "" else: chiya = "" for rom in wait2['ROM'][msg.to].items(): print rom chiya += rom[1] + "\n" kr.sendText(msg.to, "╔═════════════ \n╠❂➣Sider :\n╠═════════════ %s\n╠\n╠═════════════\n╠❂➣Reader :\n╠═════════════ %s\n╠\n╠═════════════\n╠In the last seen point:\n╠[%s]\n╚═════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to])) print "\nReading Point Set..." 
try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] except: pass wait2['readPoint'][msg.to] = msg.id wait2['readMember'][msg.to] = "" wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S') wait2['ROM'][msg.to] = {} print "toong ready" kr.sendText(msg.to, "Auto Read Point!!" + (wait2['setTime'][msg.to])) else: kr.sendText(msg.to, "Ketik [Cctv on] dulu, baru ketik [Toong]") elif "intip" == msg.text.lower(): if msg.to in wait2['readPoint']: if wait2['ROM'][msg.to].items() == []: kr.sendText(msg.to, "Reader:\nNone") else: chiya = [] for rom in wait2['ROM'][msg.to].items(): chiya.append(rom[1]) cmem = kr.getContacts(chiya) zx = "" zxc = "" zx2 = [] xpesan = '' for x in range(len(cmem)): xname = str(cmem[x].displayName) pesan = '' pesan2 = pesan+"@a\n" xlen = str(len(zxc)+len(xpesan)) xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1) zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid} zx2.append(zx) zxc += pesan2 msg.contentType = 0 print zxc msg.text = xpesan+ zxc + "\nBefore: %s\nAfter: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S')) lol ={"MENTION":str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')} print lol msg.contentMetadata = lol try: kr.sendMessage(msg) except Exception as error: print error pass else: kr.sendText(msg.to, "Lurking has not been set.") elif "Gbc " in msg.text: bc = msg.text.replace("Gbc ","") gid = kr.getGroupIdsJoined() for i in gid: kr.sendText(i, bc) elif "Cbroadcast: " in msg.text: bc = msg.text.replace("Cbroadcast: ","") gid = kr.getAllContactIds() for i in gid: kr.sendText(i, bc) elif "Spam change: " in msg.text: wait['spam'] = msg.text.replace("Spam change: ","") kr.sendText(msg.to,"spam changed") elif "Spam add: " in msg.text: wait['spam'] = msg.text.replace("Spam add: ","") if wait["lang"] == "JP": kr.sendText(msg.to,"spam changed") else: kr.sendText(msg.to,"Done") elif "Spam: " in msg.text: strnum = msg.text.replace("Spam: ","") num = int(strnum) for var in range(0,num): kr.sendText(msg.to, wait['spam']) elif "Spamtag @" in msg.text: _name = msg.text.replace("Spamtag @","") _nametarget = _name.rstrip(' ') gs = kr.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: xname = g.displayName xlen = str(len(xname)+1) msg.contentType = 0 msg.text = "@"+xname+" " msg.contentMetadata ={"MENTION":'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'} kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) else: pass elif "spam" in msg.text: txt = msg.text.split(" ") jmlh = int(txt[2]) teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","") tulisan = jmlh * (teks+"\n") if txt[1] == "on": if jmlh <= 100000: for x in range(jmlh): kr.sendText(msg.to, teks) else: kr.sendText(msg.to, "Out of Range!") elif txt[1] == "off": if jmlh <= 100000: kr.sendText(msg.to, tulisan) else: kr.sendText(msg.to, "Out Of Range!") elif ("Micadd " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: mimic["target"][target] = True kr.sendText(msg.to,"Target ditambahkan!") break except: kr.sendText(msg.to,"Fail !") break elif 
("Micdel " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: del mimic["target"][target] kr.sendText(msg.to,"Target dihapuskan!") break except: kr.sendText(msg.to,"Fail !") break elif msg.text in ["Miclist"]: if mimic["target"] == {}: kr.sendText(msg.to,"nothing") else: mc = "Target mimic user\n" for mi_d in mimic["target"]: mc += "?? "+kr.getContact(mi_d).displayName + "\n" kr.sendText(msg.to,mc) elif "Mimic target " in msg.text: if mimic["copy"] == True: siapa = msg.text.replace("Mimic target ","") if siapa.rstrip(' ') == "me": mimic["copy2"] = "me" kr.sendText(msg.to,"Mimic change to me") elif siapa.rstrip(' ') == "target": mimic["copy2"] = "target" kr.sendText(msg.to,"Mimic change to target") else: kr.sendText(msg.to,"I dont know") elif "Mimic " in msg.text: cmd = msg.text.replace("Mimic ","") if cmd == "on": if mimic["status"] == False: mimic["status"] = True kr.sendText(msg.to,"Reply Message on") else: kr.sendText(msg.to,"Sudah on") elif cmd == "off": if mimic["status"] == True: mimic["status"] = False kr.sendText(msg.to,"Reply Message off") else: kr.sendText(msg.to,"Sudah off") elif "Setimage: " in msg.text: wait['pap'] = msg.text.replace("Setimage: ","") kr.sendText(msg.to, "Pap telah di Set") elif msg.text in ["Papimage","Papim",'pap']: kr.sendImageWithURL(msg.to,wait['pap']) elif "Setvideo: " in msg.text: wait['pap'] = msg.text.replace("Setvideo: ","") kr.sendText(msg.to,"Video Has Ben Set To") elif msg.text in ["Papvideo","Papvid"]: kr.sendVideoWithURL(msg.to,wait['pap']) elif "TL:" in msg.text: if msg.toType == 2: tl_text = msg.text.replace("TL:","") kr.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+kr.new_post(tl_text)["result"]["post"]["postInfo"]["postId"]) #==============================================================================# elif msg.text.lower() == 'mymid': kr.sendText(msg.to,mid) elif "Timeline: " in msg.text: tl_text = msg.text.replace("Timeline: ","") kr.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+kr.new_post(tl_text)["result"]["post"]["postInfo"]["postId"]) elif "Myname: " in msg.text: string = msg.text.replace("Myname: ","") if len(string.decode('utf-8')) <= 10000000000: profile = kr.getProfile() profile.displayName = string kr.updateProfile(profile) kr.sendText(msg.to,"Changed " + string + "") elif "Mybio: " in msg.text: string = msg.text.replace("Mybio: ","") if len(string.decode('utf-8')) <= 10000000000: profile = kr.getProfile() profile.statusMessage = string kr.updateProfile(profile) kr.sendText(msg.to,"Changed " + string) elif msg.text in ["Myname"]: h = kr.getContact(mid) kr.sendText(msg.to,"===[DisplayName]===\n" + h.displayName) elif msg.text in ["Mybio"]: h = kr.getContact(mid) kr.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage) elif msg.text in ["Mypict"]: h = kr.getContact(mid) kr.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus) elif msg.text in ["Myvid"]: h = kr.getContact(mid) kr.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus) elif msg.text in ["Urlpict"]: h = kr.getContact(mid) kr.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus) elif msg.text in ["Mycover"]: h = kr.getContact(mid) cu = kr.channel.getCover(mid) path = str(cu) kr.sendImageWithURL(msg.to, path) elif msg.text in ["Urlcover"]: h = kr.getContact(mid) cu = kr.channel.getCover(mid) path = str(cu) kr.sendText(msg.to, path) elif "Getmid @" in 
msg.text: _name = msg.text.replace("Getmid @","") _nametarget = _name.rstrip(' ') gs = kr.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: kr.sendText(msg.to, g.mid) else: pass elif "Getinfo" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = kr.getContact(key1) cu = kr.channel.getCover(key1) try: kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu)) except: kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu)) elif "Getbio" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = kr.getContact(key1) cu = kr.channel.getCover(key1) try: kr.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage) except: kr.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage) elif "Getname" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = kr.getContact(key1) cu = kr.channel.getCover(key1) try: kr.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName) except: kr.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName) elif "Getprofile" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = kr.getContact(key1) cu = kr.channel.getCover(key1) path = str(cu) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus try: kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage) kr.sendText(msg.to,"Profile Picture " + contact.displayName) kr.sendImageWithURL(msg.to,image) kr.sendText(msg.to,"Cover " + contact.displayName) kr.sendImageWithURL(msg.to,path) except: pass elif "Getcontact" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] mmid = kr.getContact(key1) msg.contentType = 13 msg.contentMetadata = {"mid": key1} kr.sendMessage(msg) elif "Getpict @" in msg.text: print "[Command]dp executing" _name = msg.text.replace("Getpict @","") _nametarget = _name.rstrip(' ') gs = kr.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kr.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = kr.getContact(target) path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus kr.sendImageWithURL(msg.to, path) except Exception as e: raise e print "[Command]dp executed" elif "Getvid @" in msg.text: print "[Command]dp executing" _name = msg.text.replace("Getvid @","") _nametarget = _name.rstrip(' ') gs = kr.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kr.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = kr.getContact(target) path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus kr.sendVideoWithURL(msg.to, path) except Exception as e: raise e print "[Command]dp executed" elif "Picturl @" in msg.text: print "[Command]dp executing" _name = msg.text.replace("Picturl @","") _nametarget = _name.rstrip(' ') gs = kr.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kr.sendText(msg.to,"Contact not found") else: for target in targets: try: 
contact = kr.getContact(target) path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus kr.sendText(msg.to, path) except Exception as e: raise e print "[Command]dp executed" elif "Getcover @" in msg.text: print "[Command]cover executing" _name = msg.text.replace("Getcover @","") _nametarget = _name.rstrip(' ') gs = kr.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kr.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = kr.getContact(target) cu = kr.channel.getCover(target) path = str(cu) kr.sendImageWithURL(msg.to, path) except Exception as e: raise e print "[Command]cover executed" elif "Coverurl @" in msg.text: print "[Command]cover executing" _name = msg.text.replace("Coverurl @","") _nametarget = _name.rstrip(' ') gs = kr.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kr.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = kr.getContact(target) cu = kr.channel.getCover(target) path = str(cu) kr.sendText(msg.to, path) except Exception as e: raise e print "[Command]cover executed" elif "Getgrup image" in msg.text: group = kr.getGroup(msg.to) path = "http://dl.profile.line-cdn.net/" + group.pictureStatus kr.sendImageWithURL(msg.to,path) elif "Urlgrup image" in msg.text: group = kr.getGroup(msg.to) path = "http://dl.profile.line-cdn.net/" + group.pictureStatus kr.sendText(msg.to,path) elif "Mycopy @" in msg.text: print "[COPY] Ok" _name = msg.text.replace("Mycopy @","") _nametarget = _name.rstrip(' ') gs = kr.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kr.sendText(msg.to, "Not Found...") else: for target in targets: try: kr.CloneContactProfile(target) kr.sendText(msg.to, "Copied.") except Exception as e: print e elif msg.text in ["Mybackup","mybackup"]: try: kr.updateDisplayPicture(backup.pictureStatus) kr.updateProfile(backup) kr.sendText(msg.to, "Refreshed.") except Exception as e: kr.sendText(msg.to, str(e)) #==============================================================================# elif "Fancytext: " in msg.text: txt = msg.text.replace("Fancytext: ", "") kr.kedapkedip(msg.to,txt) print "[Command] Kedapkedip" elif "Translate-id " in msg.text: isi = msg.text.replace("Tr-id ","") translator = Translator() hasil = translator.translate(isi, dest='id') A = hasil.text A = A.encode('utf-8') kr.sendText(msg.to, A) elif "Translate-en " in msg.text: isi = msg.text.replace("Tr-en ","") translator = Translator() hasil = translator.translate(isi, dest='en') A = hasil.text A = A.encode('utf-8') kr.sendText(msg.to, A) elif "Translate-ar" in msg.text: isi = msg.text.replace("Tr-ar ","") translator = Translator() hasil = translator.translate(isi, dest='ar') A = hasil.text A = A.encode('utf-8') kr.sendText(msg.to, A) elif "Translate-jp" in msg.text: isi = msg.text.replace("Tr-jp ","") translator = Translator() hasil = translator.translate(isi, dest='ja') A = hasil.text A = A.encode('utf-8') kr.sendText(msg.to, A) elif "Translate-ko" in msg.text: isi = msg.text.replace("Tr-ko ","") translator = Translator() hasil = translator.translate(isi, dest='ko') A = hasil.text A = A.encode('utf-8') kr.sendText(msg.to, A) elif "Id@en" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'en' kata = msg.text.replace("Id@en ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, 
bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] kr.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO ENGLISH**\n" + "" + result + "\n**SUKSES**") elif "En@id" in msg.text: bahasa_awal = 'en' bahasa_tujuan = 'id' kata = msg.text.replace("En@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] kr.sendText(msg.to,"**FROM EN**\n" + "" + kata + "\n**TO ID**\n" + "" + result + "\n**SUKSES**") elif "Id@jp" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ja' kata = msg.text.replace("Id@jp ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] kr.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO JP**\n" + "" + result + "\n**SUKSES**") elif "Jp@id" in msg.text: bahasa_awal = 'ja' bahasa_tujuan = 'id' kata = msg.text.replace("Jp@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] kr.sendText(msg.to,"----FROM JP----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif "Id@th" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'th' kata = msg.text.replace("Id@th ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] kr.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO TH----\n" + "" + result + "\n------SUKSES-----") elif "Th@id" in msg.text: bahasa_awal = 'th' bahasa_tujuan = 'id' kata = msg.text.replace("Th@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] kr.sendText(msg.to,"----FROM TH----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif "Id@jp" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ja' kata = msg.text.replace("Id@jp ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, 
headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] kr.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----") elif "Id@ar" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ar' kata = msg.text.replace("Id@ar ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] kr.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO AR----\n" + "" + result + "\n------SUKSES-----") elif "Ar@id" in msg.text: bahasa_awal = 'ar' bahasa_tujuan = 'id' kata = msg.text.replace("Ar@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] kr.sendText(msg.to,"----FROM AR----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif "Id@ko" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ko' kata = msg.text.replace("Id@ko ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] kr.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO KO----\n" + "" + result + "\n------SUKSES-----") elif "Ko@id" in msg.text: bahasa_awal = 'ko' bahasa_tujuan = 'id' kata = msg.text.replace("Ko@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] kr.sendText(msg.to,"----FROM KO----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif msg.text.lower() == 'welcome': ginfo = kr.getGroup(msg.to) kr.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name)) jawaban1 = ("Selamat Datang Di Grup " + str(ginfo.name)) kr.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName ) tts = gTTS(text=jawaban1, lang='id') tts.save('tts.mp3') kr.sendAudio(msg.to,'tts.mp3') elif "Say-id " in msg.text: say = msg.text.replace("Say-id ","") lang = 'id' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") kr.sendAudio(msg.to,"hasil.mp3") elif "Say-en " in msg.text: say = msg.text.replace("Say-en ","") lang = 'en' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") kr.sendAudio(msg.to,"hasil.mp3") elif "Say-jp " in msg.text: say = msg.text.replace("Say-jp ","") lang = 'ja' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") kr.sendAudio(msg.to,"hasil.mp3") elif "Say-ar " in msg.text: say = msg.text.replace("Say-ar ","") lang = 'ar' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") 
kr.sendAudio(msg.to,"hasil.mp3") elif "Say-ko " in msg.text: say = msg.text.replace("Say-ko ","") lang = 'ko' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") kr.sendAudio(msg.to,"hasil.mp3") elif "Kapan " in msg.text: tanya = msg.text.replace("Kapan ","") jawab = ("kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi") jawaban = random.choice(jawab) tts = gTTS(text=jawaban, lang='id') tts.save('tts.mp3') kr.sendAudio(msg.to,'tts.mp3') elif "Apakah " in msg.text: tanya = msg.text.replace("Apakah ","") jawab = ("Ya","Tidak","Mungkin","Bisa jadi") jawaban = random.choice(jawab) tts = gTTS(text=jawaban, lang='id') tts.save('tts.mp3') kr.sendAudio(msg.to,'tts.mp3') elif 'Youtubemp4 ' in msg.text: try: textToSearch = (msg.text).replace('Youtubemp4 ', "").strip() query = urllib.quote(textToSearch) url = "https://www.youtube.com/results?search_query=" + query response = urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html, "html.parser") results = soup.find(attrs={'class': 'yt-uix-tile-link'}) ght = ('https://www.youtube.com' + results['href']) kr.sendVideoWithURL(msg.to, ght) except: kr.sendText(msg.to, "Could not find it") elif "Youtubesearch " in msg.text: query = msg.text.replace("Youtube ","") with requests.session() as s: s.headers['user-agent'] = 'Mozilla/5.0' url = 'http://www.youtube.com/results' params = {'search_query': query} r = s.get(url, params=params) soup = BeautifulSoup(r.content, 'html5lib') hasil = "" for a in soup.select('.yt-lockup-title > a[title]'): if '&list=' not in a['href']: hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n')) kr.sendText(msg.to,hasil) print '[Command] Youtube Search' elif "Lirik " in msg.text: try: songname = msg.text.lower().replace("Lirik ","") params = {'songname': songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params)) data = r.text data = json.loads(data) for song in data: hasil = 'Lyric Lagu (' hasil += song[0] hasil += ')\n\n' hasil += song[5] kr.sendText(msg.to, hasil) except Exception as wak: kr.sendText(msg.to, str(wak)) elif "Wikipedia " in msg.text: try: wiki = msg.text.lower().replace("Wikipedia ","") wikipedia.set_lang("id") pesan="Title (" pesan+=wikipedia.page(wiki).title pesan+=")\n\n" pesan+=wikipedia.summary(wiki, sentences=1) pesan+="\n" pesan+=wikipedia.page(wiki).url kr.sendText(msg.to, pesan) except: try: pesan="Over Text Limit! Please Click link\n" pesan+=wikipedia.page(wiki).url kr.sendText(msg.to, pesan) except Exception as e: kr.sendText(msg.to, str(e)) elif "Music " in msg.text: try: songname = msg.text.lower().replace("Music ","") params = {'songname': songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' 
+ urllib.urlencode(params)) data = r.text data = json.loads(data) for song in data: hasil = 'This is Your Music\n' hasil += 'Judul : ' + song[0] hasil += '\nDurasi : ' + song[1] hasil += '\nLink Download : ' + song[4] kr.sendText(msg.to, hasil) kr.sendText(msg.to, "Please Wait for audio...") kr.sendAudioWithURL(msg.to, song[4]) except Exception as njer: kr.sendText(msg.to, str(njer)) elif "Image " in msg.text: search = msg.text.replace("Image ","") url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search raw_html = (download_page(url)) items = [] items = items + (_images_get_all_items(raw_html)) path = random.choice(items) print path try: kr.sendImageWithURL(msg.to,path) except: pass elif "Profileig " in msg.text: try: instagram = msg.text.replace("Profileig ","") response = requests.get("https://www.instagram.com/"+instagram+"?__a=1") data = response.json() namaIG = str(data['user']['full_name']) bioIG = str(data['user']['biography']) mediaIG = str(data['user']['media']['count']) verifIG = str(data['user']['is_verified']) usernameIG = str(data['user']['username']) followerIG = str(data['user']['followed_by']['count']) profileIG = data['user']['profile_pic_url_hd'] privateIG = str(data['user']['is_private']) followIG = str(data['user']['follows']['count']) link = "Link: " + "https://www.instagram.com/" + instagram text = "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollower : "+followerIG+"\nFollowing : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link kr.sendImageWithURL(msg.to, profileIG) kr.sendText(msg.to, str(text)) except Exception as e: kr.sendText(msg.to, str(e)) elif "Checkdate " in msg.text: tanggal = msg.text.replace("Checkdate ","") r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal) data=r.text data=json.loads(data) lahir = data["data"]["lahir"] usia = data["data"]["usia"] ultah = data["data"]["ultah"] zodiak = data["data"]["zodiak"] kr.sendText(msg.to,"============ I N F O R M A S I ============\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n============ I N F O R M A S I ============") elif msg.text in ["Kalender","Time","Waktu"]: timeNow = datetime.now() timeHours = datetime.strftime(timeNow,"(%H:%M)") day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] inihari = datetime.today() hr = inihari.strftime('%A') bln = inihari.strftime('%m') for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): bln = bulan[k-1] rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]" kr.sendText(msg.to, rst) #==============================================================================# elif msg.text.lower() == 'ifconfig': botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0] kr.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===") elif msg.text.lower() == 'system': botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0] kr.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===") elif 
msg.text.lower() == 'kernel': botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0] kr.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===") elif msg.text.lower() == 'cpu': botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0] kr.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===") elif "Restart" in msg.text: print "[Command]Restart" try: kr.sendText(msg.to,"Restarting...") kr.sendText(msg.to,"Restart Success") restart_program() except: kr.sendText(msg.to,"Please wait") restart_program() pass elif "Turn off" in msg.text: try: import sys sys.exit() except: pass elif msg.text.lower() == 'runtime': eltime = time.time() - mulai van = "Bot has been active "+waktu(eltime) kr.sendText(msg.to,van) #================================ KRIS SCRIPT STARTED ==============================================# elif "google " in msg.text: a = msg.text.replace("google ","") b = urllib.quote(a) kr.sendText(msg.to,"Sedang Mencari om...") kr.sendText(msg.to, "https://www.google.com/" + b) kr.sendText(msg.to,"Ketemu om ^") elif cms(msg.text,["/creator","Creator"]): msg.contentType = 13 msg.contentMetadata = {'mid': "ub14f769cdf42d8c8a618ebe91ac2c8c7"} kr.sendMessage(msg) elif "friendpp: " in msg.text: if msg.from_ in admin: suf = msg.text.replace('friendpp: ','') gid = kr.getAllContactIds() for i in gid: h = kr.getContact(i).displayName gna = kr.getContact(i) if h == suf: kr.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus) elif "Checkmid: " in msg.text: saya = msg.text.replace("Checkmid: ","") msg.contentType = 13 msg.contentMetadata = {"mid":saya} kr.sendMessage(msg) contact = kr.getContact(saya) cu = kr.channel.getCover(saya) path = str(cu) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus try: kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage) kr.sendText(msg.to,"Profile Picture " + contact.displayName) kr.sendImageWithURL(msg.to,image) kr.sendText(msg.to,"Cover " + contact.displayName) kr.sendImageWithURL(msg.to,path) except: pass elif "Checkid: " in msg.text: saya = msg.text.replace("Checkid: ","") gid = kr.getGroupIdsJoined() for i in gid: h = kr.getGroup(i).id group = kr.getGroup(i) if h == saya: try: creator = group.creator.mid msg.contentType = 13 msg.contentMetadata = {'mid': creator} md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan" else: md += "\n\nKode Url : Diblokir" if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang" else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang" kr.sendText(msg.to,md) kr.sendMessage(msg) kr.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus) except: creator = "Error" elif msg.text in ["Friendlist"]: contactlist = kr.getAllContactIds() kontak = kr.getContacts(contactlist) num=1 msgs="═════════List Friend═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.displayName) num=(num+1) msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak) kr.sendText(msg.to, msgs) elif msg.text in ["Memlist"]: kontak = kr.getGroup(msg.to) group = kontak.members num=1 msgs="═════════List Member═════════-" for ids in group: msgs+="\n[%i] %s" % (num, ids.displayName) num=(num+1) msgs+="\n═════════List 
Member═════════\n\nTotal Members : %i" % len(group) kr.sendText(msg.to, msgs) elif "Friendinfo: " in msg.text: saya = msg.text.replace('Friendinfo: ','') gid = kr.getAllContactIds() for i in gid: h = kr.getContact(i).displayName contact = kr.getContact(i) cu = kr.channel.getCover(i) path = str(cu) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus if h == saya: kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage) kr.sendText(msg.to,"Profile Picture " + contact.displayName) kr.sendImageWithURL(msg.to,image) kr.sendText(msg.to,"Cover " + contact.displayName) kr.sendImageWithURL(msg.to,path) elif "Friendpict: " in msg.text: saya = msg.text.replace('Friendpict: ','') gid = kr.getAllContactIds() for i in gid: h = kr.getContact(i).displayName gna = kr.getContact(i) if h == saya: kr.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus) elif msg.text in ["Friendlistmid"]: gruplist = kr.getAllContactIds() kontak = kr.getContacts(gruplist) num=1 msgs="═════════ʆίςϯ ƒɾίεηδʍίδ═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.mid) num=(num+1) msgs+="\n═════════ʆίςϯ ƒɾίεηδʍίδ═════════\n\nTotal Friend : %i" % len(kontak) kr.sendText(msg.to, msgs) elif msg.text in ["Blocklist"]: blockedlist = kr.getBlockedContactIds() kontak = kr.getContacts(blockedlist) num=1 msgs="═════════List Blocked═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.displayName) num=(num+1) msgs+="\n═════════List Blocked═════════\n\nTotal Blocked : %i" % len(kontak) kr.sendText(msg.to, msgs) elif msg.text in ["Gruplist"]: gruplist = kr.getGroupIdsJoined() kontak = kr.getGroups(gruplist) num=1 msgs="═════════List Grup═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.name) num=(num+1) msgs+="\n═════════List Grup═════════\n\nTotal Grup : %i" % len(kontak) kr.sendText(msg.to, msgs) elif msg.text in ["Gruplistmid"]: gruplist = kr.getGroupIdsJoined() kontak = kr.getGroups(gruplist) num=1 msgs="═════════List GrupMid═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.id) num=(num+1) msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak) kr.sendText(msg.to, msgs) elif "Grupimage: " in msg.text: saya = msg.text.replace('Grupimage: ','') gid = kr.getGroupIdsJoined() for i in gid: h = kr.getGroup(i).name gna = kr.getGroup(i) if h == saya: kr.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus) elif "Grupname" in msg.text: saya = msg.text.replace('Grupname','') gid = kr.getGroup(msg.to) kr.sendText(msg.to, "[Nama Grup : ]\n" + gid.name) elif "Grupid" in msg.text: saya = msg.text.replace('Grupid','') gid = kr.getGroup(msg.to) kr.sendText(msg.to, "[ID Grup : ]\n" + gid.id) elif "Grupinfo: " in msg.text: saya = msg.text.replace('Grupinfo: ','') gid = kr.getGroupIdsJoined() for i in gid: h = kr.getGroup(i).name group = kr.getGroup(i) if h == saya: try: creator = group.creator.mid msg.contentType = 13 msg.contentMetadata = {'mid': creator} md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan" else: md += "\n\nKode Url : Diblokir" if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang" else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang" kr.sendText(msg.to,md) kr.sendMessage(msg) kr.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ 
group.pictureStatus) except: creator = "Error" elif "Spamtag @" in msg.text: _name = msg.text.replace("Spamtag @","") _nametarget = _name.rstrip(' ') gs = kr.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: xname = g.displayName xlen = str(len(xname)+1) msg.contentType = 0 msg.text = "@"+xname+" " msg.contentMetadata ={"MENTION":'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'} kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) print "Spamtag Berhasil." elif "crashkontak @" in msg.text: _name = msg.text.replace("crashkontak @","") _nametarget = _name.rstrip(' ') gs = kr.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: msg.contentType = 13 msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"} kr.sendMessage(g.mid,msg.to + str(msg)) kr.sendText(g.mid, "hai") kr.sendText(g.mid, "salken") kr.sendText(msg.to, "Done") print " Spammed crash !" elif "playstore " in msg.text.lower(): tob = msg.text.lower().replace("playstore ","") kr.sendText(msg.to,"Sedang Mencari boss...") kr.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLinknya : https://play.google.com/store/search?q=" + tob) kr.sendText(msg.to,"Ketemu boss ^") elif 'wikipedia ' in msg.text.lower(): try: wiki = msg.text.lower().replace("wikipedia ","") wikipedia.set_lang("id") pesan="Title (" pesan+=wikipedia.page(wiki).title pesan+=")\n\n" pesan+=wikipedia.summary(wiki, sentences=3) pesan+="\n" pesan+=wikipedia.page(wiki).url kr.sendText(msg.to, pesan) except: try: pesan="Teks nya kepanjangan! ketik link dibawah aja\n" pesan+=wikipedia.page(wiki).url kr.sendText(msg.to, pesan) except Exception as e: kr.sendText(msg.to, str(e)) elif "say " in msg.text.lower(): say = msg.text.lower().replace("say ","") lang = 'id' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") kr.sendAudio(msg.to,"hasil.mp3") elif msg.text in ["spam gift 25"]: msg.contentType = 9 msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4', 'PRDTYPE': 'THEME', 'MSGTPL': '8'} msg.text = None kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) elif msg.text in ["Gcreator:inv"]: if msg.from_ in admin: ginfo = kr.getGroup(msg.to) gCreator = ginfo.creator.mid try: kr.findAndAddContactsByMid(gCreator) kr.inviteIntoGroup(msg.to,[gCreator]) print "success inv gCreator" except: pass elif msg.text in ["Gcreator:kick"]: if msg.from_ in admin: ginfo = kr.getGroup(msg.to) gCreator = ginfo.creator.mid try: kr.findAndAddContactsByMid(gCreator) kr.kickoutFromGroup(msg.to,[gCreator]) print "success inv gCreator" except: pass elif 'lirik ' in msg.text.lower(): try: songname = msg.text.lower().replace('lirik ','') params = {'songname': songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' 
                + urllib.urlencode(params))
                data = r.text
                data = json.loads(data)
                for song in data:
                    hasil = 'Lyric Lagu ('
                    hasil += song[0]
                    hasil += ')\n\n'
                    hasil += song[5]
                    kr.sendText(msg.to, hasil)
            except Exception as wak:
                kr.sendText(msg.to, str(wak))
        elif "Getcover @" in msg.text:
            print "[Command]dp executing"
            _name = msg.text.replace("Getcover @","")
            _nametarget = _name.rstrip(' ')
            gs = kr.getGroup(msg.to)
            targets = []
            for g in gs.members:
                if _nametarget == g.displayName:
                    targets.append(g.mid)
            if targets == []:
                kr.sendText(msg.to,"Contact not found")  # was ki.sendText, an undefined name
            else:
                for target in targets:
                    try:
                        contact = kr.getContact(target)
                        cu = kr.channel.getCover(target)
                        path = str(cu)
                        kr.sendImageWithURL(msg.to, path)
                    except:
                        pass
                print "[Command]dp executed"
        elif "idline: " in msg.text:
            msgg = msg.text.replace('idline: ','')
            conn = kr.findContactsByUserid(msgg)
            if True:
                msg.contentType = 13
                msg.contentMetadata = {'mid': conn.mid}
                kr.sendText(msg.to,"http://line.me/ti/p/~" + msgg)
                kr.sendMessage(msg)
        elif "reinvite" in msg.text.split():
            if msg.toType == 2:
                group = kr.getGroup(msg.to)
                if group.invitee is not None:
                    try:
                        grCans = [contact.mid for contact in group.invitee]
                        # was findAndAddContactByMid(msg.to, grCans); the list variant used
                        # elsewhere in this script takes only the mids
                        kr.findAndAddContactsByMids(grCans)
                        kr.cancelGroupInvitation(msg.to, grCans)
                        kr.inviteIntoGroup(msg.to, grCans)
                    except Exception as error:
                        print error
                else:
                    if wait["lang"] == "JP":
                        kr.sendText(msg.to,"No Invited")
                    else:
                        kr.sendText(msg.to,"Error")
            else:
                pass
        elif msg.text.lower() == 'runtime':
            eltime = time.time() - mulai
            van = "Bot sudah berjalan selama "+waktu(eltime)
            kr.sendText(msg.to,van)
        elif msg.text in ["Restart"]:
            kr.sendText(msg.to, "Bot has been restarted")
            restart_program()
            print "@Restart"
        elif msg.text in ["time"]:
            timeNow = datetime.now()
            timeHours = datetime.strftime(timeNow,"(%H:%M)")
            day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
            hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
            bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
            inihari = datetime.today()
            hr = inihari.strftime('%A')
            bln = inihari.strftime('%m')
            for i in range(len(day)):
                if hr == day[i]:
                    hasil = hari[i]
            # the original compared the zero-padded '%m' string against str(k), which never
            # matches, so the month name was never substituted; index the month list directly
            bln = bulan[int(bln) - 1]
            rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
            kr.sendText(msg.to, rst)  # was client.sendText, an undefined name in this script
        elif "image " in msg.text:
            search = msg.text.replace("image ","")
            url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
            raw_html = (download_page(url))
            items = []
            items = items + (_images_get_all_items(raw_html))
            path = random.choice(items)
            print path
            try:
                kr.sendImageWithURL(msg.to,path)
            except:
                pass
        elif 'instagram ' in msg.text.lower():
            try:
                instagram = msg.text.lower().replace("instagram ","")
                html = requests.get('https://www.instagram.com/' + instagram + '/?')
                soup = BeautifulSoup(html.text, 'html5lib')
                data = soup.find_all('meta', attrs={'property':'og:description'})
                text = data[0].get('content').split()
                data1 = soup.find_all('meta', attrs={'property':'og:image'})
                text1 = data1[0].get('content').split()
                user = "Name: " + text[-2] + "\n"
                user1 = "Username: " + text[-1] + "\n"
                followers = "Followers: " + text[0] + "\n"
                following = "Following: " + text[2] + "\n"
                post = "Post: " + text[4] + "\n"
                link = "Link: " + "https://www.instagram.com/" + instagram
                detail = "**INSTAGRAM INFO USER**\n"
                details = "\n**INSTAGRAM INFO USER**"
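                # Added note (assumption): `text` above is the og:description meta content split on
                # whitespace, so the indices (text[0] followers, text[2] following, text[4] posts,
                # text[-2]/text[-1] name and username) only hold for the English
                # "X Followers, Y Following, Z Posts - Name (@username)" wording and will break
                # if Instagram changes that format.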
kr.sendText(msg.to, detail + user + user1 + followers + following + post + link + details) kr.sendImageWithURL(msg.to, text1[0]) except Exception as njer: kr.sendText(msg.to, str(njer)) elif msg.text in ["Attack"]: msg.contentType = 13 msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"} kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) kr.sendMessage(msg) elif msg.text.lower() == '...': msg.contentType = 13 msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"} kr.sendMessage(msg) #=================================KRIS SCRIPT FINISHED =============================================# elif "Ban @" in msg.text: if msg.toType == 2: _name = msg.text.replace("Ban @","") _nametarget = _name.rstrip() gs = kr.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kr.sendText(msg.to,_nametarget + " Not Found") else: for target in targets: try: wait["blacklist"][target] = True kr.sendText(msg.to,_nametarget + " Succes Add to Blacklist") except: kr.sendText(msg.to,"Error") elif "Unban @" in msg.text: if msg.toType == 2: _name = msg.text.replace("Unban @","") _nametarget = _name.rstrip() gs = kr.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kr.sendText(msg.to,_nametarget + " Not Found") else: for target in targets: try: del wait["blacklist"][target] kr.sendText(msg.to,_nametarget + " Delete From Blacklist") except: kr.sendText(msg.to,_nametarget + " Not In Blacklist") elif "Ban:" in msg.text: nk0 = msg.text.replace("Ban:","") nk1 = nk0.lstrip() nk2 = nk1.replace("","") nk3 = nk2.rstrip() _name = nk3 gs = kr.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: wait["blacklist"][target] = True f=codecs.open('st2__b.json','w','utf-8') json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) kr.sendText(msg.to,_name + " Succes Add to Blacklist") except: kr.sendText(msg.to,"Error") elif "Unban:" in msg.text: nk0 = msg.text.replace("Unban:","") nk1 = nk0.lstrip() nk2 = nk1.replace("","") nk3 = nk2.rstrip() _name = nk3 gs = kr.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: del wait["blacklist"][target] f=codecs.open('st2__b.json','w','utf-8') json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) kr.sendText(msg.to,_name + " Delete From Blacklist") except: kr.sendText(msg.to,_name + " Not In Blacklist") elif msg.text in ["Clear"]: wait["blacklist"] = {} kr.sendText(msg.to,"Blacklist Telah Dibersihkan") elif msg.text in ["Ban:on"]: wait["wblacklist"] = True kr.sendText(msg.to,"Send Contact") elif msg.text in ["Unban:on"]: wait["dblacklist"] = True kr.sendText(msg.to,"Send Contact") elif msg.text in ["Banlist"]: if wait["blacklist"] == {}: kr.sendText(msg.to,"Tidak Ada Blacklist") else: kr.sendText(msg.to,"Daftar Banlist") num=1 msgs="*Blacklist*" for mi_d in wait["blacklist"]: msgs+="\n[%i] %s" % (num, kr.getContact(mi_d).displayName) num=(num+1) msgs+="\n*Blacklist*\n\nTotal Blacklist : %i" % 
len(wait["blacklist"]) kr.sendText(msg.to, msgs) elif msg.text in ["Conban","Contactban","Contact ban"]: if wait["blacklist"] == {}: kr.sendText(msg.to,"Tidak Ada Blacklist") else: kr.sendText(msg.to,"Daftar Blacklist") h = "" for i in wait["blacklist"]: h = kr.getContact(i) M = Message() M.to = msg.to M.contentType = 13 M.contentMetadata = {'mid': i} kr.sendMessage(M) elif msg.text in ["Midban","Mid ban"]: if msg.toType == 2: group = kr.getGroup(msg.to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) num=1 cocoa = "══════════List Blacklist═════════" for mm in matched_list: cocoa+="\n[%i] %s" % (num, mm) num=(num+1) cocoa+="\n═════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(matched_list) kr.sendText(msg.to,cocoa) elif msg.text.lower() == 'scan blacklist': if msg.toType == 2: group = kr.getGroup(msg.to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) if matched_list == []: kr.sendText(msg.to,"Tidak ada Daftar Blacklist") return for jj in matched_list: try: kr.kickoutFromGroup(msg.to,[jj]) print (msg.to,[jj]) except: pass #==============================================# if op.type == 26: msg = op.message if msg.contentType == 13: if wait["wblack"] == True: if msg.contentMetadata["mid"] in wait["commentBlack"]: kr.sendText(msg.to,"already") wait["wblack"] = False else: wait["commentBlack"][msg.contentMetadata["mid"]] = True wait["wblack"] = False kr.sendText(msg.to,"decided not to comment") #-------------------------------------------------------- elif msg.text is None: return #-------------------------------------------------------- elif msg.text in ["chery glist"]: #Melihat List Group if msg.from_ in owner: gids = kr.getGroupIdsJoined() h = "" for i in gids: #####gn = kr.getGroup(i).name h += "[•]%s Member\n" % (kr.getGroup(i).name +"👉"+str(len(kr.getGroup(i).members))) kr.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids))) elif msg.text in ["chery glist2"]: if msg.from_ in owner: gid = kr.getGroupIdsJoined() h = "" for i in gid: h += "[%s]:%s\n" % (kr.getGroup(i).name,i) kr.sendText(msg.to,h) elif "chery asupka " in msg.text: if msg.from_ in owner: gid = msg.text.replace("chery asupka ","") if gid == "": kr.sendText(msg.to,"Invalid group id") else: try: kr.findAndAddContactsByMid(msg.from_) kr.inviteIntoGroup(gid,[msg.from_]) kr.sendText(msg.to,"succes di invite boss, silahkan masuk...!!") except: kr.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu") elif "chery bye" in msg.text: if msg.from_ in owner: if msg.toType == 2: ginfo = kr.getGroup(msg.to) try: kr.leaveGroup(msg.to) except: pass elif "chery megs " in msg.text: if msg.from_ in owner: gName = msg.text.replace("chery megs ","") ap = kr.getGroups([msg.to]) semua = [contact.mid for contact in ap[0].members] nya = ap[0].members for a in nya: Mi_d = str(a.mid) kr.createGroup(gName, semua) kr.createGroup(gName, semua) kr.createGroup(gName, semua) kr.createGroup(gName, semua) kr.createGroup(gName, semua) kr.createGroup(gName, semua) kr.createGroup(gName, semua) elif "#cmegs " in msg.text: if msg.from_ in owner: gName = msg.text.replace("#cmegs ","") ap = kr.getGroups([msg.to]) semua = findAndAddContactsByMid(Mi_d) nya = ap[0].members for a in nya: Mi_d = str(a.mid) klis=[kr] team=random.choice(klis) kr.findAndAddContactsByMid(Mi_d) kr.createGroup(gName, semua) 
kr.createGroup(gName, semua) kr.createGroup(gName, semua) kr.createGroup(gName, semua) kr.createGroup(gName, semua) kr.createGroup(gName, semua) team.findAndAddContactsByMid(Mi_d) team.createGroup(gName, semua) team.createGroup(gName, semua) team.createGroup(gName, semua) team.createGroup(gName, semua) team.createGroup(gName, semua) team.createGroup(gName, semua) elif "Crecover" in msg.text: if msg.from_ in owner: thisgroup = kr.getGroups([msg.to]) Mids = [contact.mid for contact in thisgroup[0].members] mi_d = Mids[:33] kr.createGroup("Crecover", mi_d) kr.sendText(msg.to,"Success recover") elif "pepen spin" in msg.text: if msg.from_ in owner: thisgroup = kr.getGroups([msg.to]) Mids = [contact.mid for contact in thisgroup[0].members] mi_d = Mids[:33] kr.createGroup("Nah kan", mi_d) kr.createGroup("Nah kan", mi_d) kr.createGroup("Nah kan", mi_d) kr.createGroup("Nah kan", mi_d) kr.createGroup("Nah kan", mi_d) kr.createGroup("Nah kan", mi_d) kr.createGroup("Nah kan", mi_d) kr.createGroup("Nah kan", mi_d) kr.createGroup("Nah kan", mi_d) kr.createGroup("Nah kan", mi_d) kr.sendText(msg.to,"Success...!!!!") elif msg.text in ["Remove all chat"]: if msg.from_ in owner: kr.removeAllMessages(op.param2) kr.removeAllMessages(op.param2) kr.sendText(msg.to,"Removed all chat Finish") elif msg.text in ["chery muach"]: if msg.from_ in owner: msg.contentType = 13 msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"} kr.sendMessage(msg) elif msg.text in ["cherry","chery","Cherry","Chery"]: if msg.from_ in owner: kr.sendText(msg.to,"Cherry masih aktif Bebz...!!!") #============================================= if op.type == 17: if op.param2 not in Bots: if op.param2 in Bots: pass if wait["protect"] == True: if wait["blacklist"][op.param2] == True: try: kr.kickoutFromGroup(op.param1,[op.param2]) G = kr.getGroup(op.param1) G.preventJoinByTicket = True kr.updateGroup(G) except: try: kr.kickoutFromGroup(op.param1,[op.param2]) G = kr.getGroup(op.param1) G.preventJoinByTicket = True kr.updateGroup(G) except: pass if op.type == 19: if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["protect"] == True: wait ["blacklist"][op.param2] = True kr.kickoutFromGroup(op.param1,[op.param2]) kr.inviteIntoGroup(op.param1,[op.param2]) if op.type == 13: if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["inviteprotect"] == True: wait ["blacklist"][op.param2] = True kr.kickoutFromGroup(op.param1,[op.param2]) if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["inviteprotect"] == True: wait ["blacklist"][op.param2] = True kr.cancelGroupInvitation(op.param1,[op.param3]) if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["cancelprotect"] == True: wait ["blacklist"][op.param2] = True kr.cancelGroupInvitation(op.param1,[op.param3]) if op.type == 11: if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["linkprotect"] == True: wait ["blacklist"][op.param2] = True G = kr.getGroup(op.param1) G.preventJoinByTicket = True kr.updateGroup(G) kr.kickoutFromGroup(op.param1,[op.param2]) if op.type == 5: if wait['autoAdd'] == True: if (wait['message'] in [""," ","\n",None]): pass else: kr.sendText(op.param1,str(wait['message'])) if op.type == 11: if wait["linkprotect"] == True: if op.param2 not in Bots: G = kr.getGroup(op.param1) G.preventJoinByTicket = True kr.kickoutFromGroup(op.param1,[op.param3]) kr.updateGroup(G) if op.type == 17: if wait["Wc"] == True: if op.param2 in Bots: return ginfo = kr.getGroup(op.param1) kr.sendText(op.param1, "╔═════════════\n║Selamat Datang Di " + 
str(ginfo.name) + "\n╠═════════════\n" + "║Founder =>>> " + str(ginfo.name) + " :\n║" + ginfo.creator.displayName + "\n╠═════════════\n" + "║😊Semoga Betah Kak 😘 \n╚═════════════") print "MEMBER HAS JOIN THE GROUP" if op.type == 15: if wait["Lv"] == True: if op.param2 in Bots: return kr.sendText(op.param1, "╔═════════════\n║Baper Tuh Orang :v \n║Semoga Bahagia ya 😊 \n╚═════════════") print "MEMBER HAS LEFT THE GROUP" #------------------------------------------------------------------------------# if op.type == 55: try: if op.param1 in wait2['readPoint']: if op.param2 in wait2['readMember'][op.param1]: pass else: wait2['readMember'][op.param1] += op.param2 wait2['ROM'][op.param1][op.param2] = op.param2 with open('sider.json', 'w') as fp: json.dump(wait2, fp, sort_keys=True, indent=4) else: pass except: pass if op.type == 59: print op except Exception as error: print error def autolike(): count = 1 while True: try: for posts in kr.activity(1)["result"]["posts"]: if posts["postInfo"]["liked"] is False: if wait['likeOn'] == True: kr.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001) print "Like" if wait["commentOn"] == True: if posts["userInfo"]["writerMid"] in wait["commentBlack"]: pass else: kr.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"]) except: count += 1 if(count == 50): sys.exit(0) else: pass thread2 = threading.Thread(target=autolike) thread2.daemon = True thread2.start() def likefriend(): for zx in range(0,20): hasil = kr.activity(limit=20) if hasil['result']['posts'][zx]['postInfo']['liked'] == False: try: kr.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001) kr.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ ByPepen _Bot😊\n\n☆º°˚˚✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰º°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ byKi&Pepen ⭐ 👈 »»» http://line.me/ti/p/~pepenlagiboker «««") print "Like" except: pass else: print "Already Liked Om" time.sleep(0.60) def likeme(): for zx in range(0,20): hasil = kr.activity(limit=20) if hasil['result']['posts'][zx]['postInfo']['liked'] == False: if hasil['result']['posts'][zx]['userInfo']['mid'] in mid: try: kr.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002) kr.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By Pepen Ganteng") print "Like" except: pass else: print "Status Sudah di Like Om" while True: try: Ops = kr.fetchOps(kr.Poll.rev, 5) except EOFError: raise Exception("It might be wrong revision\n" + str(kr.Poll.rev)) for Op in Ops: if (Op.type != OpType.END_OF_OPERATION): kr.Poll.rev = max(kr.Poll.rev, Op.revision) bot(Op)
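#============================ HEDGED SKETCH (NOT IN ORIGINAL) ============================#
# The "runtime" command above formats the uptime with waktu(eltime), whose definition is
# outside this excerpt. Below is a minimal sketch of what such a seconds-to-readable helper
# might look like; the name waktu_sketch and the output wording are assumptions, not the
# bot's actual implementation.
def waktu_sketch(secs):
    secs = int(secs)
    days, secs = divmod(secs, 86400)
    hours, secs = divmod(secs, 3600)
    minutes, seconds = divmod(secs, 60)
    # e.g. waktu_sketch(93784) -> "1 hari 2 jam 3 menit 4 detik"
    return "%d hari %d jam %d menit %d detik" % (days, hours, minutes, seconds)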
helper.py
import os import time from collections import OrderedDict, defaultdict from contextlib import contextmanager from functools import wraps from itertools import chain, combinations from re import ASCII, MULTILINE, findall, match from threading import Thread from typing import ( Any, Callable, DefaultDict, Dict, FrozenSet, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar, Union, ) from urllib.parse import unquote from typing_extensions import TypedDict from zulipterminal.api_types import Composition, EmojiType, Message from zulipterminal.config.keys import primary_key_for_command from zulipterminal.config.regexes import ( REGEX_COLOR_3_DIGIT, REGEX_COLOR_6_DIGIT, REGEX_QUOTED_FENCE_LENGTH, ) from zulipterminal.config.ui_mappings import StreamAccessType class StreamData(TypedDict): name: str id: int color: str stream_access_type: StreamAccessType description: str class EmojiData(TypedDict): code: str aliases: List[str] type: EmojiType NamedEmojiData = Dict[str, EmojiData] class TidiedUserInfo(TypedDict): full_name: str email: str date_joined: str timezone: str role: Optional[int] last_active: str is_bot: bool # Below fields are only meaningful if is_bot == True bot_type: Optional[int] bot_owner_name: str class Index(TypedDict): pointer: Dict[str, Union[int, Set[None]]] # narrow_str, message_id # Various sets of downloaded message ids (all, starred, ...) all_msg_ids: Set[int] starred_msg_ids: Set[int] mentioned_msg_ids: Set[int] private_msg_ids: Set[int] private_msg_ids_by_user_ids: Dict[FrozenSet[int], Set[int]] stream_msg_ids_by_stream_id: Dict[int, Set[int]] topic_msg_ids: Dict[int, Dict[str, Set[int]]] # Extra cached information edited_messages: Set[int] # {message_id, ...} topics: Dict[int, List[str]] # {topic names, ...} search: Set[int] # {message_id, ...} # Downloaded message data by message id messages: Dict[int, Message] initial_index = Index( pointer=defaultdict(set), all_msg_ids=set(), starred_msg_ids=set(), mentioned_msg_ids=set(), private_msg_ids=set(), private_msg_ids_by_user_ids=defaultdict(set), stream_msg_ids_by_stream_id=defaultdict(set), topic_msg_ids=defaultdict(dict), edited_messages=set(), topics=defaultdict(list), search=set(), # mypy bug: https://github.com/python/mypy/issues/7217 messages=defaultdict(lambda: Message()), ) class UnreadCounts(TypedDict): all_msg: int all_pms: int all_mentions: int unread_topics: Dict[Tuple[int, str], int] # stream_id, topic unread_pms: Dict[int, int] # sender_id unread_huddles: Dict[FrozenSet[int], int] # Group pms streams: Dict[int, int] # stream_id def asynch(func: Callable[..., None]) -> Callable[..., None]: """ Decorator for executing a function in a separate :class:`threading.Thread`. """ @wraps(func) def wrapper(*args: Any, **kwargs: Any) -> Any: # If calling when pytest is running simply return the function # to avoid running in asynch mode. if os.environ.get("PYTEST_CURRENT_TEST"): return func(*args, **kwargs) thread = Thread(target=func, args=args, kwargs=kwargs) thread.daemon = True return thread.start() return wrapper def _set_count_in_model( new_count: int, changed_messages: List[Message], unread_counts: UnreadCounts ) -> None: """ This function doesn't explicitly set counts in model, but updates `unread_counts` (which can update the model if it's passed in, but is not tied to it). """ # broader unread counts (for all_*) are updated # later conditionally in _set_count_in_view. 
KeyT = TypeVar("KeyT") def update_unreads(unreads: Dict[KeyT, int], key: KeyT) -> None: if key in unreads: unreads[key] += new_count if unreads[key] == 0: unreads.pop(key) elif new_count == 1: unreads[key] = new_count for message in changed_messages: if message["type"] == "stream": stream_id = message["stream_id"] update_unreads( unread_counts["unread_topics"], (stream_id, message["subject"]) ) update_unreads(unread_counts["streams"], stream_id) # self-pm has only one display_recipient # 1-1 pms have 2 display_recipient elif len(message["display_recipient"]) <= 2: update_unreads(unread_counts["unread_pms"], message["sender_id"]) else: # If it's a group pm update_unreads( unread_counts["unread_huddles"], frozenset( recipient["id"] for recipient in message["display_recipient"] ), ) def _set_count_in_view( controller: Any, new_count: int, changed_messages: List[Message], unread_counts: UnreadCounts, ) -> None: """ This function for the most part contains the logic for setting the count in the UI buttons. The later buttons (all_msg, all_pms) additionally set the current count in the model and make use of the same in the UI. """ stream_buttons_list = controller.view.stream_w.streams_btn_list is_open_topic_view = controller.view.left_panel.is_in_topic_view if is_open_topic_view: topic_buttons_list = controller.view.topic_w.topics_btn_list toggled_stream_id = controller.view.topic_w.stream_button.stream_id user_buttons_list = controller.view.user_w.users_btn_list all_msg = controller.view.home_button all_pm = controller.view.pm_button all_mentioned = controller.view.mentioned_button for message in changed_messages: user_id = message["sender_id"] # If we sent this message, don't increase the count if user_id == controller.model.user_id: continue msg_type = message["type"] add_to_counts = True if {"mentioned", "wildcard_mentioned"} & set(message["flags"]): unread_counts["all_mentions"] += new_count all_mentioned.update_count(unread_counts["all_mentions"]) if msg_type == "stream": stream_id = message["stream_id"] msg_topic = message["subject"] if controller.model.is_muted_stream(stream_id): add_to_counts = False # if muted, don't add to eg. all_msg else: for stream_button in stream_buttons_list: if stream_button.stream_id == stream_id: stream_button.update_count(stream_button.count + new_count) break # FIXME: Update unread_counts['unread_topics']? if controller.model.is_muted_topic(stream_id, msg_topic): add_to_counts = False if is_open_topic_view and stream_id == toggled_stream_id: # If topic_view is open for incoming messages's stream, # We update the respective TopicButton count accordingly. 
for topic_button in topic_buttons_list: if topic_button.topic_name == msg_topic: topic_button.update_count(topic_button.count + new_count) else: for user_button in user_buttons_list: if user_button.user_id == user_id: user_button.update_count(user_button.count + new_count) break unread_counts["all_pms"] += new_count all_pm.update_count(unread_counts["all_pms"]) if add_to_counts: unread_counts["all_msg"] += new_count all_msg.update_count(unread_counts["all_msg"]) def set_count(id_list: List[int], controller: Any, new_count: int) -> None: # This method applies new_count for 'new message' (1) or 'read' (-1) # (we could ensure this in a different way by a different type) assert new_count == 1 or new_count == -1 messages = controller.model.index["messages"] unread_counts: UnreadCounts = controller.model.unread_counts changed_messages = [messages[id] for id in id_list] _set_count_in_model(new_count, changed_messages, unread_counts) # if view is not yet loaded. Usually the case when first message is read. while not hasattr(controller, "view"): time.sleep(0.1) _set_count_in_view(controller, new_count, changed_messages, unread_counts) while not hasattr(controller, "loop"): time.sleep(0.1) controller.update_screen() def index_messages(messages: List[Message], model: Any, index: Index) -> Index: """ STRUCTURE OF INDEX { 'pointer': { '[]': 30 # str(ZulipModel.narrow) '[["stream", "verona"]]': 32, ... } 'topic_msg_ids': { 123: { # stream_id 'topic name': { 51234, # message id 56454, ... } }, 'private_msg_ids_by_user_ids': { (3, 7): { # user_ids frozenset 51234, 56454, ... }, (1, 2, 3, 4): { # multiple recipients 12345, 32553, } }, 'topics': { 123: [ # stread_id 'Denmark2', # topic name 'Verona2', .... ] }, 'all_msg_ids': { 14231, 23423, ... }, 'private_msg_ids': { 22334, 23423, ... }, 'mentioned_msg_ids': { 14423, 33234, ... }, 'stream_msg_ids_by_stream_id': { 123: { 53434, 36435, ... } 234: { 23423, 23423, ... } }, 'edited_messages':{ 51234, 23423, ... }, 'search': { 13242, 23423, 23423, ... 
}, 'messages': { # all the messages mapped to their id # for easy retrieval of message from id 45645: { # PRIVATE 'id': 4290, 'timestamp': 1521817473, 'content': 'Hi @**Cordelia Lear**', 'sender_full_name': 'Iago', 'flags': [], 'sender_email': 'iago@zulip.com', 'subject': '', 'subject_links': [], 'sender_id': 73, 'type': 'private', 'reactions': [], 'display_recipient': [ { 'email': 'ZOE@zulip.com', 'id': 70, 'full_name': 'Zoe', }, { 'email': 'cordelia@zulip.com', 'id': 71, 'full_name': 'Cordelia Lear', }, { 'email': 'hamlet@zulip.com', 'id': 72, 'full_name': 'King Hamlet', }, { 'email': 'iago@zulip.com', 'id': 73, 'full_name': 'Iago', } ] }, 45645: { # STREAM 'timestamp': 1521863062, 'sender_id': 72, 'sender_full_name': 'King Hamlet', 'content': 'https://github.com/zulip/zulip-terminal', 'type': 'stream', 'sender_email': 'hamlet@zulip.com', 'id': 4298, 'display_recipient': 'Verona', 'flags': [], 'reactions': [], 'subject': 'Verona2', 'stream_id': 32, }, }, } """ narrow = model.narrow for msg in messages: if "edit_history" in msg.keys(): index["edited_messages"].add(msg["id"]) index["messages"][msg["id"]] = msg if not narrow: index["all_msg_ids"].add(msg["id"]) elif model.is_search_narrow(): index["search"].add(msg["id"]) continue if len(narrow) == 1: if narrow[0][1] == "starred": if "starred" in msg["flags"]: index["starred_msg_ids"].add(msg["id"]) if narrow[0][1] == "mentioned": if {"mentioned", "wildcard_mentioned"} & set(msg["flags"]): index["mentioned_msg_ids"].add(msg["id"]) if msg["type"] == "private": index["private_msg_ids"].add(msg["id"]) recipients = frozenset( {recipient["id"] for recipient in msg["display_recipient"]} ) if narrow[0][0] == "pm_with": narrow_emails = [ model.user_dict[email]["user_id"] for email in narrow[0][1].split(", ") ] + [model.user_id] if recipients == frozenset(narrow_emails): index["private_msg_ids_by_user_ids"][recipients].add(msg["id"]) if msg["type"] == "stream" and msg["stream_id"] == model.stream_id: index["stream_msg_ids_by_stream_id"][msg["stream_id"]].add(msg["id"]) if ( msg["type"] == "stream" and len(narrow) == 2 and narrow[1][1] == msg["subject"] ): topics_in_stream = index["topic_msg_ids"][msg["stream_id"]] if not topics_in_stream.get(msg["subject"]): topics_in_stream[msg["subject"]] = set() topics_in_stream[msg["subject"]].add(msg["id"]) return index def classify_unread_counts(model: Any) -> UnreadCounts: # TODO: support group pms unread_msg_counts = model.initial_data["unread_msgs"] unread_counts = UnreadCounts( all_msg=0, all_pms=0, all_mentions=0, unread_topics=dict(), unread_pms=dict(), unread_huddles=dict(), streams=defaultdict(int), ) mentions_count = len(unread_msg_counts["mentions"]) unread_counts["all_mentions"] += mentions_count for pm in unread_msg_counts["pms"]: count = len(pm["unread_message_ids"]) unread_counts["unread_pms"][pm["sender_id"]] = count unread_counts["all_msg"] += count unread_counts["all_pms"] += count for stream in unread_msg_counts["streams"]: count = len(stream["unread_message_ids"]) stream_id = stream["stream_id"] # unsubscribed streams may be in raw unreads, but are not tracked if not model.is_user_subscribed_to_stream(stream_id): continue if model.is_muted_topic(stream_id, stream["topic"]): continue stream_topic = (stream_id, stream["topic"]) unread_counts["unread_topics"][stream_topic] = count if not unread_counts["streams"].get(stream_id): unread_counts["streams"][stream_id] = count else: unread_counts["streams"][stream_id] += count if stream_id not in model.muted_streams: unread_counts["all_msg"] += count 
# store unread count of group pms in `unread_huddles` for group_pm in unread_msg_counts["huddles"]: count = len(group_pm["unread_message_ids"]) user_ids = group_pm["user_ids_string"].split(",") user_ids = frozenset(map(int, user_ids)) unread_counts["unread_huddles"][user_ids] = count unread_counts["all_msg"] += count unread_counts["all_pms"] += count return unread_counts def match_user(user: Any, text: str) -> bool: """ Matches if the user full name, last name or email matches with `text` or not. """ full_name = user["full_name"].lower() keywords = full_name.split() # adding full_name helps in further narrowing down the right user. keywords.append(full_name) keywords.append(user["email"].lower()) for keyword in keywords: if keyword.startswith(text.lower()): return True return False def match_user_name_and_email(user: Any, text: str) -> bool: """ Matches if the user's full name, last name, email or a combination in the form of "name <email>" matches with `text`. """ full_name = user["full_name"].lower() email = user["email"].lower() keywords = full_name.split() keywords.append(full_name) keywords.append(email) keywords.append(f"{full_name} <{email}>") for keyword in keywords: if keyword.startswith(text.lower()): return True return False def match_emoji(emoji: str, text: str) -> bool: """ True if the emoji matches with `text` (case insensitive), False otherwise. """ return emoji.lower().startswith(text.lower()) def match_topics(topic_names: List[str], search_text: str) -> List[str]: return [ name for name in topic_names if name.lower().startswith(search_text.lower()) ] DataT = TypeVar("DataT") def match_stream( data: List[Tuple[DataT, str]], search_text: str, pinned_streams: List[StreamData] ) -> Tuple[List[DataT], List[str]]: """ Returns a list of DataT (streams) and a list of their corresponding names whose words match with the 'text' in the following order: * 1st-word startswith match > 2nd-word startswith match > ... (pinned) * 1st-word startswith match > 2nd-word startswith match > ... (unpinned) Note: This function expects `data` to be sorted, in a non-decreasing order, and ordered by their pinning status. """ pinned_stream_names = [stream["name"] for stream in pinned_streams] # Assert that the data is sorted, in a non-decreasing order, and ordered by # their pinning status. 
assert data == sorted( sorted(data, key=lambda data: data[1].lower()), key=lambda data: data[1] in pinned_stream_names, reverse=True, ) delimiters = "-_/" trans = str.maketrans(delimiters, len(delimiters) * " ") stream_splits = [ ((datum, [stream_name] + stream_name.translate(trans).split()[1:])) for datum, stream_name in data ] matches: "OrderedDict[str, DefaultDict[int, List[Tuple[DataT, str]]]]" = ( OrderedDict( [ ("pinned", defaultdict(list)), ("unpinned", defaultdict(list)), ] ) ) for datum, splits in stream_splits: stream_name = splits[0] kind = "pinned" if stream_name in pinned_stream_names else "unpinned" for match_position, word in enumerate(splits): if word.lower().startswith(search_text.lower()): matches[kind][match_position].append((datum, stream_name)) ordered_matches = [] ordered_names = [] for matched_data in matches.values(): if not matched_data: continue for match_position in range(max(matched_data.keys()) + 1): for datum, name in matched_data.get(match_position, []): if datum not in ordered_matches: ordered_matches.append(datum) ordered_names.append(name) return ordered_matches, ordered_names def match_group(group_name: str, text: str) -> bool: """ True if any group name matches with `text` (case insensitive), False otherwise. """ return group_name.lower().startswith(text.lower()) def format_string(names: List[str], wrapping_text: str) -> List[str]: """ Wrap a list of names using the wrapping characters for typeahead """ return [wrapping_text.format(name) for name in names] def powerset( iterable: Iterable[Any], map_func: Callable[[Any], Any] = set ) -> List[Any]: """ >> powerset([1,2,3]) returns: [set(), {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}]" """ s = list(iterable) powerset = chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)) return list(map(map_func, list(powerset))) def canonicalize_color(color: str) -> str: """ Given a color of the format '#xxxxxx' or '#xxx', produces one of the format '#xxx'. Always produces lowercase hex digits. """ if match(REGEX_COLOR_6_DIGIT, color, ASCII) is not None: # '#xxxxxx' color, stored by current zulip server return (color[:2] + color[3] + color[5]).lower() elif match(REGEX_COLOR_3_DIGIT, color, ASCII) is not None: # '#xxx' color, which may be stored by the zulip server <= 2.0.0 # Potentially later versions too return color.lower() else: raise ValueError(f'Unknown format for color "{color}"') def display_error_if_present(response: Dict[str, Any], controller: Any) -> None: if response["result"] == "error" and hasattr(controller, "view"): controller.report_error([response["msg"]]) def check_narrow_and_notify( outer_narrow: List[Any], inner_narrow: List[Any], controller: Any ) -> None: current_narrow = controller.model.narrow if ( current_narrow != [] and current_narrow != outer_narrow and current_narrow != inner_narrow ): key = primary_key_for_command("NARROW_MESSAGE_RECIPIENT") controller.report_success( [ f"Message is sent outside of current narrow. Press [{key}] to narrow to conversation." 
], duration=6, ) def notify_if_message_sent_outside_narrow( message: Composition, controller: Any ) -> None: current_narrow = controller.model.narrow if message["type"] == "stream": stream_narrow = [["stream", message["to"]]] topic_narrow = stream_narrow + [["topic", message["subject"]]] check_narrow_and_notify(stream_narrow, topic_narrow, controller) elif message["type"] == "private": pm_narrow = [["is", "private"]] recipient_emails = [ controller.model.user_id_email_dict[user_id] for user_id in message["to"] ] pm_with_narrow = [["pm_with", ", ".join(recipient_emails)]] check_narrow_and_notify(pm_narrow, pm_with_narrow, controller) def hash_util_decode(string: str) -> str: """ Returns a decoded string given a hash_util_encode() [present in zulip/zulip's zerver/lib/url_encoding.py] encoded string. """ # Acknowledge custom string replacements in zulip/zulip's # zerver/lib/url_encoding.py before unquote. return unquote(string.replace(".", "%")) def get_unused_fence(content: str) -> str: """ Generates fence for quoted-message based on regex pattern of continuous back-ticks. Referred and translated from zulip/static/shared/js/fenced_code.js. """ max_length_fence = 3 matches = findall(REGEX_QUOTED_FENCE_LENGTH, content, flags=MULTILINE) if len(matches) != 0: max_length_fence = max(max_length_fence, len(max(matches, key=len)) + 1) return "`" * max_length_fence @contextmanager def suppress_output() -> Iterator[None]: """ Context manager to redirect stdout and stderr to /dev/null. Adapted from https://stackoverflow.com/a/2323563 """ stdout = os.dup(1) stderr = os.dup(2) os.close(1) os.close(2) os.open(os.devnull, os.O_RDWR) try: yield finally: os.dup2(stdout, 1) os.dup2(stderr, 2)
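# --- Hedged usage sketch (not part of the original module) ---
# A quick illustration of three of the pure helpers above; the expected values follow
# directly from the implementations, assuming the zulipterminal package imports cleanly.
# The function name below is illustrative only.
def _helper_usage_sketch() -> None:
    assert powerset([1, 2, 3])[0] == set()                      # empty subset comes first
    assert canonicalize_color("#ffcc00") == "#fc0"              # 6-digit hex -> short form, lowercased
    assert hash_util_decode("some.20stream") == "some stream"   # '.' stands in for '%'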
threading_lock.py
import logging import threading import time logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-10s) %(message)s') logger = logging.getLogger(__name__) def some_func(delay, repeat, lock): logger.debug('started') lock.acquire() logger.debug('lock acquired') for _ in range(repeat): time.sleep(delay) logger.debug(time.ctime()) lock.release() logger.debug('lock released') logger.debug('completed') def some_func_cm(delay, repeat, lock): logger.debug('started') #using Lock's context manager, acquire() called when block entered, #release() called when block exited with lock: for _ in range(repeat): time.sleep(delay) logger.debug(time.ctime()) logger.debug('completed') def main(): lock = threading.Lock() t1 = threading.Thread(target=some_func, args=(1, 5, lock)) t2 = threading.Thread(target=some_func_cm, args=(2, 5, lock)) t1.start() t2.start() logger.debug('main completed') if __name__ == '__main__': main()
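# Hedged addition (not in the original example): on Python 3.2+, Lock.acquire() also
# accepts a timeout, which lets a worker give up instead of blocking forever. A minimal
# sketch in the style of the functions above; the 3-second default is an arbitrary choice.
def some_func_timeout(delay, repeat, lock, timeout=3.0):
    logger.debug('started')
    if not lock.acquire(timeout=timeout):  # returns False if the lock wasn't acquired in time
        logger.debug('gave up waiting for the lock')
        return
    try:
        for _ in range(repeat):
            time.sleep(delay)
            logger.debug(time.ctime())
    finally:
        lock.release()
    logger.debug('completed')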
network_scanner_without_mac.py
#-*-coding:utf8;-*- #qpy:3 #qpy:console import threading import os import socket from datetime import datetime import sys import subprocess as sub from subprocess import PIPE, run from subprocess import check_output class bcolors: GREEN_IP = '\033[92m' ENDC = '\033[0m' WARNING_PORT = '\033[33m' END = '\033[0m' WARNING_PORT_PORT = '\033[36m' END2 = '\033[0m' hostdevice = '\033[44m' active_device = '\033[41m' clara = '\33[101m' mario = '\33[93m' command = ['ip', 'route'] result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True) final = result.returncode, result.stdout, result.stderr hk = final[1] hkj = hk.strip().split(" ")[-1] print("\nYour IP-Address at the moment: "+ bcolors.GREEN_IP + hkj + bcolors.ENDC + "\n") def tcp_scan(network_ip): pass def check_for_firewall_ping_block(router): print(bcolors.clara + "[*] Checking if ping isn't blocked by firewall"+bcolors.END) check_one = os.system("ping -c 2 -W 2 " + router + " >/dev/null 2>&1") if check_one == 0: print(bcolors.GREEN_IP + "\n[√] seems to be good" + bcolors.END) print(bcolors.WARNING_PORT_PORT + "\n[*] starting last check (pinging google) ..." + bcolors.END) check_two = os.system("ping -c 2 -W 2 google.com >/dev/null 2>&1") if check_two == 0: print(bcolors.GREEN_IP + "\n[√] Success!!! Ping is allowed at current network" + bcolors.END) print("\n[*] continue scanning network\n") return True else: print(bcolors.active_device + "\nMaybe the wrong Gateway ..." + bcolors.END) print(bcolors.GREEN_IP + "\nTrying to ping Google ...") check_two = os.system("ping -c 2 -W 2 google.com >/dev/null 2>&1") if check_two == 0: print(bcolors.GREEN_IP + "\n[√] Success!!! Ping is allowed at current network" + bcolors.END) print("\n[*] continue scanning network\n") return True else: print(bcolors.active_device + "\n[!] WARNING! Ping scan won't work on this network! Do you want to use TCP Scan?" + bcolors.END) tcp_ask = input("Do you want to use TCP Scan? 
(Y/N)") if tcp_ask == "Y": tcp_scan(router) else: sys.exit() def ports_matching(port_nummer): with open("service-names-port-numbers.csv", "r") as file: a = 0 ports = {} for i in file: i_sp = i.strip().split(",") a = a + 1 if a > 10000: break else: #print(i_sp[0] + " " + i_sp[1]) #ports[str(i_sp[0])] = str(i_sp[]) try: if i_sp[0] == None: continue else: if i_sp[3] == "Unassigned": continue else: if i_sp[1] in ports: continue else: ports[i_sp[1]] = i_sp[0] except: pass return ports[str(port_nummer)] def pscan(hostname): target = hostname targetIP = socket.gethostbyname(target) tstart = datetime.now() ports = [] try: for p in range(1, 1200): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) res = sock.connect_ex((targetIP, p)) if res == 0: try: #portz = ports_matching(p) ports.append(p) #print("open port " + bcolors.WARNING_PORT_PORT + str(p) + bcolors.END2 + " ---> " +portz) except: print("open port " + bcolors.WARNING_PORT_PORT + str(p) + bcolors.END2) sock.close() except Exception: print("There was an Error.") sys.exit() tend = datetime.now() diff = tend - tstart for i in ports: try: portz = ports_matching(i) print("open port " + bcolors.WARNING_PORT_PORT + str(i) + bcolors.END2 + " ---> " + portz) except: print("open port " + bcolors.WARNING_PORT_PORT + str(i) + bcolors.END2) print(bcolors.WARNING_PORT + "\nScan completed in " + str(diff) + bcolors.ENDC) def scan(targ): temp = targ t=threading.Thread(target=pscan, args=(temp,)) t.start() def check_ping(hostname): response = os.system("ping -c 1 -W 1 " + hostname+" >/dev/null 2>&1") if response == 0: print("Active Devices(IP): " + bcolors.hostdevice + hostname + bcolors.ENDC + "\nName of the device: " + bcolors.active_device + socket.getfqdn(hostname) + bcolors.ENDC + "\n") pingstatus = True else: pingstatus = False return pingstatus def ipscan(): output_ip = [] print(bcolors.mario + "Do you wanna get the IP address automatically?(Y/N)" + bcolors.ENDC) kla = input() if kla == "Y": hjks = hkj.strip().split(".") auto_ip = str(hjks[0] + "." + hjks[1] + "." + hjks[2] + ".") for i in range(0,256): host = auto_ip + str(i) output_ip.append(host) else: thehost = input("Enter your IP range (e.g. 192.168.178.) : ") for i in range(0,256): host = thehost + str(i) output_ip.append(host) return output_ip command = ['ip', 'neigh'] result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True) final = result.returncode, result.stdout, result.stderr router_ip = final[1].split(" ")[0] mac_of_router = final[1].split(" ")[4] print("Default gateway router:") print("---> "+str(router_ip)) print("---> "+str(mac_of_router)) print() check_for_firewall_ping_block(router_ip) hosts = ipscan() num_hosts = len(hosts) for k in range(num_hosts): temp = hosts[k] t=threading.Thread(target=check_ping, args=(temp,)) t.start() print("Do you wanna scan again? (Y/N)") g = input() if g == "Y": for k in range(num_hosts): temp = hosts[k] t=threading.Thread(target=check_ping, args=(temp,)) t.start() jale = 1 while jale > 0: qes = input("Do you wanna scan a device with a port scan? (Y/N): ") if qes == "Y": iiptar = input("Enter your target device IP: ") scan(iiptar) jale = 0 else: sys.exit()
data_loader.py
""" Manages downloading and storing the MNIST dataset. """ # This forks processes, so we want to import it as soon as possible, when there # is as little memory as possible being used. from common.data_manager import cache, image_getter, imagenet import cPickle as pickle import gzip import json import logging import os import random import urllib2 import signal import sys import threading import cv2 import numpy as np import theano import theano.tensor as TT MNIST_URL = "http://deeplearning.net/data/mnist/mnist.pkl.gz" MNIST_FILE = "mnist.pkl.gz" logger = logging.getLogger(__name__) class Loader(object): """ Generic superclass for anything that loads input data. """ def __init__(self): self._shared_train_set = [None, None] self._shared_test_set = [None, None] self._shared_valid_set = [None, None] self._train_set_size = None self._test_set_size = None self._valid_set_size = None def _shared_dataset(self, data, shared_set): """ Function that loads the dataset into shared variables The reason we store our dataset in shared variables is to allow Theano to copy it into the GPU memory (when code is run on GPU). Since copying data into the GPU is slow, copying a minibatch everytime is needed (the default behaviour if the data is not in a shared variable) would lead to a large decrease in performance. Args: data: The data to load. shared_set: The shared variables to load it into. Returns: Symbolic shared variable containing the dataset. """ data_x, data_y = data data_y = np.asarray(data_y) if shared_set == [None, None]: # The shared variables weren't initialized yet. shared_set[0] = theano.shared(data_x.astype(theano.config.floatX)) shared_set[1] = theano.shared(data_y.astype(theano.config.floatX)) else: # They are initialized, we just need to set new values. shared_set[0].set_value(data_x.astype(theano.config.floatX)) shared_set[1].set_value(data_y.astype(theano.config.floatX)) def __cast_dataset(self, dataset): """ To store it on the GPU, it needs to be of type float32, however, the labels need to be type int, so we use this little casting hack. Args: dataset: The dataset to operate on. Returns: A version of dataset with the labels casted. """ images, labels = dataset return (images, TT.cast(labels, "int32")) def get_train_set(self): """ Returns: The training set. """ return self.__cast_dataset(self._shared_train_set) def get_test_set(self): """ Returns: The testing set. """ return self.__cast_dataset(self._shared_test_set) def get_valid_set(self): """ Returns: The validation set. """ return self.__cast_dataset(self._shared_valid_set) def get_train_batch_size(self): """ Returns: The size of the training batches. """ return self._train_batch_size def get_test_batch_size(self): """ Returns: The size of the testing batches. """ return self._test_batch_size def get_valid_batch_size(self): """ Returns: The size of the validation batches. """ return self._valid_batch_size class Mnist(Loader): """ Deals with the MNIST dataset. Args: use_4d: If True, it will reshape the inputs to 4D tensors for use in a CNN. Defaults to False. """ def __init__(self, use_4d=False): super(Mnist, self).__init__() self.__load(use_4d) def __download_mnist(self): """ Downloads the mnist dataset from MNIST_URL. """ logger.info("Downloading MNIST data...") response = urllib2.urlopen(MNIST_URL) data = response.read() # Save it to a file. mnist_file = open(MNIST_FILE, "w") mnist_file.write(data) mnist_file.close() def __load(self, use_4d): """ Loads mnist dataset from the disk, or downloads it first if it isn't present. 
Args: use_4d: If True, it will reshape the inputs to a 4D tensor for use in a CNN. Returns: A training set, testing set, and a validation set. """ if not os.path.exists(MNIST_FILE): # Download it first. self.__download_mnist() logger.info("Loading MNIST from disk...") mnist_file = gzip.open(MNIST_FILE, "rb") train_set, test_set, valid_set = pickle.load(mnist_file) mnist_file.close() # Reshape if we need to. if use_4d: logger.debug("Note: Using 4D tensor representation. ") train_x, train_y = train_set test_x, test_y = test_set valid_x, valid_y = valid_set train_x = train_x.reshape(-1, 1, 28, 28) test_x = test_x.reshape(-1, 1, 28, 28) valid_x = valid_x.reshape(-1, 1, 28, 28) train_set = (train_x, train_y) test_set = (test_x, test_y) valid_set = (valid_x, valid_y) self._train_set_size = train_set[1].shape[0] self._test_set_size = test_set[1].shape[0] self._valid_set_size = valid_set[1].shape[0] # Copy to shared variables. self._shared_dataset(train_set, self._shared_train_set) self._shared_dataset(test_set, self._shared_test_set) self._shared_dataset(valid_set, self._shared_valid_set) class DataManagerLoader(Loader): """ Loads datasets concurrently with the help of the data_manager package. """ def __init__(self, batch_size, load_batches, image_shape, cache_location, dataset_location, patch_shape=None, patch_flip=True): """ Args: batch_size: How many images are in each batch. load_batches: How many batches to have in VRAM at any given time. image_shape: The shape of the images that will be loaded. cache_location: The location of the image cache. dataset_location: The common part of the path to the files that we will be loading our training and testing datasets from. patch_shape: The shape of the patches that will be extracted from the images. If None, no patches will be extracted, and the raw images will be used directly. patch_flip: Whether to include flipped patches. """ super(DataManagerLoader, self).__init__() self._image_shape = image_shape self._cache_location = cache_location self._dataset_location = dataset_location self._patch_shape = patch_shape self._patch_flip = patch_flip # Register signal handlers. signal.signal(signal.SIGTERM, self.__on_signal) signal.signal(signal.SIGINT, self.__on_signal) self._buffer_size = batch_size * load_batches logger.debug("Nominal buffer size: %d" % (self._buffer_size)) # Handle to the actual buffers containing images. self.__training_buffer = None self.__testing_buffer = None self.__training_labels = None self.__testing_labels = None self._train_batch_size = self._buffer_size if not self._patch_shape: # No patches. self._test_batch_size = self._buffer_size elif not patch_flip: # We only have five patches. self._test_batch_size = self._buffer_size * 5 else: # We have to account for all the patches. self._test_batch_size = self._buffer_size * 10 # This is how we'll actually get images. self._init_image_getter() # Lock that we use to make sure we are only getting one batch at a time. self.__image_getter_lock = threading.Lock() # These are used to signal the loader thread to load more data, and the main # thread to copy the loaded data. self.__train_buffer_empty = threading.Lock() self.__train_buffer_full = threading.Lock() self.__test_buffer_empty = threading.Lock() self.__test_buffer_full = threading.Lock() # Lock to protect accesses to data in CPU memory. self.__train_cpu_lock = threading.Lock() self.__test_cpu_lock = threading.Lock() self.__batch_size = batch_size self.__load_batches = load_batches # Force it to wait for data initially. 
self.__train_buffer_full.acquire() self.__test_buffer_full.acquire() # Labels have to be integers, so that means we have to map labels to # integers. self.__labels = {} # Map that goes in the opposite direction. self.__reverse_labels = {} self.__current_label = 0 # Image ID values for the loaded images. self.__training_names = [] self.__testing_names = [] # This is an event that signals to the internal threads that it's time to # exit. self.__exit_event = threading.Event() # Start the loader threads. self._init_loader_threads() self.__cleaned_up = False def __del__(self): """ Cleanup upon program exit. """ self.exit_gracefully() def __on_signal(self, *args, **kwargs): """ Upon receiving a signal, it cleans up and exits the program. """ logger.error("Got signal, exiting.") self.exit_gracefully() sys.exit(1) def exit_gracefully(self): """ Stop the threads and exit properly. """ if self.__cleaned_up: # We don't need to do this again. return logger.info("Data loader system is exiting NOW.") # Signal internal threads that it's time to quit. self.__exit_event.set() # Release all the locks so nothing can be blocking on them. try: self.__train_buffer_empty.release() except threading.ThreadError: pass try: self.__train_buffer_full.release() except threading.ThreadError: pass try: self.__test_buffer_empty.release() except threading.ThreadError: pass try: self.__test_buffer_full.release() except threading.ThreadError: pass # Wait for the internal threads to join. self._join_loader_threads() # Cleanup the image getter. self._image_getter.cleanup() self.__cleaned_up = True def _init_image_getter(self): """ Initializes the specific ImageGetter that we will use to get images. This can be overriden by subclasses to add specific functionality. """ self._image_getter = \ image_getter.ImageGetter(self._cache_location, self._buffer_size, self._image_shape, preload_batches=2, load_datasets_from=self._dataset_location, patch_shape=self._patch_shape, patch_flip=self._patch_flip) def _init_loader_threads(self): """ Starts the training and testing loader threads. """ self._test_thread = threading.Thread(target=self._run_test_loader_thread) self._test_thread.start() self._train_thread = threading.Thread(target=self._run_train_loader_thread) self._train_thread.start() def _join_loader_threads(self): """ Joins the training and testing loader threads. If you override _init_loader_threads(), you should probably override this method too. """ logger.info("Joining threads...") self._train_thread.join() self._test_thread.join() def _load_raw_training_batch(self): """ Loads raw image and label data from somewhere. This can be overriden by subclasses to add specific functionality. Returns: The loaded images, labels, and names. """ return self._image_getter.get_random_train_batch() def _load_raw_testing_batch(self): """ Loads raw image and label data from somewhere. This can be overriden by subclasses to add specific functionality. Returns: The loaded images, labels, and names. """ return self._image_getter.get_random_test_batch() def __convert_labels_to_ints(self, labels): """ Converts a set of labels from the default label names to integers, so that they can actually be used in the network. Args: labels: The labels to convert. Returns: A list of the converted labels. """ converted = [] for label in labels: if label in self.__labels: converted.append(self.__labels[label]) else: # This is the first time we've seen this label. 
converted.append(self.__current_label) self.__labels[label] = self.__current_label self.__reverse_labels[self.__current_label] = label self.__current_label += 1 return converted def __load_next_training_batch(self): """ Loads the next batch of training data from the Imagenet backend. """ self.__training_buffer, labels, names = self._load_raw_training_batch() logger.debug("Got raw labels: %s" % (labels)) mean = np.mean(self.__training_buffer).astype(theano.config.floatX) logger.debug("Training mean: %f" % mean) self.__train_cpu_lock.acquire() self.__training_names = names # Convert labels. self.__training_labels = self.__convert_labels_to_ints(labels) self.__train_cpu_lock.release() self.__training_buffer = self.__training_buffer.astype(theano.config.floatX) self.__training_buffer -= mean def __load_next_testing_batch(self): """ Loads the next batch of testing data from the Imagenet backend. """ self.__testing_buffer, labels, names = self._load_raw_testing_batch() logger.debug("Got raw labels: %s" % (labels)) mean = np.mean(self.__testing_buffer).astype(theano.config.floatX) logger.debug("Testing mean: %f" % mean) self.__test_cpu_lock.acquire() self.__testing_names = names # Convert labels. self.__testing_labels = self.__convert_labels_to_ints(labels) self.__test_cpu_lock.release() self.__testing_buffer = self.__testing_buffer.astype(theano.config.floatX) self.__testing_buffer -= mean def _run_train_loader_thread(self): """ The main function for the thread to load training data. """ while True: # Make sure we don't write over our old batch. self.__train_buffer_empty.acquire() self.__image_getter_lock.acquire() logger.info("Loading next training batch from imagenet...") self.__load_next_training_batch() logger.info("Done loading next training batch.") thread_error = None try: self.__image_getter_lock.release() # Allow the main thread to use what we loaded. self.__train_buffer_full.release() except threading.ThreadError as e: # The only way this should happen is if we hit an exit condition. thread_error = e if self.__exit_event.is_set(): logger.info("Got exit event, terminating train loader thread.") return if thread_error: raise thread_error def _run_test_loader_thread(self): """ The main function for the thread to load training data. """ while True: # Make sure we don't write over our old batch. self.__test_buffer_empty.acquire() self.__image_getter_lock.acquire() logger.info("Loading next testing batch from imagenet...") self.__load_next_testing_batch() logger.info("Done loading next testing batch.") thread_error = None try: self.__image_getter_lock.release() # Allow the main thread to use what we loaded. self.__test_buffer_full.release() except threading.ThreadError as e: # The only way this should happen is if we hit an exit condition. thread_error = e if self.__exit_event.is_set(): logger.info("Got exit event, terminating test loader thread.") return if thread_error: raise thread_error def __swap_in_training_data(self): """ Takes training data buffered into system memory and loads it into VRAM for immediate use. """ logger.info("Waiting for new training data to be ready...") self.__train_buffer_full.acquire() logger.info("Loading new training dataset into VRAM...") self._shared_dataset((self.__training_buffer, self.__training_labels), self._shared_train_set) # Allow it to load another batch. self.__train_buffer_empty.release() def __swap_in_testing_data(self): """ Takes testing data buffered into system memory and loads it into VRAM for immediate use. 
""" logger.info("Waiting for new testing data to be ready...") self.__test_buffer_full.acquire() logger.info("Loading new testing data into VRAM...") self._shared_dataset((self.__testing_buffer, self.__testing_labels), self._shared_test_set) # Allow it to load another batch. self.__test_buffer_empty.release() def get_train_set(self): # Load a new set for it. self.__swap_in_training_data() return super(DataManagerLoader, self).get_train_set() def get_test_set(self): # Load a new set for it. self.__swap_in_testing_data() return super(DataManagerLoader, self).get_test_set() def get_non_shared_test_set(self): """ Returns: A non-shared version of the test set, useful for AlexNet. """ self.__test_cpu_lock.acquire() labels = self.__testing_labels[:] self.__test_cpu_lock.release() return labels def get_non_shared_train_set(self): """ Returns: A non-shared version of the train set. """ self.__train_cpu_lock.acquire() labels = self.__training_labels[:] self.__train_cpu_lock.release() return labels def get_test_names(self): """ Returns: A list of the image names of the loaded images for the test set. """ self.__test_cpu_lock.acquire() names = self.__testing_names[:] self.__test_cpu_lock.release() return names def get_train_names(self): """ Returns: A list of the image names of the loaded images for the train set. """ self.__train_cpu_lock.acquire() names = self.__training_names[:] self.__train_cpu_lock.release() return names def get_train_set_size(self): """ Returns: The total number of images in the training dataset. """ return self._image_getter.get_train_set_size() def get_test_set_size(self): """ Returns: The total number of images in the testing dataset. """ return self._image_getter.get_test_set_size() def save(self, filename): """ Allows the saving of label associations for later use. Args: filename: The name of the file to write the saved data to. """ file_object = open(filename, "wb") pickle.dump((self.__labels, self.__reverse_labels, self.__current_label), file_object) file_object.close() def load(self, filename): """ Loads label associations that have been saved to a file. Args: filename: The name of the file to load from. """ file_object = file(filename, "rb") self.__labels, self.__reverse_labels, self.__current_label = \ pickle.load(file_object) logger.debug("Starting at label %d." % (self.__current_label)) file_object.close() def convert_ints_to_labels(self, output): """ Converts the output from a network tester or predictor to the actual corresponding labels. Args: output: A list of numbers to convert. Returns: A list of the actual labels. """ labels = [] self.__train_cpu_lock.acquire() self.__test_cpu_lock.acquire() for number in output: labels.append(self.__reverse_labels[number]) self.__train_cpu_lock.release() self.__test_cpu_lock.release() return labels class SequentialDataManagerLoader(DataManagerLoader): """ Same as DataManagerLoader, but loads sequential batches instead of random batches. This is useful for doing things like performing validation. """ def _load_raw_training_batch(self): """ Loads raw image and label data from somewhere. This can be overriden by subclasses to add specific functionality. Returns: The loaded images and labels. """ return self._image_getter.get_sequential_train_batch() def _load_raw_testing_batch(self): """ Loads raw image and label data from somewhere. This can be overriden by subclasses to add specific functionality. Returns: The loaded images and labels. 
""" return self._image_getter.get_sequential_test_batch() class ImagenetLoader(DataManagerLoader): """ Loads data from imagenet. """ def __init__(self, batch_size, load_batches, cache_location, dataset_location, synset_location, synset_file): """ See superclass documentation for this method. Additional Args: synset_location: Where to store downloaded synset data. synset_file: The file to load the synsets to use from. """ self.__synset_location = synset_location self.__synset_file = synset_file super(ImagenetLoader, self).__init__(batch_size, load_batches, (256, 256, 3), cache_location, dataset_location, patch_shape=(244, 244)) def _init_image_getter(self): """ Initializes the specific ImageGetter that we will use to get images. """ self._image_getter = \ imagenet.SynsetFileImagenetGetter( \ self.__synset_file, self.__synset_location, self._cache_location, self._buffer_size, self._image_shape, preload_batches=2, load_datasets_from=self._dataset_location, patch_shape=self._patch_shape)
test_pyerrors.py
import pytest import sys import StringIO from pypy.module.cpyext.state import State from pypy.module.cpyext.pyobject import make_ref from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi class TestExceptions(BaseApiTest): def test_GivenExceptionMatches(self, space, api): old_style_exception = space.appexec([], """(): class OldStyle: pass return OldStyle """) exc_matches = api.PyErr_GivenExceptionMatches string_exception = space.wrap('exception') instance = space.call_function(space.w_ValueError) old_style_instance = space.call_function(old_style_exception) assert exc_matches(string_exception, string_exception) assert exc_matches(old_style_exception, old_style_exception) assert not exc_matches(old_style_exception, space.w_Exception) assert exc_matches(instance, space.w_ValueError) assert exc_matches(old_style_instance, old_style_exception) assert exc_matches(space.w_ValueError, space.w_ValueError) assert exc_matches(space.w_IndexError, space.w_LookupError) assert not exc_matches(space.w_ValueError, space.w_LookupError) exceptions = space.newtuple([space.w_LookupError, space.w_ValueError]) assert exc_matches(space.w_ValueError, exceptions) def test_ExceptionMatches(self, space, api): api.PyErr_SetObject(space.w_ValueError, space.wrap("message")) assert api.PyErr_ExceptionMatches(space.w_Exception) assert api.PyErr_ExceptionMatches(space.w_ValueError) assert not api.PyErr_ExceptionMatches(space.w_TypeError) api.PyErr_Clear() def test_Occurred(self, space, api): assert not api.PyErr_Occurred() string = rffi.str2charp("spam and eggs") api.PyErr_SetString(space.w_ValueError, string) rffi.free_charp(string) assert api.PyErr_Occurred() is space.w_ValueError api.PyErr_Clear() def test_SetObject(self, space, api): api.PyErr_SetObject(space.w_ValueError, space.wrap("a value")) assert api.PyErr_Occurred() is space.w_ValueError state = space.fromcache(State) operror = state.get_exception() assert space.eq_w(operror.get_w_value(space), space.wrap("a value")) api.PyErr_Clear() def test_SetNone(self, space, api): api.PyErr_SetNone(space.w_KeyError) state = space.fromcache(State) operror = state.get_exception() assert space.eq_w(operror.w_type, space.w_KeyError) assert space.eq_w(operror.get_w_value(space), space.w_None) api.PyErr_Clear() api.PyErr_NoMemory() operror = state.get_exception() assert space.eq_w(operror.w_type, space.w_MemoryError) api.PyErr_Clear() def test_Warning(self, space, api, capfd): message = rffi.str2charp("this is a warning") api.PyErr_WarnEx(None, message, 1) out, err = capfd.readouterr() assert ": UserWarning: this is a warning" in err rffi.free_charp(message) def test_print_err(self, space, api, capfd): api.PyErr_SetObject(space.w_Exception, space.wrap("cpyext is cool")) api.PyErr_Print() out, err = capfd.readouterr() assert "cpyext is cool" in err assert not api.PyErr_Occurred() def test_WriteUnraisable(self, space, api, capfd): api.PyErr_SetObject(space.w_ValueError, space.wrap("message")) w_where = space.wrap("location") api.PyErr_WriteUnraisable(w_where) out, err = capfd.readouterr() assert "Exception ValueError: 'message' in 'location' ignored" == err.strip() def test_ExceptionInstance_Class(self, space, api): instance = space.call_function(space.w_ValueError) assert api.PyExceptionInstance_Class(instance) is space.w_ValueError @pytest.mark.skipif(True, reason='not implemented yet') def test_interrupt_occurred(self, space, api): assert not 
api.PyOS_InterruptOccurred() import signal, os recieved = [] def default_int_handler(*args): recieved.append('ok') signal.signal(signal.SIGINT, default_int_handler) os.kill(os.getpid(), signal.SIGINT) assert recieved == ['ok'] assert api.PyOS_InterruptOccurred() def test_restore_traceback(self, space, api): string = rffi.str2charp("spam and eggs") api.PyErr_SetString(space.w_ValueError, string) state = space.fromcache(State) operror = state.clear_exception() # Fake a traceback. operror.set_traceback(space.w_True) # this doesn't really need to be a real traceback for this test. w_type = operror.w_type w_value = operror.get_w_value(space) w_tb = operror.get_w_traceback(space) assert not space.eq_w(w_tb, space.w_None) api.PyErr_Restore(make_ref(space, w_type), make_ref(space, w_value), make_ref(space, w_tb)) operror = state.clear_exception() w_tb_restored = operror.get_w_traceback(space) assert space.eq_w(w_tb_restored, w_tb) rffi.free_charp(string) class AppTestFetch(AppTestCpythonExtensionBase): def test_occurred(self): module = self.import_extension('foo', [ ("check_error", "METH_NOARGS", ''' PyErr_SetString(PyExc_TypeError, "message"); PyErr_Occurred(); PyErr_Clear(); Py_RETURN_TRUE; ''' ), ]) assert module.check_error() def test_fetch_and_restore(self): module = self.import_extension('foo', [ ("check_error", "METH_NOARGS", ''' PyObject *type, *val, *tb; PyErr_SetString(PyExc_TypeError, "message"); PyErr_Fetch(&type, &val, &tb); if (PyErr_Occurred()) return NULL; if (type != PyExc_TypeError) Py_RETURN_FALSE; PyErr_Restore(type, val, tb); if (!PyErr_Occurred()) Py_RETURN_FALSE; PyErr_Clear(); Py_RETURN_TRUE; ''' ), ]) assert module.check_error() def test_normalize(self): module = self.import_extension('foo', [ ("check_error", "METH_NOARGS", ''' PyObject *type, *val, *tb; PyErr_SetString(PyExc_TypeError, "message"); PyErr_Fetch(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; if (!PyString_Check(val)) Py_RETURN_FALSE; /* Normalize */ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; if ((PyObject*)Py_TYPE(val) != PyExc_TypeError) Py_RETURN_FALSE; /* Normalize again */ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; if ((PyObject*)Py_TYPE(val) != PyExc_TypeError) Py_RETURN_FALSE; PyErr_Restore(type, val, tb); PyErr_Clear(); Py_RETURN_TRUE; ''' ), ]) assert module.check_error() def test_normalize_no_exception(self): module = self.import_extension('foo', [ ("check_error", "METH_NOARGS", ''' PyObject *type, *val, *tb; PyErr_Fetch(&type, &val, &tb); if (type != NULL) Py_RETURN_FALSE; if (val != NULL) Py_RETURN_FALSE; PyErr_NormalizeException(&type, &val, &tb); Py_RETURN_TRUE; ''' ), ]) assert module.check_error() def test_SetFromErrno(self): import sys if sys.platform != 'win32': skip("callbacks through ll2ctypes modify errno") import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; PyErr_SetFromErrno(PyExc_OSError); return NULL; '''), ], prologue="#include <errno.h>") try: module.set_from_errno() except OSError as e: assert e.errno == errno.EBADF assert e.strerror == os.strerror(errno.EBADF) assert e.filename is None def test_SetFromErrnoWithFilename(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/path/to/file"); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert 
exc_info.value.filename == "/path/to/file" if self.runappdirect: # untranslated the errno can get reset by the calls to ll2ctypes assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilename_NULL(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename is None if self.runappdirect: # untranslated the errno can get reset by the calls to ll2ctypes assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilenameObject__PyString(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' PyObject *filenameObject = PyString_FromString("/path/to/file"); errno = EBADF; PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, filenameObject); Py_DECREF(filenameObject); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename == "/path/to/file" if self.runappdirect: # untranslated the errno can get reset by the calls to ll2ctypes assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilenameObject__PyInt(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' PyObject *intObject = PyInt_FromLong(3); errno = EBADF; PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, intObject); Py_DECREF(intObject); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename == 3 if self.runappdirect: # untranslated the errno can get reset by the calls to ll2ctypes assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilenameObject__PyList(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); errno = EBADF; PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, lst); Py_DECREF(lst); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename == [1, 2, "three"] if self.runappdirect: # untranslated the errno can get reset by the calls to ll2ctypes assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilenameObject__PyTuple(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); errno = EBADF; PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, tuple); Py_DECREF(tuple); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename == (1, 2, "three") if self.runappdirect: # untranslated the errno can get reset by the calls to ll2ctypes assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilenameObject__Py_None(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' PyObject *none = Py_BuildValue(""); errno = EBADF; 
PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, none); Py_DECREF(none); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename is None if self.runappdirect: # untranslated the errno can get reset by the calls to ll2ctypes assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_PyErr_Display(self): from sys import version_info if self.runappdirect and (version_info.major < 3 or version_info.minor < 3): skip('PyErr_{GS}etExcInfo introduced in python 3.3') module = self.import_extension('foo', [ ("display_error", "METH_VARARGS", r''' PyObject *type, *val, *tb; PyErr_GetExcInfo(&type, &val, &tb); PyErr_Display(type, val, tb); Py_XDECREF(type); Py_XDECREF(val); Py_XDECREF(tb); Py_RETURN_NONE; '''), ]) import sys, StringIO sys.stderr = StringIO.StringIO() try: 1 / 0 except ZeroDivisionError: module.display_error() finally: output = sys.stderr.getvalue() sys.stderr = sys.__stderr__ assert "in test_PyErr_Display\n" in output assert "ZeroDivisionError" in output @pytest.mark.skipif(True, reason= "XXX seems to pass, but doesn't: 'py.test -s' shows errors in PyObject_Free") def test_GetSetExcInfo(self): import sys if self.runappdirect and (sys.version_info.major < 3 or sys.version_info.minor < 3): skip('PyErr_{GS}etExcInfo introduced in python 3.3') module = self.import_extension('foo', [ ("getset_exc_info", "METH_VARARGS", r''' PyObject *type, *val, *tb; PyObject *new_type, *new_val, *new_tb; PyObject *result; if (!PyArg_ParseTuple(args, "OOO", &new_type, &new_val, &new_tb)) return NULL; PyErr_GetExcInfo(&type, &val, &tb); Py_INCREF(new_type); Py_INCREF(new_val); Py_INCREF(new_tb); PyErr_SetExcInfo(new_type, new_val, new_tb); result = Py_BuildValue("OOO", type ? type : Py_None, val ? val : Py_None, tb ? tb : Py_None); Py_XDECREF(type); Py_XDECREF(val); Py_XDECREF(tb); return result; ''' ), ]) try: raise ValueError(5) except ValueError as old_exc: new_exc = TypeError("TEST") orig_sys_exc_info = sys.exc_info() orig_exc_info = module.getset_exc_info(new_exc.__class__, new_exc, None) new_sys_exc_info = sys.exc_info() new_exc_info = module.getset_exc_info(*orig_exc_info) reset_sys_exc_info = sys.exc_info() assert orig_exc_info[0] is old_exc.__class__ assert orig_exc_info[1] is old_exc assert orig_exc_info == orig_sys_exc_info assert orig_exc_info == reset_sys_exc_info assert new_exc_info == (new_exc.__class__, new_exc, None) assert new_exc_info == new_sys_exc_info def test_PyErr_BadInternalCall(self): # NB. it only seemed to fail when run with '-s'... but I think # that it always printed stuff to stderr module = self.import_extension('foo', [ ("oops", "METH_NOARGS", r''' PyErr_BadInternalCall(); return NULL; '''), ]) raises(SystemError, module.oops) def test_error_thread_race(self): # Check race condition: thread 0 returns from cpyext with error set, # after thread 1 has set an error but before it returns. module = self.import_extension('foo', [ ("emit_error", "METH_VARARGS", ''' PyThreadState *save = NULL; PyGILState_STATE gilsave; /* NB. 
               synchronization due to GIL */
            static volatile int flag = 0;
            int id;
            if (!PyArg_ParseTuple(args, "i", &id))
                return NULL;
            /* Proceed in thread 1 first */
            save = PyEval_SaveThread();
            if (save == NULL) abort();
            while (id == 0 && flag == 0);
            gilsave = PyGILState_Ensure();
            if (gilsave != PyGILState_UNLOCKED) abort();
            PyErr_Format(PyExc_ValueError, "%d", id);
            /* Proceed in thread 0 first */
            if (id == 1) flag = 1;
            PyGILState_Release(gilsave);
            while (id == 1 && flag == 1);
            PyEval_RestoreThread(save);
            if (id == 0) flag = 0;
            return NULL;
            '''
             ),
            ])

        import threading
        failures = []

        def worker(arg):
            try:
                module.emit_error(arg)
                failures.append(True)
            except Exception as exc:
                if str(exc) != str(arg):
                    failures.append(exc)

        threads = [threading.Thread(target=worker, args=(j,))
                   for j in (0, 1)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        assert not failures
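The thread-race test above verifies at the C level that error state set by one thread is never observed by the other. The same property can be illustrated in pure Python, where exception handling is naturally per-thread; the sketch below mirrors the structure of the test but is only an illustration, not part of the pypy test suite:

import threading

failures = []

def raise_and_check(ident):
    try:
        raise ValueError(str(ident))
    except ValueError as exc:
        # Each thread must see exactly the value it raised itself.
        if str(exc) != str(ident):
            failures.append((ident, exc))

threads = [threading.Thread(target=raise_and_check, args=(j,)) for j in (0, 1)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert not failures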
program.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for TensorBoard command line program. This is a lightweight module for bringing up a TensorBoard HTTP server or emulating the `tensorboard` shell command. Those wishing to create custom builds of TensorBoard can use this module by swapping out `tensorboard.main` with the custom definition that modifies the set of plugins and static assets. This module does not depend on first-party plugins or the default web server assets. Those are defined in `tensorboard.default`. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from abc import ABCMeta from abc import abstractmethod import argparse import atexit from collections import defaultdict import errno import inspect import logging import os import signal import socket import sys import threading import time import absl.logging import six from six.moves import urllib from six.moves import xrange # pylint: disable=redefined-builtin from werkzeug import serving from tensorboard import manager from tensorboard import version from tensorboard.backend import application from tensorboard.backend.event_processing import event_file_inspector as efi from tensorboard.plugins import base_plugin from tensorboard.plugins.core import core_plugin from tensorboard.util import tb_logging try: from absl import flags as absl_flags from absl.flags import argparse_flags except ImportError: # Fall back to argparse with no absl flags integration. absl_flags = None argparse_flags = argparse logger = tb_logging.get_logger() def setup_environment(): """Makes recommended modifications to the environment. This functions changes global state in the Python process. Calling this function is a good idea, but it can't appropriately be called from library routines. """ absl.logging.set_verbosity(absl.logging.WARNING) # The default is HTTP/1.0 for some strange reason. If we don't use # HTTP/1.1 then a new TCP socket and Python thread is created for # each HTTP request. The tradeoff is we must always specify the # Content-Length header, or do chunked encoding for streaming. serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1' def get_default_assets_zip_provider(): """Opens stock TensorBoard web assets collection. Returns: Returns function that returns a newly opened file handle to zip file containing static assets for stock TensorBoard, or None if webfiles.zip could not be found. The value the callback returns must be closed. The paths inside the zip file are considered absolute paths on the web server. """ path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))), 'webfiles.zip') if not os.path.exists(path): logger.warning('webfiles.zip static assets not found: %s', path) return None return lambda: open(path, 'rb') class TensorBoard(object): """Class for running TensorBoard. 
Fields: plugin_loaders: Set from plugins passed to constructor. assets_zip_provider: Set by constructor. server_class: Set by constructor. flags: An argparse.Namespace set by the configure() method. cache_key: As `manager.cache_key`; set by the configure() method. """ def __init__(self, plugins=None, assets_zip_provider=None, server_class=None): """Creates new instance. Args: plugin: A list of TensorBoard plugins to load, as TBPlugin classes or TBLoader instances or classes. If not specified, defaults to first-party plugins. assets_zip_provider: Delegates to TBContext or uses default if None. server_class: An optional factory for a `TensorBoardServer` to use for serving the TensorBoard WSGI app. If provided, its callable signature should match that of `TensorBoardServer.__init__`. """ if plugins is None: from tensorboard import default plugins = default.get_plugins() if assets_zip_provider is None: assets_zip_provider = get_default_assets_zip_provider() if server_class is None: server_class = create_port_scanning_werkzeug_server self.plugin_loaders = [application.make_plugin_loader(p) for p in plugins] self.assets_zip_provider = assets_zip_provider self.server_class = server_class self.flags = None def configure(self, argv=('',), **kwargs): """Configures TensorBoard behavior via flags. This method will populate the "flags" property with an argparse.Namespace representing flag values parsed from the provided argv list, overridden by explicit flags from remaining keyword arguments. Args: argv: Can be set to CLI args equivalent to sys.argv; the first arg is taken to be the name of the path being executed. kwargs: Additional arguments will override what was parsed from argv. They must be passed as Python data structures, e.g. `foo=1` rather than `foo="1"`. Returns: Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism for absl.app.run() compatibility. Raises: ValueError: If flag values are invalid. """ parser = argparse_flags.ArgumentParser( prog='tensorboard', description=('TensorBoard is a suite of web applications for ' 'inspecting and understanding your TensorFlow runs ' 'and graphs. https://github.com/tensorflow/tensorboard ')) for loader in self.plugin_loaders: loader.define_flags(parser) arg0 = argv[0] if argv else '' flags = parser.parse_args(argv[1:]) # Strip binary name from argv. self.cache_key = manager.cache_key( working_directory=os.getcwd(), arguments=argv[1:], configure_kwargs=kwargs, ) if absl_flags and arg0: # Only expose main module Abseil flags as TensorBoard native flags. # This is the same logic Abseil's ArgumentParser uses for determining # which Abseil flags to include in the short helpstring. for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)): if hasattr(flags, flag.name): raise ValueError('Conflicting Abseil flag: %s' % flag.name) setattr(flags, flag.name, flag.value) for k, v in kwargs.items(): if not hasattr(flags, k): raise ValueError('Unknown TensorBoard flag: %s' % k) setattr(flags, k, v) for loader in self.plugin_loaders: loader.fix_flags(flags) self.flags = flags return [arg0] def main(self, ignored_argv=('',)): """Blocking main function for TensorBoard. This method is called by `tensorboard.main.run_main`, which is the standard entrypoint for the tensorboard command line program. The configure() method must be called first. Args: ignored_argv: Do not pass. Required for Abseil compatibility. Returns: Process exit code, i.e. 0 if successful or non-zero on failure. 
In practice, an exception will most likely be raised instead of returning non-zero. :rtype: int """ self._install_signal_handler(signal.SIGTERM, "SIGTERM") if self.flags.inspect: logger.info('Not bringing up TensorBoard, but inspecting event files.') event_file = os.path.expanduser(self.flags.event_file) efi.inspect(self.flags.logdir, event_file, self.flags.tag) return 0 if self.flags.version_tb: print(version.VERSION) return 0 try: server = self._make_server() sys.stderr.write('TensorBoard %s at %s (Press CTRL+C to quit)\n' % (version.VERSION, server.get_url())) sys.stderr.flush() self._register_info(server) server.serve_forever() return 0 except TensorBoardServerException as e: logger.error(e.msg) sys.stderr.write('ERROR: %s\n' % e.msg) sys.stderr.flush() return -1 def launch(self): """Python API for launching TensorBoard. This method is the same as main() except it launches TensorBoard in a separate permanent thread. The configure() method must be called first. Returns: The URL of the TensorBoard web server. :rtype: str """ # Make it easy to run TensorBoard inside other programs, e.g. Colab. server = self._make_server() thread = threading.Thread(target=server.serve_forever, name='TensorBoard') thread.daemon = True thread.start() return server.get_url() def _register_info(self, server): """Write a TensorBoardInfo file and arrange for its cleanup. Args: server: The result of `self._make_server()`. """ server_url = urllib.parse.urlparse(server.get_url()) info = manager.TensorBoardInfo( version=version.VERSION, start_time=int(time.time()), port=server_url.port, pid=os.getpid(), path_prefix=self.flags.path_prefix, logdir=self.flags.logdir or self.flags.logdir_spec, db=self.flags.db, cache_key=self.cache_key, ) atexit.register(manager.remove_info_file) manager.write_info_file(info) def _install_signal_handler(self, signal_number, signal_name): """Set a signal handler to gracefully exit on the given signal. When this process receives the given signal, it will run `atexit` handlers and then exit with `0`. Args: signal_number: The numeric code for the signal to handle, like `signal.SIGTERM`. signal_name: The human-readable signal name. """ old_signal_handler = None # set below def handler(handled_signal_number, frame): # In case we catch this signal again while running atexit # handlers, take the hint and actually die. signal.signal(signal_number, signal.SIG_DFL) sys.stderr.write("TensorBoard caught %s; exiting...\n" % signal_name) # The main thread is the only non-daemon thread, so it suffices to # exit hence. if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL): old_signal_handler(handled_signal_number, frame) sys.exit(0) old_signal_handler = signal.signal(signal_number, handler) def _make_server(self): """Constructs the TensorBoard WSGI app and instantiates the server.""" app = application.standard_tensorboard_wsgi(self.flags, self.plugin_loaders, self.assets_zip_provider) return self.server_class(app, self.flags) @six.add_metaclass(ABCMeta) class TensorBoardServer(object): """Class for customizing TensorBoard WSGI app serving.""" @abstractmethod def __init__(self, wsgi_app, flags): """Create a flag-configured HTTP server for TensorBoard's WSGI app. Args: wsgi_app: The TensorBoard WSGI application to create a server for. flags: argparse.Namespace instance of TensorBoard flags. 
""" raise NotImplementedError() @abstractmethod def serve_forever(self): """Blocking call to start serving the TensorBoard server.""" raise NotImplementedError() @abstractmethod def get_url(self): """Returns a URL at which this server should be reachable.""" raise NotImplementedError() class TensorBoardServerException(Exception): """Exception raised by TensorBoardServer for user-friendly errors. Subclasses of TensorBoardServer can raise this exception in order to generate a clean error message for the user rather than a stacktrace. """ def __init__(self, msg): self.msg = msg class TensorBoardPortInUseError(TensorBoardServerException): """Error raised when attempting to bind to a port that is in use. This should be raised when it is expected that binding to another similar port would succeed. It is used as a signal to indicate that automatic port searching should continue rather than abort. """ pass def with_port_scanning(cls): """Create a server factory that performs port scanning. This function returns a callable whose signature matches the specification of `TensorBoardServer.__init__`, using `cls` as an underlying implementation. It passes through `flags` unchanged except in the case that `flags.port is None`, in which case it repeatedly instantiates the underlying server with new port suggestions. Args: cls: A valid implementation of `TensorBoardServer`. This class's initializer should raise a `TensorBoardPortInUseError` upon failing to bind to a port when it is expected that binding to another nearby port might succeed. The initializer for `cls` will only ever be invoked with `flags` such that `flags.port is not None`. Returns: A function that implements the `__init__` contract of `TensorBoardServer`. """ def init(wsgi_app, flags): # base_port: what's the first port to which we should try to bind? # should_scan: if that fails, shall we try additional ports? # max_attempts: how many ports shall we try? should_scan = flags.port is None base_port = core_plugin.DEFAULT_PORT if flags.port is None else flags.port max_attempts = 10 if should_scan else 1 if base_port > 0xFFFF: raise TensorBoardServerException( 'TensorBoard cannot bind to port %d > %d' % (base_port, 0xFFFF) ) max_attempts = 10 if should_scan else 1 base_port = min(base_port + max_attempts, 0x10000) - max_attempts for port in xrange(base_port, base_port + max_attempts): subflags = argparse.Namespace(**vars(flags)) subflags.port = port try: return cls(wsgi_app=wsgi_app, flags=subflags) except TensorBoardPortInUseError: if not should_scan: raise # All attempts failed to bind. raise TensorBoardServerException( 'TensorBoard could not bind to any port around %s ' '(tried %d times)' % (base_port, max_attempts)) return init class WerkzeugServer(serving.ThreadedWSGIServer, TensorBoardServer): """Implementation of TensorBoardServer using the Werkzeug dev server.""" # ThreadedWSGIServer handles this in werkzeug 0.12+ but we allow 0.11.x. daemon_threads = True def __init__(self, wsgi_app, flags): self._flags = flags host = flags.host port = flags.port # Without an explicit host, we default to serving on all interfaces, # and will attempt to serve both IPv4 and IPv6 traffic through one # socket. 
self._auto_wildcard = not host if self._auto_wildcard: host = self._get_wildcard_address(port) self._fix_werkzeug_logging() try: super(WerkzeugServer, self).__init__(host, port, wsgi_app) except socket.error as e: if hasattr(errno, 'EACCES') and e.errno == errno.EACCES: raise TensorBoardServerException( 'TensorBoard must be run as superuser to bind to port %d' % port) elif hasattr(errno, 'EADDRINUSE') and e.errno == errno.EADDRINUSE: if port == 0: raise TensorBoardServerException( 'TensorBoard unable to find any open port') else: raise TensorBoardPortInUseError( 'TensorBoard could not bind to port %d, it was already in use' % port) elif hasattr(errno, 'EADDRNOTAVAIL') and e.errno == errno.EADDRNOTAVAIL: raise TensorBoardServerException( 'TensorBoard could not bind to unavailable address %s' % host) elif hasattr(errno, 'EAFNOSUPPORT') and e.errno == errno.EAFNOSUPPORT: raise TensorBoardServerException( 'Tensorboard could not bind to unsupported address family %s' % host) # Raise the raw exception if it wasn't identifiable as a user error. raise def _get_wildcard_address(self, port): """Returns a wildcard address for the port in question. This will attempt to follow the best practice of calling getaddrinfo() with a null host and AI_PASSIVE to request a server-side socket wildcard address. If that succeeds, this returns the first IPv6 address found, or if none, then returns the first IPv4 address. If that fails, then this returns the hardcoded address "::" if socket.has_ipv6 is True, else "0.0.0.0". """ fallback_address = '::' if socket.has_ipv6 else '0.0.0.0' if hasattr(socket, 'AI_PASSIVE'): try: addrinfos = socket.getaddrinfo(None, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_PASSIVE) except socket.gaierror as e: logger.warn('Failed to auto-detect wildcard address, assuming %s: %s', fallback_address, str(e)) return fallback_address addrs_by_family = defaultdict(list) for family, _, _, _, sockaddr in addrinfos: # Format of the "sockaddr" socket address varies by address family, # but [0] is always the IP address portion. addrs_by_family[family].append(sockaddr[0]) if hasattr(socket, 'AF_INET6') and addrs_by_family[socket.AF_INET6]: return addrs_by_family[socket.AF_INET6][0] if hasattr(socket, 'AF_INET') and addrs_by_family[socket.AF_INET]: return addrs_by_family[socket.AF_INET][0] logger.warn('Failed to auto-detect wildcard address, assuming %s', fallback_address) return fallback_address def server_bind(self): """Override to enable IPV4 mapping for IPV6 sockets when desired. The main use case for this is so that when no host is specified, TensorBoard can listen on all interfaces for both IPv4 and IPv6 connections, rather than having to choose v4 or v6 and hope the browser didn't choose the other one. """ socket_is_v6 = ( hasattr(socket, 'AF_INET6') and self.socket.family == socket.AF_INET6) has_v6only_option = ( hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY')) if self._auto_wildcard and socket_is_v6 and has_v6only_option: try: self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) except socket.error as e: # Log a warning on failure to dual-bind, except for EAFNOSUPPORT # since that's expected if IPv4 isn't supported at all (IPv6-only). 
        if hasattr(errno, 'EAFNOSUPPORT') and e.errno != errno.EAFNOSUPPORT:
          logger.warn('Failed to dual-bind to IPv4 wildcard: %s', str(e))
    super(WerkzeugServer, self).server_bind()

  def handle_error(self, request, client_address):
    """Override to get rid of noisy EPIPE errors."""
    del request  # unused
    # Kludge to override a SocketServer.py method so we can get rid of noisy
    # EPIPE errors. They're kind of a red herring as far as errors go. For
    # example, `curl -N http://localhost:6006/ | head` will cause an EPIPE.
    exc_info = sys.exc_info()
    e = exc_info[1]
    if isinstance(e, IOError) and e.errno == errno.EPIPE:
      logger.warn('EPIPE caused by %s in HTTP serving' % str(client_address))
    else:
      logger.error('HTTP serving error', exc_info=exc_info)

  def get_url(self):
    if self._auto_wildcard:
      display_host = socket.gethostname()
    else:
      host = self._flags.host
      display_host = (
          '[%s]' % host if ':' in host and not host.startswith('[') else host)
    return 'http://%s:%d%s/' % (display_host, self.server_port,
                                self._flags.path_prefix.rstrip('/'))

  def _fix_werkzeug_logging(self):
    """Fix werkzeug logging setup so it inherits TensorBoard's log level.

    This addresses a change in werkzeug 0.15.0+ [1] that causes it to set its
    own log level to INFO regardless of the root logger configuration. We
    instead want werkzeug to inherit TensorBoard's root logger log level
    (set via absl to WARNING by default).

    [1]: https://github.com/pallets/werkzeug/commit/4cf77d25858ff46ac7e9d64ade054bf05b41ce12
    """
    # Log once at DEBUG to force werkzeug to initialize its singleton logger,
    # which sets the logger level to INFO if it is unset, and then access that
    # object via logging.getLogger('werkzeug') to durably revert the level to
    # unset (and thus make messages logged to it inherit the root logger level).
    self.log('debug', 'Fixing werkzeug logger to inherit TensorBoard log level')
    logging.getLogger('werkzeug').setLevel(logging.NOTSET)


create_port_scanning_werkzeug_server = with_port_scanning(WerkzeugServer)
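The TensorBoard class in program.py is designed to be driven programmatically as well as from the command line: configure() parses flags, then launch() starts the server on a daemon thread. A minimal usage sketch, assuming the tensorboard package with its default plugins is installed; the log directory path is hypothetical:

from tensorboard import program

program.setup_environment()   # recommended, but changes process-global state

tb = program.TensorBoard()    # default plugins and stock web assets
tb.configure(argv=['tensorboard', '--logdir', '/tmp/my_logs'])
url = tb.launch()             # serve_forever() runs on a daemon thread
print('TensorBoard is listening on %s' % url)

Calling main() instead of launch() would block the current thread and return a process exit code, which is how the tensorboard CLI entry point uses this class.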
alarmcontroller.py
#!/usr/bin/python3
"""
DIYHA Alarm Controller: Manage a simple digital high or low GPIO pin.
"""

# The MIT License (MIT)
#
# Copyright (c) 2020 parttimehacker@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

from threading import Thread
from time import sleep

import RPi.GPIO as GPIO


class AlarmController:
    """ Abstract and manage an alarm GPIO pin. """

    def __init__(self, pin, interval=2):
        """ Initialize the alarm GPIO pin. """
        self.alarm_pin = pin
        GPIO.setmode(GPIO.BCM)  # Broadcom pin-numbering scheme
        GPIO.setup(self.alarm_pin, GPIO.OUT)  # LED pin set as output
        GPIO.output(self.alarm_pin, GPIO.LOW)
        self.active = False
        self.pulsing = False
        self.interval = interval

    def start(self):
        """ Start the alarm thread. """
        self.active = True
        led_thread = Thread(target=self.manage_alarm, args=())
        led_thread.daemon = True
        led_thread.start()

    def manage_alarm(self):
        """ Sleep and then flash the LED while pulsing is enabled. """
        while self.active:
            if self.pulsing:
                GPIO.output(self.alarm_pin, GPIO.HIGH)
                sleep(2.0)
                GPIO.output(self.alarm_pin, GPIO.LOW)
            sleep(self.interval)

    def sound_alarm(self, turn_on):
        """ Turn power to the GPIO pin on or off.
            Pull down to activate the relay.
        """
        if turn_on:
            GPIO.output(self.alarm_pin, GPIO.HIGH)
        else:
            GPIO.output(self.alarm_pin, GPIO.LOW)

    def sound_pulsing_alarm(self, turn_on):
        """ Turn pulsing of the GPIO pin on or off.
            Pull down to activate the relay.
        """
        if turn_on:
            self.pulsing = True
        else:
            self.pulsing = False
            GPIO.output(self.alarm_pin, GPIO.LOW)

    def reset(self):
        """ Turn power off to the GPIO pin. """
        self.pulsing = False
        GPIO.output(self.alarm_pin, GPIO.LOW)
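A minimal usage sketch for the controller above, assuming it runs on a Raspberry Pi with RPi.GPIO available; the BCM pin number and the timings are illustrative only:

import time

from alarmcontroller import AlarmController

alarm = AlarmController(pin=17, interval=2)  # BCM pin 17 is an assumption
alarm.start()                     # background thread drives the pulsing mode
alarm.sound_pulsing_alarm(True)   # pulse the pin roughly every `interval` seconds
time.sleep(10)
alarm.sound_pulsing_alarm(False)
alarm.sound_alarm(True)           # steady on
time.sleep(2)
alarm.reset()                     # drive the pin low and stop pulsing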
util.py
import ConfigParser import datetime import inspect import os import sys import traceback __all__ = [] def _write_message(kind, message): # Get the file/line where this message was generated. f = inspect.currentframe() # Step out of _write_message, and then out of wrapper. f = f.f_back.f_back file,line,_,_,_ = inspect.getframeinfo(f) location = '%s:%d' % (os.path.basename(file), line) print >>sys.stderr, '%s: %s: %s' % (location, kind, message) note = lambda message: _write_message('note', message) warning = lambda message: _write_message('warning', message) error = lambda message: _write_message('error', message) fatal = lambda message: (_write_message('fatal error', message), sys.exit(1)) def sorted(l, **kwargs): l = list(l) l.sort(**kwargs) return l def list_split(list, item): parts = [] while item in list: index = list.index(item) parts.append(list[:index]) list = list[index+1:] parts.append(list) return parts def pairs(l): return zip(l, l[1:]) ### class EnumVal(object): def __init__(self, enum, name, value): self.enum = enum self.name = name self.value = value def __repr__(self): return '%s.%s' % (self.enum._name, self.name) class Enum(object): def __init__(self, name, **kwargs): self._name = name self.__items = dict((name, EnumVal(self, name, value)) for name,value in kwargs.items()) self.__reverse_map = dict((e.value,e.name) for e in self.__items.values()) self.__dict__.update(self.__items) def get_value(self, name): return self.__items.get(name) def get_name(self, value): return self.__reverse_map.get(value) def get_by_value(self, value): return self.__items.get(self.__reverse_map.get(value)) def contains(self, item): if not isinstance(item, EnumVal): return False return item.enum == self class multidict: def __init__(self, elts=()): self.data = {} for key,value in elts: self[key] = value def __contains__(self, item): return item in self.data def __getitem__(self, item): return self.data[item] def __setitem__(self, key, value): if key in self.data: self.data[key].append(value) else: self.data[key] = [value] def items(self): return self.data.items() def values(self): return self.data.values() def keys(self): return self.data.keys() def __len__(self): return len(self.data) def get(self, key, default=None): return self.data.get(key, default) def todict(self): return self.data.copy() ### class Preferences(object): def __init__(self, path): self.path = path self.config_path = os.path.join(path, "config") self.options = ConfigParser.RawConfigParser() # Load the config file, if present. if os.path.exists(self.config_path): self.options.read(self.config_path) def save(self): file = open(self.config_path, "w") try: self.options.write(file) finally: file.close() def get(self, section, option, default = None): if self.options.has_option(section, option): return self.options.get(section, option) else: return default def getboolean(self, section, option, default = None): if self.options.has_option(section, option): return self.options.getboolean(section, option) else: return default def setboolean(self, section, option, value): return self.options.set(section, option, str(value)) _prefs = None def get_prefs(): global _prefs if _prefs is None: _prefs = Preferences(os.path.expanduser("~/.llvmlab")) # Allow dynamic override of only_use_cache option. if os.environ.get("LLVMLAB_ONLY_USE_CACHE"): _prefs.setboolean("ci", "only_use_cache", True) return _prefs ### import threading import Queue def detect_num_cpus(): """ Detects the number of CPUs on a system. Cribbed from pp. 
""" # Linux, Unix and MacOS: if hasattr(os, "sysconf"): if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"): # Linux & Unix: ncpus = os.sysconf("SC_NPROCESSORS_ONLN") if isinstance(ncpus, int) and ncpus > 0: return ncpus else: # OSX: return int(os.popen2("sysctl -n hw.ncpu")[1].read()) # Windows: if os.environ.has_key("NUMBER_OF_PROCESSORS"): ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]) if ncpus > 0: return ncpus return 1 # Default def execute_task_on_threads(fn, iterable, num_threads = None): """execute_task_on_threads(fn, iterable) -> iterable Given a task function to run on an iterable list of work items, execute the task on each item in the list using some number of threads, and yield the results of the task function. If a task function throws an exception, the exception will be printed but not returned to the caller. Clients which wish to control exceptions should handle them inside the task function. """ def push_work(): for item in iterable: work_queue.put(item) # Push sentinels to cause workers to terminate. for i in range(num_threads): work_queue.put(_sentinel) def do_work(): while True: # Read a work item. item = work_queue.get() # If we hit a sentinel, propogate it to the output queue and # terminate. if item is _sentinel: output_queue.put(_sentinel) break # Otherwise, execute the task and push to the output queue. try: output = (None, fn(item)) except Exception, e: output = ('error', sys.exc_info()) output_queue.put(output) # Compute the number of threads to use. if num_threads is None: num_threads = detect_num_cpus() # Create two queues, one for feeding items to the works and another for # consuming the output. work_queue = Queue.Queue() output_queue = Queue.Queue() # Create our unique sentinel object. _sentinel = [] # Create and run thread to push items onto the work queue. threading.Thread(target=push_work).start() # Create and run the worker threads. for i in range(num_threads): t = threading.Thread(target=do_work) t.daemon = True t.start() # Read items from the output queue until all threads are finished. finished = 0 while finished != num_threads: item = output_queue.get() # Check for termination marker. if item is _sentinel: finished += 1 continue # Check for exceptions. if item[0] == 'error': _,(t,v,tb) = item traceback.print_exception(t, v, tb) continue assert item[0] is None yield item[1] def timestamp(): return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') ### import collections class orderedset(object): def __init__(self, items=None): self.base = collections.OrderedDict() if items is not None: self.update(items) def update(self, items): for item in items: self.add(item) def add(self, item): self.base[item] = None def remove(self, item): del self.base[item] def __nonzero__(self): return bool(self.base) def __len__(self): return len(self.base) def __iter__(self): return iter(self.base) def __contains__(self, item): return item in self.base
monitored_session_test.py
# pylint: disable=g-bad-file-header # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for monitored_session.""" import collections import glob import os import sys import threading import time import traceback from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import debug_pb2 from tensorflow.python.checkpoint import checkpoint_management from tensorflow.python.client import session as session_lib from tensorflow.python.distribute import collective_all_reduce_strategy from tensorflow.python.distribute import distribute_coordinator from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import load as saved_model_load from tensorflow.python.summary import summary from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import coordinator from tensorflow.python.training import monitored_session from tensorflow.python.training import saver as saver_lib from tensorflow.python.training import session_run_hook from tensorflow.python.training import summary_io from tensorflow.python.training import training_util def latest_summaries(base_dir): """Parse summary events from latest event file in base_dir.""" file_paths = glob.glob(os.path.join(base_dir, 'events.*')) file_path = sorted(file_paths)[-1] if file_paths else None latest_events = summary_io.summary_iterator(file_path) if file_path else [] return [e for e in latest_events if e.HasField('summary')] class ScaffoldTest(test.TestCase): """Scaffold tests.""" def test_nothing_created_before_finalize(self): with ops.Graph().as_default(): scaffold = monitored_session.Scaffold() self.assertEqual(None, scaffold.init_op) self.assertEqual(None, scaffold.init_feed_dict) self.assertEqual(None, scaffold.init_fn) self.assertEqual(None, scaffold.ready_op) self.assertEqual(None, scaffold.ready_for_local_init_op) self.assertEqual(None, scaffold.local_init_op) self.assertEqual(None, scaffold.saver) def test_defaults_empty_graph(self): with ops.Graph().as_default(): scaffold = monitored_session.Scaffold() variables.VariableV1(1, name='my_var') variables.VariableV1( 2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES]) scaffold.finalize() self.assertTrue(isinstance(scaffold.init_op, ops.Operation)) self.assertEqual(None, scaffold.init_feed_dict) self.assertEqual(None, 
scaffold.init_fn) self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor)) self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor)) self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation)) self.assertEqual(None, scaffold.local_init_feed_dict) self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver)) with self.cached_session() as sess: self.assertItemsEqual([b'my_var', b'my_local_var'], sess.run(scaffold.ready_op)) self.assertItemsEqual([b'my_var'], sess.run(scaffold.ready_for_local_init_op)) sess.run(scaffold.init_op) self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op))) sess.run(scaffold.local_init_op) self.assertEqual(0, len(sess.run(scaffold.ready_op))) def test_defaults_no_variables(self): with ops.Graph().as_default(): scaffold = monitored_session.Scaffold() constant_op.constant(1, name='my_const') scaffold.finalize() self.assertTrue(isinstance(scaffold.init_op, ops.Operation)) self.assertEqual(None, scaffold.init_feed_dict) self.assertEqual(None, scaffold.init_fn) self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor)) self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor)) self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation)) self.assertEqual(None, scaffold.local_init_feed_dict) self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver)) def test_caches_values(self): with ops.Graph().as_default(): variables.VariableV1([1]) scaffold1 = monitored_session.Scaffold() scaffold1.finalize() scaffold2 = monitored_session.Scaffold() scaffold2.finalize() self.assertEqual(scaffold1.init_op, scaffold2.init_op) self.assertEqual(scaffold1.ready_op, scaffold2.ready_op) self.assertEqual(scaffold1.ready_for_local_init_op, scaffold2.ready_for_local_init_op) self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op) self.assertEqual(scaffold1.saver, scaffold2.saver) def test_raise_error_if_more_than_one_cached_item(self): with ops.Graph().as_default(): variables.VariableV1([1]) ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver()) ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver()) with self.assertRaisesRegex(RuntimeError, 'More than one item'): monitored_session.Scaffold().finalize() def test_uses_passed_values(self): with ops.Graph().as_default(): variables.VariableV1([1]) saver = saver_lib.Saver() scaffold = monitored_session.Scaffold( init_op=2, init_feed_dict=3, init_fn=lambda scaffold, sess: 4, ready_op=5, ready_for_local_init_op=6, local_init_op=7, local_init_feed_dict=8, saver=saver) scaffold.finalize() self.assertEqual(2, scaffold.init_op) self.assertEqual(3, scaffold.init_feed_dict) self.assertTrue(callable(scaffold.init_fn)) self.assertEqual(5, scaffold.ready_op) self.assertEqual(6, scaffold.ready_for_local_init_op) self.assertEqual(7, scaffold.local_init_op) self.assertEqual(8, scaffold.local_init_feed_dict) self.assertEqual(saver, scaffold.saver) def test_graph_is_finalized(self): with ops.Graph().as_default(): variables.VariableV1([1]) monitored_session.Scaffold().finalize() with self.assertRaisesRegex(RuntimeError, 'Graph is finalized and cannot be modified'): constant_op.constant([0]) def test_new_scaffold_from_default_scaffold(self): scaffold1 = monitored_session.Scaffold() with ops.Graph().as_default(): variables.VariableV1([1]) saver = saver_lib.Saver() scaffold2 = monitored_session.Scaffold( init_op=2, init_feed_dict=3, init_fn=lambda scaffold, sess: 4, ready_op=5, ready_for_local_init_op=6, local_init_op=7, local_init_feed_dict=8, saver=saver, copy_from_scaffold=scaffold1) 
scaffold2.finalize() self.assertEqual(2, scaffold2.init_op) self.assertEqual(3, scaffold2.init_feed_dict) self.assertTrue(callable(scaffold2.init_fn)) self.assertEqual(5, scaffold2.ready_op) self.assertEqual(6, scaffold2.ready_for_local_init_op) self.assertEqual(7, scaffold2.local_init_op) self.assertEqual(8, scaffold2.local_init_feed_dict) self.assertEqual(saver, scaffold2.saver) def test_new_scaffold_from_existing_scaffold(self): with ops.Graph().as_default(): variables.VariableV1([1]) saver = saver_lib.Saver() scaffold1 = monitored_session.Scaffold( init_op=2, init_feed_dict=3, init_fn=lambda scaffold, sess: 4, ready_op=5, ready_for_local_init_op=6, local_init_op=7, local_init_feed_dict=8, saver=saver) scaffold2 = monitored_session.Scaffold( init_op=4, init_feed_dict=6, init_fn=lambda scaffold, sess: 8, ready_op=10, ready_for_local_init_op=12, local_init_op=14, local_init_feed_dict=15, saver=saver, copy_from_scaffold=scaffold1) scaffold2.finalize() self.assertEqual(4, scaffold2.init_op) self.assertEqual(6, scaffold2.init_feed_dict) self.assertTrue(callable(scaffold2.init_fn)) self.assertEqual(10, scaffold2.ready_op) self.assertEqual(12, scaffold2.ready_for_local_init_op) self.assertEqual(14, scaffold2.local_init_op) self.assertEqual(15, scaffold2.local_init_feed_dict) self.assertEqual(saver, scaffold2.saver) def test_copy_from_scaffold_is_scaffold(self): with ops.Graph().as_default(): with self.assertRaisesRegex( TypeError, 'copy_from_scaffold is not a Scaffold instance'): monitored_session.Scaffold(copy_from_scaffold=1) def _test_dir(temp_dir, test_name): """Create an empty dir to use for tests. Args: temp_dir: Tmp directory path. test_name: Name of the test. Returns: Absolute path to the test directory. """ test_dir = os.path.join(temp_dir, test_name) if os.path.isdir(test_dir): for f in glob.glob('%s/*' % test_dir): os.remove(f) else: os.makedirs(test_dir) return test_dir class FakeHook(session_run_hook.SessionRunHook): def __init__(self): self.should_stop = False self.request = None self.call_counter = collections.Counter() self.last_run_context = None self.last_run_values = None def begin(self): self.call_counter['begin'] += 1 def after_create_session(self, session, coord): # pylint: disable=unused-argument self.call_counter['after_create_session'] += 1 def before_run(self, run_context): self.call_counter['before_run'] += 1 self.last_run_context = run_context return self.request def after_run(self, run_context, run_values): self.call_counter['after_run'] += 1 self.last_run_values = run_values if self.should_stop: run_context.request_stop() def end(self, session): self.call_counter['end'] += 1 class MonitoredTrainingSessionTest(test.TestCase): """Tests MonitoredTrainingSession.""" def test_saving_restoring_checkpoint(self): logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir) as session: self.assertEqual(0, session.run(gstep)) self.assertEqual(1, session.run(do_step)) self.assertEqual(2, session.run(do_step)) # A restart will find the checkpoint and recover automatically. 
with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir) as session: self.assertEqual(2, session.run(gstep)) def test_save_checkpoint_steps(self): logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_steps') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir, save_checkpoint_steps=100, log_step_count_steps=10) as session: for _ in range(100): session.run(new_gstep) # A restart will find the checkpoint and recover automatically. with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir) as session: self.assertEqual(100, session.run(gstep)) def test_save_checkpoint_secs(self): logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_secs') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir, save_checkpoint_secs=0.1, log_step_count_steps=10) as session: session.run(new_gstep) time.sleep(0.2) for _ in range(10): session.run(new_gstep) # A restart will find the checkpoint and recover automatically. with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir) as session: self.assertEqual(11, session.run(gstep)) def test_save_restore_checkpoint_v1_saved_model(self): def _write_v1_simple_saved_model(export_dir): # Create v1 Saved Model with single variable `w0` with value 5.0. builder = saved_model_builder.SavedModelBuilder(export_dir) with ops.Graph().as_default(): _ = resource_variable_ops.ResourceVariable(5.0) with self.cached_session() as session: session.run(variables.global_variables_initializer()) builder.add_meta_graph_and_variables(session, ['foo']) builder.save() test_dir = _test_dir(self.get_temp_dir(), 'saved_model') _write_v1_simple_saved_model(test_dir) with ops.Graph().as_default(): # Load saved model with `load_v1_in_v2`. model = saved_model_load.load(test_dir) w0 = model.variables[0] # Define operation that increments `w0`. w_add = w0.assign_add(1.) gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) with monitored_session.MonitoredTrainingSession( checkpoint_dir=test_dir) as session: w1 = session.run(w_add) self.assertEqual(w1, 6.) session.run(new_gstep) w2 = session.run(w_add) self.assertEqual(w2, 7.) # Stop and resume training. with monitored_session.MonitoredTrainingSession( checkpoint_dir=test_dir) as session: # `w0` saves its value of 7. w3 = session.run(w_add) self.assertEqual(w3, 8.) 
def test_summaries_steps(self): logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) summary.scalar('my_summary_tag', new_gstep * 2) with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir, save_summaries_steps=100, log_step_count_steps=10) as session: for _ in range(101): session.run(new_gstep) summaries = latest_summaries(logdir) tags = [s.summary.value[0].tag for s in summaries] self.assertIn('my_summary_tag', tags) self.assertIn('global_step/sec', tags) def test_summaries_secs(self): logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) summary.scalar('my_summary_tag', new_gstep * 2) with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir, save_summaries_steps=None, save_summaries_secs=0.1, log_step_count_steps=10) as session: session.run(new_gstep) time.sleep(0.2) for _ in range(101): session.run(new_gstep) summaries = latest_summaries(logdir) tags = [s.summary.value[0].tag for s in summaries] self.assertIn('my_summary_tag', tags) self.assertIn('global_step/sec', tags) def test_custom_saving(self): logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint') fake_hook = FakeHook() with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir, chief_only_hooks=[fake_hook], save_checkpoint_secs=0) as session: self.assertEqual(0, session.run(gstep)) self.assertEqual(1, session.run(do_step)) self.assertEqual(2, session.run(do_step)) # Check whether custom hook called or not self.assertEqual(1, fake_hook.call_counter['begin']) # A restart will not find the checkpoint, since we didn't save. 
with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir) as session: self.assertEqual(0, session.run(gstep)) def test_save_graph_def(self): logdir = _test_dir(self.get_temp_dir(), 'test_save_graph_def') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir, save_checkpoint_steps=1, save_graph_def=True) as session: self.assertIn('graph.pbtxt', os.listdir(logdir)) self.assertLen(glob.glob(os.path.join(logdir, '*.meta')), 1) session.run(new_gstep) self.assertLen(glob.glob(os.path.join(logdir, '*.meta')), 2) def test_save_graph_def_false(self): logdir = _test_dir(self.get_temp_dir(), 'test_save_graph_def') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir, save_checkpoint_steps=1, save_graph_def=False) as session: self.assertNotIn('graph.pbtxt', os.listdir(logdir)) self.assertEmpty(glob.glob(os.path.join(logdir, '*.meta'))) session.run(new_gstep) self.assertEmpty(glob.glob(os.path.join(logdir, '*.meta'))) class MockExtended(object): def __init__(self, between_graph, should_init, should_checkpoint, should_save_summary): self.experimental_between_graph = between_graph self.experimental_should_init = should_init self.should_checkpoint = should_checkpoint self.should_save_summary = should_save_summary class MockStrategy(object): def __init__(self, between_graph=False, should_init=True, should_checkpoint=None, should_save_summary=None): self.extended = MockExtended(between_graph, should_init, should_checkpoint, should_save_summary) class MonitoredTrainingSessionWithDistributeCoordinatorTest(test.TestCase): """Test distribute coordinator controls summary saving and checkpointing.""" def test_summary_hook_enabled(self): context = distribute_coordinator._WorkerContext( MockStrategy(should_save_summary=True), None, None, None) logdir = _test_dir(self.get_temp_dir(), 'test_summaries_enabled') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) summary.scalar('my_summary_tag', new_gstep * 2) with context, monitored_session.MonitoredTrainingSession( checkpoint_dir=logdir, save_summaries_steps=100, log_step_count_steps=10) as session: for _ in range(101): session.run(new_gstep) summaries = latest_summaries(logdir) tags = [s.summary.value[0].tag for s in summaries] self.assertIn('my_summary_tag', tags) self.assertIn('global_step/sec', tags) def test_summary_hook_disabled(self): context = distribute_coordinator._WorkerContext( MockStrategy(should_save_summary=False), None, None, None) logdir = _test_dir(self.get_temp_dir(), 'test_summaries_disabled') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) summary.scalar('my_summary_tag', new_gstep * 2) with context, monitored_session.MonitoredTrainingSession( checkpoint_dir=logdir, save_summaries_steps=100, log_step_count_steps=10) as session: for _ in range(101): session.run(new_gstep) # No summary is saved. 
summaries = latest_summaries(logdir) self.assertEqual(len(summaries), 0) def test_checkpoint_hook_enabled(self): context = distribute_coordinator._WorkerContext( MockStrategy(should_checkpoint=True), None, None, None) logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_enabled') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) with context, monitored_session.MonitoredTrainingSession( checkpoint_dir=logdir, save_checkpoint_steps=100, log_step_count_steps=10) as session: for _ in range(100): session.run(new_gstep) # A restart will find the checkpoint and recover automatically. with monitored_session.MonitoredTrainingSession( is_chief=True, checkpoint_dir=logdir) as session: self.assertEqual(100, session.run(gstep)) def test_checkpoint_hook_disabled(self): context = distribute_coordinator._WorkerContext( MockStrategy(should_checkpoint=False), None, None, None) logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) with context, monitored_session.MonitoredTrainingSession( checkpoint_dir=logdir, save_checkpoint_steps=100, log_step_count_steps=10) as session: for _ in range(100): session.run(new_gstep) # No checkpoint is saved. checkpoint = checkpoint_management.latest_checkpoint(logdir) self.assertIsNone(checkpoint) def test_checkpoint_hook_enable_on_non_chief_with_collective_ops(self): strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy() strategy.extended._is_chief = False context = distribute_coordinator._WorkerContext(strategy, None, 'worker', 1) logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() new_gstep = state_ops.assign_add(gstep, 1) with context, monitored_session.MonitoredTrainingSession( checkpoint_dir=logdir, save_checkpoint_steps=100, log_step_count_steps=10) as session: for _ in range(100): session.run(new_gstep) # No checkpoint is saved. checkpoint = checkpoint_management.latest_checkpoint(logdir) self.assertIsNone(checkpoint) # But saved to a temporary directory. 
checkpoint = checkpoint_management.latest_checkpoint( os.path.join(logdir, 'tmp_worker_1')) self.assertIsNotNone(checkpoint) class StopAtNSession(monitored_session._WrappedSession): """A wrapped session that stops at the N-th call to _check_stop.""" def __init__(self, sess, n): super(StopAtNSession, self).__init__(sess) self._count = n def _check_stop(self): if self._count == 0: return True self._count -= 1 return False class WrappedSessionTest(test.TestCase): """_WrappedSession tests.""" @test_util.run_deprecated_v1 def test_properties(self): with self.cached_session() as sess: constant_op.constant(0.0) wrapped_sess = monitored_session._WrappedSession(sess) self.assertEqual(sess.graph, wrapped_sess.graph) self.assertEqual(sess.sess_str, wrapped_sess.sess_str) @test_util.run_deprecated_v1 def test_should_stop_on_close(self): with self.cached_session() as sess: wrapped_sess = monitored_session._WrappedSession(sess) self.assertFalse(wrapped_sess.should_stop()) wrapped_sess.close() self.assertTrue(wrapped_sess.should_stop()) @test_util.run_deprecated_v1 def test_should_stop_uses_check_stop(self): with self.cached_session() as sess: wrapped_sess = StopAtNSession(sess, 3) self.assertFalse(wrapped_sess.should_stop()) self.assertFalse(wrapped_sess.should_stop()) self.assertFalse(wrapped_sess.should_stop()) self.assertTrue(wrapped_sess.should_stop()) @test_util.run_deprecated_v1 def test_should_stop_delegates_to_wrapped_session(self): with self.cached_session() as sess: wrapped_sess0 = StopAtNSession(sess, 4) wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0) self.assertFalse(wrapped_sess1.should_stop()) self.assertFalse(wrapped_sess1.should_stop()) self.assertFalse(wrapped_sess1.should_stop()) self.assertFalse(wrapped_sess1.should_stop()) self.assertTrue(wrapped_sess1.should_stop()) @test_util.run_deprecated_v1 def test_close_twice(self): with self.cached_session() as sess: wrapped_sess = monitored_session._WrappedSession(sess) wrapped_sess.close() self.assertTrue(wrapped_sess.should_stop()) wrapped_sess.close() self.assertTrue(wrapped_sess.should_stop()) @test_util.run_deprecated_v1 def test_run(self): with self.cached_session() as sess: c = constant_op.constant(0) v = array_ops.identity(c) self.assertEqual(42, sess.run(v, feed_dict={c: 42})) wrapped_sess = monitored_session._WrappedSession(sess) self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51})) def busy_wait_for_coord_stop(coord): while not coord.should_stop(): time.sleep(0.001) class CoordinatedSessionTest(test.TestCase): """_CoordinatedSession tests.""" @test_util.run_deprecated_v1 def test_properties(self): with self.cached_session() as sess: constant_op.constant(0.0) coord = coordinator.Coordinator() coord_sess = monitored_session._CoordinatedSession(sess, coord) self.assertEqual(sess.graph, coord_sess.graph) self.assertEqual(sess.sess_str, coord_sess.sess_str) @test_util.run_deprecated_v1 def test_run(self): with self.cached_session() as sess: c = constant_op.constant(0) v = array_ops.identity(c) coord = coordinator.Coordinator() coord_sess = monitored_session._CoordinatedSession(sess, coord) self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42})) @test_util.run_deprecated_v1 def test_should_stop_on_close(self): with self.cached_session() as sess: coord = coordinator.Coordinator() coord_sess = monitored_session._CoordinatedSession(sess, coord) self.assertFalse(coord_sess.should_stop()) coord_sess.close() self.assertTrue(coord_sess.should_stop()) @test_util.run_deprecated_v1 def 
test_should_stop_on_coord_stop(self): with self.cached_session() as sess: coord = coordinator.Coordinator() coord_sess = monitored_session._CoordinatedSession(sess, coord) self.assertFalse(coord_sess.should_stop()) coord.request_stop() self.assertTrue(coord_sess.should_stop()) @test_util.run_deprecated_v1 def test_dont_request_stop_on_exception_in_main_thread(self): with self.cached_session() as sess: c = constant_op.constant(0) v = array_ops.identity(c) coord = coordinator.Coordinator() coord_sess = monitored_session._CoordinatedSession(sess, coord) self.assertFalse(coord_sess.should_stop()) self.assertEqual(0, coord_sess.run(c)) self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1})) with self.assertRaisesRegex(TypeError, 'None has invalid type'): coord_sess.run([None], feed_dict={c: 2}) self.assertFalse(coord.should_stop()) self.assertFalse(coord_sess.should_stop()) @test_util.run_deprecated_v1 def test_stop_threads_on_close_after_exception(self): with self.cached_session() as sess: c = constant_op.constant(0) v = array_ops.identity(c) coord = coordinator.Coordinator() threads = [ threading.Thread( target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3) ] for t in threads: coord.register_thread(t) t.start() coord_sess = monitored_session._CoordinatedSession(sess, coord) self.assertFalse(coord_sess.should_stop()) for t in threads: self.assertTrue(t.is_alive()) self.assertEqual(0, coord_sess.run(c)) for t in threads: self.assertTrue(t.is_alive()) self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1})) for t in threads: self.assertTrue(t.is_alive()) with self.assertRaisesRegex(TypeError, 'None has invalid type'): coord_sess.run([None], feed_dict={c: 2}) coord_sess.close() for t in threads: self.assertFalse(t.is_alive()) self.assertTrue(coord.should_stop()) self.assertTrue(coord_sess.should_stop()) def test_stop_threads_on_close(self): with self.cached_session() as sess: coord = coordinator.Coordinator() threads = [ threading.Thread( target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3) ] for t in threads: coord.register_thread(t) t.start() coord_sess = monitored_session._CoordinatedSession(sess, coord) coord_sess.close() for t in threads: self.assertFalse(t.is_alive()) self.assertTrue(coord.should_stop()) self.assertTrue(coord_sess.should_stop()) @test_util.run_deprecated_v1 def test_propagates_exception_trace(self): assertion = control_flow_ops.Assert(False, ['This should fail.']) with self.cached_session() as sess: coord = coordinator.Coordinator(clean_stop_exception_types=()) coord_sess = monitored_session._CoordinatedSession(sess, coord) try: coord_sess.run([assertion]) self.fail('No exception was raised by assertion.') except errors_impl.InvalidArgumentError: # Extract the name of the file where the exception was first raised. _, _, exc_traceback = sys.exc_info() tb = traceback.extract_tb(exc_traceback) exc_source_file = tb[-1][0] exc_source_basename = os.path.basename(exc_source_file) # If it's monitored_session.py then the original stack trace was not # correctly propagated. self.assertIn( exc_source_basename, ['session.py', 'monitored_session.py'], 'The exception was raised from an unrecognized file. This unit ' 'test probably needs to be updated. Traceback:\n%s\n' % tb) self.assertEqual( exc_source_basename, 'session.py', 'Original stack trace was not propagated by MonitoredSession. 
' 'Traceback:\n%s' % tb) class AbortAtNSession(object): """A mock session that aborts at the N-th run call.""" def __init__(self, sess, n): self._sess = sess self._count = n def close(self): pass def run(self, *args, **kwargs): if self._count == 0: raise errors_impl.AbortedError('Aborted at N', None, None) self._count -= 1 return self._sess.run(*args, **kwargs) class StopCoordinatorWithException(session_run_hook.SessionRunHook): """With this hook Coordinator throws an exception after N-runs.""" def __init__(self, calls_before_stopping, exception_to_raise=None): self._started_the_side_thread_already = False self._lock = threading.Lock() self._stored_exception_event = threading.Event() self._calls_before_stopping = calls_before_stopping self._exception_to_raise = (exception_to_raise or errors_impl.AbortedError( None, None, 'Aborted at N')) def _maybe_stop_with_exception(self, coord): while True: with self._lock: if self._calls_before_stopping == 0: try: raise self._exception_to_raise except Exception as e: # pylint: disable=broad-except coord.request_stop(e) self._stored_exception_event.set() break def after_create_session(self, session, coord): if self._started_the_side_thread_already: return separate_thread = threading.Thread( target=self._maybe_stop_with_exception, args=(coord,)) coord.register_thread(separate_thread) separate_thread.start() self._started_the_side_thread_already = True # Coordinator will take care of joining `separate_thread`. def after_run(self, run_context, run_values): stopping_now = False with self._lock: self._calls_before_stopping -= 1 if self._calls_before_stopping == 0: stopping_now = True if stopping_now: self._stored_exception_event.wait() class FailTrainingAfterCoordinatorStopped(StopCoordinatorWithException): """With this hook training encounters an exception after N-runs.""" def __init__(self, calls_before_stopping): StopCoordinatorWithException.__init__(self, calls_before_stopping) self._coord = None def after_create_session(self, session, coord): self._coord = coord return StopCoordinatorWithException.after_create_session( self, session, coord) def after_run(self, run_context, run_values): StopCoordinatorWithException.after_run(self, run_context, run_values) try: # After a `run`, an exception could have been stored inside the # coordinator. self._coord.raise_requested_exception() except errors_impl.AbortedError: # In real world, the main thread may or may not know about the exception # that stopped the coordinator. Because the coordinator has stopped, the # main thread could have gotten stuck as well (for example, the # coordinator was supposed to execute `FIFOQueue.enqueue` while the main # thread is executing a blocking `FIFOQueue.dequeue`). After it got stuck, # the session is going to get garbage collected after some time with: raise errors_impl.CancelledError(None, None, 'Session got garbage-collected.') class CountingSessionCreator(object): """A creator that counts the number of created sessions.""" def __init__(self, session): self._initial_session = session # We only have one session per test case. We can't re-create it, thus # it shouldn't be closed. 
self._initial_session.close = lambda *args: None self._create_session_calls = 0 @property def number_of_sessions_created(self): return self._create_session_calls def create_session(self): self._create_session_calls += 1 return self._initial_session class RecoverableSessionTest(test.TestCase): """_RecoverableSession tests.""" class _SessionReturner(object): def __init__(self, sess): self._sess = sess def create_session(self): return self._sess @test_util.run_deprecated_v1 def test_properties(self): with self.cached_session() as sess: constant_op.constant(0.0) recoverable_sess = monitored_session._RecoverableSession( self._SessionReturner(sess)) self.assertEqual(sess.graph, recoverable_sess.graph) self.assertEqual(sess.sess_str, recoverable_sess.sess_str) @test_util.run_deprecated_v1 def test_run(self): with self.cached_session() as sess: c = constant_op.constant(0) v = array_ops.identity(c) recoverable_sess = monitored_session._RecoverableSession( self._SessionReturner(sess)) self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51})) @test_util.run_deprecated_v1 def test_recovery(self): with self.cached_session() as sess: class StackSessionCreator(object): def __init__(self, sess): self.sessions_to_use = [ AbortAtNSession(sess, x + 1) for x in range(3) ] def create_session(self): return self.sessions_to_use.pop(0) c = constant_op.constant(0) v = array_ops.identity(c) session_creator = StackSessionCreator(sess) # List of 3 sessions to use for recovery. The first one aborts # after 1 run() call, the second after 2 run calls, the third # after 3 run calls. self.assertEqual(3, len(session_creator.sessions_to_use)) # Make the recoverable session uses these 3 sessions in sequence by # passing a factory that pops from the session_to_use list. recoverable_sess = monitored_session._RecoverableSession(session_creator) self.assertEqual( 2, len(session_creator.sessions_to_use)) # One session popped. # Using first session. self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51})) self.assertEqual( 2, len(session_creator.sessions_to_use)) # Still 2 sessions available # This will fail and recover by picking up the second session. self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42})) self.assertEqual( 1, len(session_creator.sessions_to_use)) # Still 1 session available self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33})) self.assertEqual( 1, len(session_creator.sessions_to_use)) # Still 1 session available # This will fail and recover by picking up the last session. self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24})) self.assertEqual( 0, len(session_creator.sessions_to_use)) # All sessions used. self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11})) self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0})) # This will fail and throw a real error as the pop() will fail. with self.assertRaisesRegex(IndexError, 'pop from empty list'): recoverable_sess.run(v, feed_dict={c: -12}) @test_util.run_deprecated_v1 def test_recovery_from_coordinator_exception(self): with self.cached_session() as test_session: session_creator = CountingSessionCreator(test_session) session = monitored_session.MonitoredSession( session_creator, [StopCoordinatorWithException(calls_before_stopping=2)]) self.assertEqual(1, session_creator.number_of_sessions_created) self.assertFalse(session.should_stop()) c = constant_op.constant(0) v = array_ops.identity(c) # The coordinator will not abort during this call, since it's the call # number 0. 
self.assertEqual(51, session.run(v, feed_dict={c: 51})) self.assertFalse(session.should_stop()) # The coordinator will abort during the next call, since it's the call # number 1. self.assertEqual(42, session.run(v, feed_dict={c: 42})) # Even though the coordinator was asked to stop, the underlying session is # recreated and is to be continued. self.assertFalse(session.should_stop()) self.assertEqual(2, session_creator.number_of_sessions_created) @test_util.run_deprecated_v1 def test_recovery_from_non_preemption_in_coordinator(self): with self.cached_session() as test_session: session_creator = CountingSessionCreator(test_session) hook = StopCoordinatorWithException( calls_before_stopping=2, exception_to_raise=errors_impl.UnknownError( None, None, 'Some fatal exception inside the coordinator.')) session = monitored_session.MonitoredSession(session_creator, [hook]) self.assertEqual(1, session_creator.number_of_sessions_created) self.assertFalse(session.should_stop()) c = constant_op.constant(0) v = array_ops.identity(c) # The coordinator will not abort during this call, since it's the call # number 0. self.assertEqual(51, session.run(v, feed_dict={c: 51})) self.assertFalse(session.should_stop()) # The coordinator will abort during the next call, since it's the call # number 1. self.assertEqual(42, session.run(v, feed_dict={c: 42})) # The coordinator was asked to stop due to non-redeemable error. Training # should stop and the session should not be recreated. self.assertTrue(session.should_stop()) self.assertEqual(1, session_creator.number_of_sessions_created) with self.assertRaises(errors_impl.UnknownError): session.close() @test_util.run_deprecated_v1 def test_recovery_from_session_getting_stuck(self): with self.cached_session() as test_session: session_creator = CountingSessionCreator(test_session) session = monitored_session.MonitoredSession( session_creator, [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)]) self.assertEqual(1, session_creator.number_of_sessions_created) self.assertFalse(session.should_stop()) c = constant_op.constant(0) v = array_ops.identity(c) # Training will not fail, since it's the call number 0. self.assertEqual(51, session.run(v, feed_dict={c: 51})) self.assertFalse(session.should_stop()) # Training will fail during the next call, since it's the call # number 1. self.assertEqual(42, session.run(v, feed_dict={c: 42})) # Even though the coordinator stopped which and training failed, the # underlying session is recreated and training is to be continued. self.assertFalse(session.should_stop()) self.assertEqual(2, session_creator.number_of_sessions_created) @test_util.run_deprecated_v1 def test_step_fn_recovery_from_coordinator_exception_when_run_hooks(self): with self.cached_session() as test_session: session_creator = CountingSessionCreator(test_session) session = monitored_session.MonitoredSession( session_creator, [StopCoordinatorWithException(calls_before_stopping=2)]) self.assertEqual(1, session_creator.number_of_sessions_created) self.assertFalse(session.should_stop()) c = constant_op.constant(0) v = array_ops.identity(c) def feed_step_fn(value): def step_fn(step_context): return step_context.run_with_hooks(fetches=v, feed_dict={c: value}) return step_fn # The coordinator will not abort during this call, since it's the call # number 0. self.assertEqual(51, session.run_step_fn(feed_step_fn(51))) self.assertFalse(session.should_stop()) # The coordinator will abort during the next call, since it's the call # number 1. 
self.assertEqual(42, session.run_step_fn(feed_step_fn(42))) # Even though the coordinator was asked to stop, the underlying session is # recreated and is to be continued. self.assertFalse(session.should_stop()) self.assertEqual(2, session_creator.number_of_sessions_created) @test_util.run_deprecated_v1 def test_recovery_from_non_preemption_in_coordinator_when_run_hooks(self): with self.cached_session() as test_session: session_creator = CountingSessionCreator(test_session) hook = StopCoordinatorWithException( calls_before_stopping=2, exception_to_raise=errors_impl.UnknownError( None, None, 'Some fatal exception inside the coordinator.')) session = monitored_session.MonitoredSession(session_creator, [hook]) self.assertEqual(1, session_creator.number_of_sessions_created) self.assertFalse(session.should_stop()) c = constant_op.constant(0) v = array_ops.identity(c) def feed_step_fn(value): def step_fn(step_context): return step_context.run_with_hooks(fetches=v, feed_dict={c: value}) return step_fn # The coordinator will not abort during this call, since it's the call # number 0. self.assertEqual(51, session.run_step_fn(feed_step_fn(51))) self.assertFalse(session.should_stop()) # The coordinator will abort during the next call, since it's the call # number 1. self.assertEqual(42, session.run_step_fn(feed_step_fn(42))) # The coordinator was asked to stop due to non-redeemable error. Training # should stop and the session should not be recreated. self.assertTrue(session.should_stop()) self.assertEqual(1, session_creator.number_of_sessions_created) with self.assertRaises(errors_impl.UnknownError): session.close() @test_util.run_deprecated_v1 def test_recovery_from_session_getting_stuck_when_run_hooks(self): with self.cached_session() as test_session: session_creator = CountingSessionCreator(test_session) session = monitored_session.MonitoredSession( session_creator, [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)]) self.assertEqual(1, session_creator.number_of_sessions_created) self.assertFalse(session.should_stop()) c = constant_op.constant(0) v = array_ops.identity(c) def feed_step_fn(value): def step_fn(step_context): return step_context.run_with_hooks(fetches=v, feed_dict={c: value}) return step_fn # Training will not fail, since it's the call number 0. self.assertEqual(51, session.run_step_fn(feed_step_fn(51))) self.assertFalse(session.should_stop()) # Training will fail during the next call, since it's the call # number 1. self.assertEqual(42, session.run_step_fn(feed_step_fn(42))) # Even though the coordinator stopped which and training failed, the # underlying session is recreated and training is to be continued. self.assertFalse(session.should_stop()) self.assertEqual(2, session_creator.number_of_sessions_created) def create_raw_session_with_failing_coordinator(self, session_creator, hook): """Return MonitoredSession that triggers coordinator failures.""" session = monitored_session.MonitoredSession(session_creator, [hook]) # We would like to test a situation where during fetches through the # raw session, the coordinator fails with an exception. To do that, we # are going to use (raw_session + StopCoordinatorWithException) hook # combination that is stored in # `MonitoredSession._RecoverableSession._CoordinatedSession._sess` # at this point: session._tf_sess = lambda: session._sess._sess._sess # `run()` on such a session is equivalent to `run()` on the raw session # with separate coordinator threads independently stopping with an # exception. 
return session @test_util.run_deprecated_v1 def test_step_fn_recovery_from_coordinator_exception_with_raw_session(self): with self.cached_session() as test_session: session_creator = CountingSessionCreator(test_session) session = self.create_raw_session_with_failing_coordinator( session_creator, StopCoordinatorWithException(calls_before_stopping=2)) self.assertEqual(1, session_creator.number_of_sessions_created) self.assertFalse(session.should_stop()) c = constant_op.constant(0) v = array_ops.identity(c) def feed_step_fn(value): def step_fn(step_context): return step_context.session.run(fetches=v, feed_dict={c: value}) return step_fn # The coordinator will not abort during this call, since it's the call # number 0. self.assertEqual(51, session.run_step_fn(feed_step_fn(51))) self.assertFalse(session.should_stop()) # The coordinator will abort during the next call, since it's the call # number 1. self.assertEqual(42, session.run_step_fn(feed_step_fn(42))) # Even though the coordinator was asked to stop, the underlying session is # recreated and is to be continued. self.assertFalse(session.should_stop()) self.assertEqual(2, session_creator.number_of_sessions_created) @test_util.run_deprecated_v1 def test_recovery_from_non_preemption_in_coordinator_with_raw_session(self): with self.cached_session() as test_session: session_creator = CountingSessionCreator(test_session) session = self.create_raw_session_with_failing_coordinator( session_creator, StopCoordinatorWithException( calls_before_stopping=2, exception_to_raise=errors_impl.UnknownError( None, None, 'Some fatal exception inside the coordinator.'))) self.assertEqual(1, session_creator.number_of_sessions_created) self.assertFalse(session.should_stop()) c = constant_op.constant(0) v = array_ops.identity(c) def feed_step_fn(value): def step_fn(step_context): return step_context.run_with_hooks(fetches=v, feed_dict={c: value}) return step_fn # The coordinator will not abort during this call, since it's the call # number 0. self.assertEqual(51, session.run_step_fn(feed_step_fn(51))) self.assertFalse(session.should_stop()) # The coordinator will abort during the next call, since it's the call # number 1. self.assertEqual(42, session.run_step_fn(feed_step_fn(42))) # The coordinator was asked to stop due to non-redeemable error. Training # should stop and the session should not be recreated. self.assertTrue(session.should_stop()) self.assertEqual(1, session_creator.number_of_sessions_created) with self.assertRaises(errors_impl.UnknownError): session.close() @test_util.run_deprecated_v1 def test_recovery_from_session_getting_stuck_with_raw_session(self): with self.cached_session() as test_session: session_creator = CountingSessionCreator(test_session) session = self.create_raw_session_with_failing_coordinator( session_creator, FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)) self.assertEqual(1, session_creator.number_of_sessions_created) self.assertFalse(session.should_stop()) c = constant_op.constant(0) v = array_ops.identity(c) def feed_step_fn(value): def step_fn(step_context): return step_context.run_with_hooks(fetches=v, feed_dict={c: value}) return step_fn # Training will not fail, since it's the call number 0. self.assertEqual(51, session.run_step_fn(feed_step_fn(51))) self.assertFalse(session.should_stop()) # Training will fail during the next call, since it's the call # number 1. 
self.assertEqual(42, session.run_step_fn(feed_step_fn(42))) # Even though the coordinator stopped which and training failed, the # underlying session is recreated and training is to be continued. self.assertFalse(session.should_stop()) self.assertEqual(2, session_creator.number_of_sessions_created) class FakeSession(monitored_session._WrappedSession): def __init__(self, sess): monitored_session._WrappedSession.__init__(self, sess) self.args_called = {} def run(self, fetches, **kwargs): self.args_called = dict(kwargs) # Call run only with fetches since we directly pass other arguments. return monitored_session._WrappedSession.run(self, fetches) class HookedSessionTest(test.TestCase): """Tests of _HookedSession.""" def testRunPassesAllArguments(self): with ops.Graph().as_default(), session_lib.Session() as sess: mock_run = FakeSession(sess) mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[]) a_tensor = constant_op.constant([0], name='a_tensor') self.evaluate(variables.global_variables_initializer()) output = mon_sess.run(fetches=a_tensor, feed_dict='a_feed', options='an_option', run_metadata='a_metadata') self.assertEqual(output, [0]) self.assertEqual(mock_run.args_called, { 'feed_dict': 'a_feed', 'options': 'an_option', 'run_metadata': 'a_metadata' }) def testCallsHooksBeginEnd(self): with ops.Graph().as_default(), session_lib.Session() as sess: mock_hook = FakeHook() mock_hook2 = FakeHook() mon_sess = monitored_session._HookedSession( sess=sess, hooks=[mock_hook, mock_hook2]) a_tensor = constant_op.constant([0], name='a_tensor') self.evaluate(variables.global_variables_initializer()) mon_sess.run(a_tensor) for hook in [mock_hook, mock_hook2]: self.assertEqual( hook.last_run_values, session_run_hook.SessionRunValues( results=None, options=config_pb2.RunOptions(), run_metadata=config_pb2.RunMetadata())) self.assertEqual(hook.last_run_context.original_args, session_run_hook.SessionRunArgs(a_tensor)) self.assertEqual(hook.last_run_context.session, sess) self.assertEqual(hook.call_counter['begin'], 0) self.assertEqual(hook.call_counter['after_create_session'], 0) self.assertEqual(hook.call_counter['before_run'], 1) self.assertEqual(hook.call_counter['after_run'], 1) def testShouldStop(self): with ops.Graph().as_default(), session_lib.Session() as sess: mock_hook = FakeHook() mock_hook2 = FakeHook() mon_sess = monitored_session._HookedSession( sess=sess, hooks=[mock_hook, mock_hook2]) constant_op.constant([0], name='a_tensor') self.evaluate(variables.global_variables_initializer()) mon_sess.run(fetches='a_tensor') self.assertFalse(mon_sess.should_stop()) mock_hook.should_stop = True mon_sess.run(fetches='a_tensor') self.assertTrue(mon_sess.should_stop()) def testFetchesHookRequests(self): with ops.Graph().as_default(), session_lib.Session() as sess: mock_hook = FakeHook() mock_hook2 = FakeHook() mon_sess = monitored_session._HookedSession( sess=sess, hooks=[mock_hook, mock_hook2]) a_tensor = constant_op.constant([0], name='a_tensor') another_tensor = constant_op.constant([5], name='another_tensor') third_tensor = constant_op.constant([10], name='third_tensor') mock_hook.request = session_run_hook.SessionRunArgs([another_tensor]) mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor]) self.evaluate(variables.global_variables_initializer()) output = mon_sess.run(fetches=a_tensor) self.assertEqual(output, [0]) self.assertEqual(mock_hook.last_run_values.results, [5]) self.assertEqual(mock_hook2.last_run_values.results, [10]) def testOnlyHooksHaveFeeds(self): with 
ops.Graph().as_default(), session_lib.Session() as sess: mock_hook = FakeHook() mock_hook2 = FakeHook() mon_sess = monitored_session._HookedSession( sess=sess, hooks=[mock_hook, mock_hook2]) a_tensor = constant_op.constant([0], name='a_tensor') b_tensor = constant_op.constant([0], name='b_tensor') add_tensor = a_tensor + b_tensor mock_hook.request = session_run_hook.SessionRunArgs( None, feed_dict={a_tensor: [5]}) mock_hook2.request = session_run_hook.SessionRunArgs( None, feed_dict={b_tensor: [10]}) self.evaluate(variables.global_variables_initializer()) self.assertEqual(mon_sess.run(fetches=add_tensor), [15]) def testBothHooksAndUserHaveFeeds(self): with ops.Graph().as_default(), session_lib.Session() as sess: mock_hook = FakeHook() mock_hook2 = FakeHook() mon_sess = monitored_session._HookedSession( sess=sess, hooks=[mock_hook, mock_hook2]) a_tensor = constant_op.constant([0], name='a_tensor') b_tensor = constant_op.constant([0], name='b_tensor') c_tensor = constant_op.constant([0], name='c_tensor') add_tensor = a_tensor + b_tensor + c_tensor mock_hook.request = session_run_hook.SessionRunArgs( None, feed_dict={a_tensor: [5]}) mock_hook2.request = session_run_hook.SessionRunArgs( None, feed_dict={b_tensor: [10]}) self.evaluate(variables.global_variables_initializer()) feed_dict = {c_tensor: [20]} self.assertEqual( mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35]) # User feed_dict should not be changed self.assertEqual(len(feed_dict), 1) def testHooksFeedConflicts(self): with ops.Graph().as_default(), session_lib.Session() as sess: mock_hook = FakeHook() mock_hook2 = FakeHook() mon_sess = monitored_session._HookedSession( sess=sess, hooks=[mock_hook, mock_hook2]) a_tensor = constant_op.constant([0], name='a_tensor') b_tensor = constant_op.constant([0], name='b_tensor') add_tensor = a_tensor + b_tensor mock_hook.request = session_run_hook.SessionRunArgs( None, feed_dict={a_tensor: [5]}) mock_hook2.request = session_run_hook.SessionRunArgs( None, feed_dict={a_tensor: [10]}) self.evaluate(variables.global_variables_initializer()) with self.assertRaisesRegex(RuntimeError, 'Same tensor is fed'): mon_sess.run(fetches=add_tensor) def testHooksAndUserFeedConflicts(self): with ops.Graph().as_default(), session_lib.Session() as sess: mock_hook = FakeHook() mock_hook2 = FakeHook() mon_sess = monitored_session._HookedSession( sess=sess, hooks=[mock_hook, mock_hook2]) a_tensor = constant_op.constant([0], name='a_tensor') b_tensor = constant_op.constant([0], name='b_tensor') add_tensor = a_tensor + b_tensor mock_hook.request = session_run_hook.SessionRunArgs( None, feed_dict={a_tensor: [5]}) mock_hook2.request = session_run_hook.SessionRunArgs( None, feed_dict={b_tensor: [10]}) self.evaluate(variables.global_variables_initializer()) with self.assertRaisesRegex(RuntimeError, 'Same tensor is fed'): mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]}) class RaiseOnceAtCountN(session_run_hook.SessionRunHook): """Hook that raises an Exception at step N.""" def __init__(self, n, ex): self.n = n self.ex = ex self.raised = False def before_run(self, run_context): # Raise the first time we reach step N. 
self.n -= 1 if 0 == self.n and not self.raised: self.raised = True raise self.ex return None class RunOptionsMetadataHook(session_run_hook.SessionRunHook): """A hook that observes & optionally modifies RunOptions and RunMetadata.""" def __init__(self, trace_level, timeout_in_ms, output_partition_graphs, debug_tensor_watch, report_tensor_allocations_upon_oom): self._trace_level = trace_level self._timeout_in_ms = timeout_in_ms self._output_partition_graphs = output_partition_graphs self._debug_tensor_watch = debug_tensor_watch self._report_tensor_allocations_upon_oom = ( report_tensor_allocations_upon_oom) self.run_options_list = [] self.run_metadata_list = [] def before_run(self, run_context): options = config_pb2.RunOptions( trace_level=self._trace_level, timeout_in_ms=self._timeout_in_ms, output_partition_graphs=self._output_partition_graphs, report_tensor_allocations_upon_oom=self ._report_tensor_allocations_upon_oom) options.debug_options.debug_tensor_watch_opts.extend( [self._debug_tensor_watch]) return session_run_hook.SessionRunArgs(None, None, options=options) def after_run(self, run_context, run_values): self.run_options_list.append(run_values.options) self.run_metadata_list.append(run_values.run_metadata) class MonitoredSessionTest(test.TestCase): """MonitoredSession tests.""" def test_defaults(self): with ops.Graph().as_default(): a_var = variables.VariableV1(0) with monitored_session.MonitoredSession() as session: self.assertEqual(0, session.run(a_var)) def test_last_step(self): logdir = _test_dir(self.get_temp_dir(), 'test_last_step') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) # Run till step 3 and save. hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)] with monitored_session.MonitoredSession(hooks=hooks) as session: self.assertEqual(0, session.run(gstep)) self.assertFalse(session.should_stop()) self.assertEqual(1, session.run(do_step)) self.assertFalse(session.should_stop()) self.assertEqual(2, session.run(do_step)) self.assertFalse(session.should_stop()) self.assertEqual(3, session.run(do_step)) self.assertTrue(session.should_stop()) save_path = saver_lib._get_saver_or_default().save( session._coordinated_creator.tf_sess, os.path.join(logdir, 'step-3')) # Run till step 5 and save. def load_ckpt(scaffold, sess): scaffold.saver.restore(sess, save_path) session_creator = monitored_session.ChiefSessionCreator( monitored_session.Scaffold(init_fn=load_ckpt)) hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)] with monitored_session.MonitoredSession( hooks=hooks, session_creator=session_creator) as session: self.assertEqual(3, session.run(gstep)) self.assertFalse(session.should_stop()) self.assertEqual(4, session.run(do_step)) self.assertFalse(session.should_stop()) self.assertEqual(5, session.run(do_step)) self.assertTrue(session.should_stop()) def test_num_steps(self): logdir = _test_dir(self.get_temp_dir(), 'test_num_steps') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) # Do 3 steps and save. 
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)] with monitored_session.MonitoredSession(hooks=hooks) as session: session.run(do_step) self.assertFalse(session.should_stop()) session.run(do_step) self.assertFalse(session.should_stop()) session.run(do_step) self.assertTrue(session.should_stop()) save_path = saver_lib._get_saver_or_default().save( session._coordinated_creator.tf_sess, os.path.join(logdir, 'step-3')) # Restore and do 4 steps. def load_ckpt(scaffold, sess): scaffold.saver.restore(sess, save_path) session_creator = monitored_session.ChiefSessionCreator( scaffold=monitored_session.Scaffold(init_fn=load_ckpt)) hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)] with monitored_session.MonitoredSession( hooks=hooks, session_creator=session_creator) as session: self.assertEqual(4, session.run(do_step)) self.assertFalse(session.should_stop()) session.run(do_step) self.assertFalse(session.should_stop()) session.run(do_step) self.assertFalse(session.should_stop()) session.run(do_step) self.assertTrue(session.should_stop()) # This set of tests, verifies the supervised session behavior when exceptions # are raised next to the innermost session run() call. @test_util.run_deprecated_v1 def test_recovery(self): logdir = _test_dir(self.get_temp_dir(), 'test_recovery') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) scaffold = monitored_session.Scaffold() # Use a hook to save the model every 100 steps. It also saves it at # the end. hooks = [ basic_session_run_hooks.CheckpointSaverHook( logdir, save_steps=1, scaffold=scaffold) ] with monitored_session.MonitoredSession( session_creator=monitored_session.ChiefSessionCreator( scaffold, checkpoint_dir=logdir), hooks=hooks) as session: self.assertEqual(0, session.run(gstep)) self.assertEqual(1, session.run(do_step)) self.assertEqual(2, session.run(do_step)) # A restart will find the checkpoint and recover automatically. with monitored_session.MonitoredSession( session_creator=monitored_session.ChiefSessionCreator( scaffold, checkpoint_dir=logdir)) as session: self.assertEqual(2, session.run(gstep)) # A restart will find the checkpoint and recover automatically. with monitored_session.MonitoredSession( session_creator=monitored_session.ChiefSessionCreator( scaffold, checkpoint_filename_with_path=checkpoint_management. latest_checkpoint(logdir))) as session: self.assertEqual(2, session.run(gstep)) def test_retry_initialization_on_aborted_error(self): # Tests that we silently retry on abort during initialization. with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() self.init_raised_aborted_error = False def _init_fn(scaffold, session): _, _ = scaffold, session if not self.init_raised_aborted_error: self.init_raised_aborted_error = True raise errors_impl.AbortedError(None, None, 'Abort') with monitored_session.MonitoredSession( session_creator=monitored_session.ChiefSessionCreator( scaffold=monitored_session.Scaffold( init_fn=_init_fn))) as session: self.assertFalse(session.should_stop()) self.assertEqual(0, session.run(gstep)) self.assertTrue(self.init_raised_aborted_error) def _retry_test(self, ex): # Tests that we silently retry on error. Note that this does not test # recovery as we do not use a CheckpointSaver in this test. 
with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) hook = RaiseOnceAtCountN(4, ex) with monitored_session.MonitoredSession(hooks=[hook]) as session: self.assertEqual(0, session.run(gstep)) self.assertEqual(1, session.run(do_step)) self.assertEqual(2, session.run(do_step)) self.assertFalse(session.should_stop()) # Here at step 3, the hook triggers and raises AbortedError. The # MonitoredSession automatically retries and restart from a freshly # initialized session, so the step is back to 0 and running do_step # moves it to 1. self.assertEqual(1, session.run(do_step)) self.assertFalse(session.should_stop()) self.assertTrue(hook.raised) self.assertEqual(2, session.run(do_step)) self.assertFalse(session.should_stop()) def test_retry_on_aborted_error(self): self._retry_test(errors_impl.AbortedError(None, None, 'Abort')) def test_retry_on_unavailable_error(self): self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable')) def test_recover_and_retry_on_aborted_error(self): # Tests that we silently retry and recover on abort. This test uses # a CheckpointSaver to have something to recover from. logdir = _test_dir(self.get_temp_dir(), 'test_recover_and_retry_on_aborted_error') with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) scaffold = monitored_session.Scaffold() abort_hook = RaiseOnceAtCountN( 4, errors_impl.AbortedError(None, None, 'Abort')) # Save after each step. ckpt_hook = basic_session_run_hooks.CheckpointSaverHook( logdir, save_steps=1, scaffold=scaffold) hooks = [abort_hook, ckpt_hook] with monitored_session.MonitoredSession( session_creator=monitored_session.ChiefSessionCreator( scaffold, checkpoint_dir=logdir), hooks=hooks) as session: self.assertEqual(0, session.run(gstep)) self.assertEqual(1, session.run(do_step)) self.assertEqual(2, session.run(do_step)) self.assertFalse(session.should_stop()) # Here at step 3, the hook triggers and raises AbortedError. The # MonitoredSession automatically restores and retries. self.assertEqual(3, session.run(do_step)) self.assertTrue(abort_hook.raised) self.assertFalse(session.should_stop()) self.assertEqual(4, session.run(do_step)) self.assertFalse(session.should_stop()) def test_exit_cleanly_on_out_of_range_exception(self): # Tests that we stop cleanly when OutOfRange is raised. with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None, 'EOI')) session = monitored_session.MonitoredSession(hooks=[hook]) # session should cleanly exit from the context. with session: self.assertEqual(0, session.run(gstep)) self.assertFalse(session.should_stop()) # Here at step 1, the hook triggers and raises OutOfRange. The # session should go into should_stop() mode. It should raise the # exception. So next step should not be executed. session.run(do_step) self.assertTrue(False) self.assertTrue(session.should_stop()) def test_exit_cleanly_on_stop_iteration_exception(self): # Tests that we stop cleanly when OutOfRange is raised. with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) hook = RaiseOnceAtCountN(2, StopIteration) session = monitored_session.MonitoredSession(hooks=[hook]) # session should cleanly exit from the context. 
with session: self.assertEqual(0, session.run(gstep)) self.assertFalse(session.should_stop()) # Here at step 1, the hook triggers and raises StopIteration. The # session should go into should_stop() mode. It should raise the # exception. So next step should not be executed. session.run(do_step) self.assertTrue(False) self.assertTrue(session.should_stop()) def test_regular_exception_pass_through_run(self): # Tests that regular exceptions just pass through a "with # MonitoredSession" block and set the session in stop mode. with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) hook = RaiseOnceAtCountN(4, RuntimeError('regular exception')) session = monitored_session.MonitoredSession(hooks=[hook]) with self.assertRaisesRegex(RuntimeError, 'regular exception'): with session: self.assertEqual(0, session.run(gstep)) self.assertEqual(1, session.run(do_step)) self.assertEqual(2, session.run(do_step)) self.assertFalse(session.should_stop()) # This triggers the hook and raises the exception session.run(do_step) # We should not hit this self.assertFalse(True) self.assertTrue(hook.raised) self.assertTrue(session.should_stop()) def test_regular_exception_reported_to_coord_pass_through_run(self): # Tests that regular exceptions reported to the coordinator from a thread # passes through a "run()" call within a "with MonitoredSession" block and # set the session in stop mode. with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() session = monitored_session.MonitoredSession() run_performed_without_error = False with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'): with session: self.assertEqual(0, session.run(gstep)) # Report an exception through the coordinator. try: raise RuntimeError('a thread wants to stop') except RuntimeError as e: session._coordinated_creator.coord.request_stop(e) # Call run() which should perform normally. self.assertEqual(0, session.run(gstep)) run_performed_without_error = True self.assertTrue(run_performed_without_error) def test_regular_exception_reported_to_coord_pass_through_return(self): # Tests that regular exceptions reported to the coordinator from a thread # passes through returning from a "with MonitoredSession" block and # set the session in stop mode. with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() session = monitored_session.MonitoredSession() with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'): with session: self.assertEqual(0, session.run(gstep)) # Report an exception through the coordinator. try: raise RuntimeError('a thread wants to stop') except RuntimeError as e: session._coordinated_creator.coord.request_stop(e) self.assertTrue(session.should_stop()) # This set of tests, verifies the session behavior when exceptions are raised # from code inside a "with MonitoredSession:" context. def test_stop_cleanly_when_no_exception_in_with_body(self): # Tests that regular exceptions pass through with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) session = monitored_session.MonitoredSession() with session: self.assertEqual(1, session.run(do_step)) self.assertEqual(2, session.run(do_step)) self.assertFalse(session.should_stop()) # Should have closed. self.assertTrue(session.should_stop()) self.assertTrue(session._is_closed()) def test_raises_regular_exceptions_in_with_body(self): # Tests that regular exceptions in "with body" are seen outside. 
with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) session = monitored_session.MonitoredSession() # We should see that exception. with self.assertRaisesRegex(RuntimeError, 'regular exception'): with session: self.assertEqual(1, session.run(do_step)) self.assertEqual(2, session.run(do_step)) self.assertFalse(session.should_stop()) # Will be visible outside the "with body". raise RuntimeError('regular exception') # Should have closed. self.assertTrue(session.should_stop()) self.assertTrue(session._is_closed()) def test_graph(self): with ops.Graph().as_default() as g: with monitored_session.MonitoredSession() as session: self.assertEqual(g, session.graph) def test_graph_finalized_during_run_unfinalized_after_exit(self): with ops.Graph().as_default() as g: a_var = variables.VariableV1(0) with monitored_session.MonitoredSession() as session: self.assertEqual(0, session.run(a_var)) self.assertTrue(g.finalized) self.assertFalse(g.finalized) def test_keep_finalized_graph_as_finalized(self): with ops.Graph().as_default() as g: a_var = variables.VariableV1(0) monitored_session.Scaffold().finalize() with monitored_session.MonitoredSession() as session: self.assertEqual(0, session.run(a_var)) self.assertTrue(g.finalized) self.assertTrue(g.finalized) def test_merge_run_options_from_hooks(self): """Test for rewriting RunOptions and observing RunMetadata with hooks.""" with ops.Graph().as_default(): my_const = constant_op.constant(42, name='my_const') _ = constant_op.constant(24, name='my_const_2') watch_a = debug_pb2.DebugTensorWatch( node_name='my_const', output_slot=0, debug_ops=['DebugIdentity'], debug_urls=[]) hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a, False) watch_b = debug_pb2.DebugTensorWatch( node_name='my_const_2', output_slot=0, debug_ops=['DebugIdentity'], debug_urls=[]) hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b, True) with monitored_session.MonitoredSession( hooks=[hook_a, hook_b]) as session: self.assertEqual(42, session.run(my_const)) # trace_level=3 should have overridden trace_level=2; # timeout_in_ms=60000 should have overridden 30000; # output_partition_graphs=True should have overridden False. # The two debug tensor watches should have been merged. 
self.assertEqual([ config_pb2.RunOptions( trace_level=3, timeout_in_ms=60000, output_partition_graphs=True, debug_options=debug_pb2.DebugOptions( debug_tensor_watch_opts=[watch_a, watch_b]), report_tensor_allocations_upon_oom=True), ], hook_b.run_options_list) self.assertEqual(1, len(hook_b.run_metadata_list)) self.assertTrue( isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata)) self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0) def test_merge_caller_and_hook_run_options(self): """Test that RunOptions from caller and hooks can be merged properly.""" with ops.Graph().as_default(): my_const = constant_op.constant(42, name='my_const') _ = constant_op.constant(24, name='my_const_2') hook_watch = debug_pb2.DebugTensorWatch( node_name='my_const_2', output_slot=0, debug_ops=['DebugIdentity'], debug_urls=[]) hook = RunOptionsMetadataHook(2, 60000, False, hook_watch, False) with monitored_session.MonitoredSession(hooks=[hook]) as session: caller_watch = debug_pb2.DebugTensorWatch( node_name='my_const', output_slot=0, debug_ops=['DebugIdentity'], debug_urls=[]) caller_options = config_pb2.RunOptions( trace_level=3, timeout_in_ms=30000, output_partition_graphs=True, report_tensor_allocations_upon_oom=True) caller_options.debug_options.debug_tensor_watch_opts.extend( [caller_watch]) self.assertEqual(42, session.run(my_const, options=caller_options)) # trace_level=3 from the caller should override 2 from the hook. # timeout_in_ms=60000 from the hook should override from the caller. # output_partition_graph=True from the caller should override False # from the hook. # The two debug watches from the caller and the hook should be merged, # in that order. self.assertEqual([ config_pb2.RunOptions( trace_level=3, timeout_in_ms=60000, output_partition_graphs=True, debug_options=debug_pb2.DebugOptions( debug_tensor_watch_opts=[caller_watch, hook_watch]), report_tensor_allocations_upon_oom=True), ], hook.run_options_list) self.assertEqual(1, len(hook.run_metadata_list)) self.assertTrue( isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata)) self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0) @test_util.run_deprecated_v1 def test_with_statement_and_close(self): # Test case for https://github.com/tensorflow/tensorflow/issues/12224 # where close() inside the with should have a better error message. 
with self.assertRaisesRegex(RuntimeError, 'Session is already closed'): with monitored_session.MonitoredSession() as session: session.close() def test_step_fn_example(self): with ops.Graph().as_default(): c = array_ops.placeholder(dtypes.float32) v = array_ops.identity(c) def step_fn(step_context): value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2}) return value with monitored_session.MonitoredSession() as session: self.assertNear(3.2, session.run_step_fn(step_fn), 0.1) def test_step_function_stops(self): with ops.Graph().as_default(): def step_fn(step_context): step_context.request_stop() with monitored_session.MonitoredSession() as session: self.assertEqual(None, session.run_step_fn(step_fn)) self.assertTrue(session.should_stop()) def test_step_request_stop_without_a_with_block(self): with ops.Graph().as_default(): was_stop_iteration_raised = False def step_fn(step_context): step_context.request_stop() session = monitored_session.MonitoredSession() try: self.assertEqual(None, session.run_step_fn(step_fn)) except StopIteration: was_stop_iteration_raised = True self.assertTrue(was_stop_iteration_raised) self.assertFalse(session.should_stop()) def test_step_request_stop_in_a_loop(self): with ops.Graph().as_default(): def step_fn(step_context): step_context.request_stop() with monitored_session.MonitoredSession() as session: while not session.should_stop(): _ = session.run_step_fn(step_fn) self.fail('An exception should be raised on the line above.') def test_step_request_stop_with_returning_a_type(self): with ops.Graph().as_default(): def step_fn(step_context): del step_context return 'a type' with monitored_session.MonitoredSession() as session: self.assertEqual('a type', session.run_step_fn(step_fn)) def test_step_with_extra_arguments(self): with ops.Graph().as_default(): def step_fn(step_context, extra_foo): del step_context, extra_foo with monitored_session.MonitoredSession() as session: with self.assertRaisesRegex( ValueError, '`step_fn` may either have one `step_context` argument'): self.assertEqual(None, session.run_step_fn(step_fn)) def test_step_fn_belongs_to_a_class(self): with ops.Graph().as_default(): c = array_ops.placeholder(dtypes.float32) v = array_ops.identity(c) class Model(object): def step_fn(self, step_context): return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2}) with monitored_session.MonitoredSession() as session: model = Model() self.assertNear(3.2, session.run_step_fn(model.step_fn), 0.1) def test_step_fn_belongs_to_a_class_and_has_extra_methods(self): with ops.Graph().as_default(): class Model(object): def step_fn(self, step_context, extra_foo): del step_context, extra_foo with monitored_session.MonitoredSession() as session: with self.assertRaisesRegex( ValueError, '`step_fn` may either have one `step_context` argument'): model = Model() self.assertEqual(None, session.run_step_fn(model.step_fn)) def test_step_fn_with_hooks(self): with ops.Graph().as_default(): var = resource_variable_ops.ResourceVariable(0.0) # This test highlights the interaction of hooks with # `Monitoredsession.run_step_fn`. The order of execution of operations # below is: # 0. stage_0 # 1. stage_1_0 or stage_1_1 in an undefined order # 2. stage_2 stage_0 = state_ops.assign_add(var, 0.3) stage_1_0 = state_ops.assign_add(var, 0.7) # The order of `stage_1_0` and `stage_1_1` is undefined by # `MonitoredSession`, but we should be able to assert when both of them # are complete. 
To obtain a consistent result of adding two different # constants to `var`, we rely on a control dependency and # `ResourceVariable`. Otherwise, it is possible that one of the # additions overwrites the result of the other addition. with ops.control_dependencies([stage_1_0]): stage_1_1 = state_ops.assign_add(var, 0.5) stage_2 = state_ops.assign_add(var, 1.1) class Hook(session_run_hook.SessionRunHook): def __init__(self, testing): self._testing = testing def before_run(self, run_context): return session_run_hook.SessionRunArgs(fetches=stage_1_0) def after_run(self, run_context, run_values): self._testing.assertNear(0.3 + 0.5 + 0.7, run_context.session.run(var), 0.1) self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1, run_context.session.run(stage_2), 0.1) def step_fn(step_context): self.assertNear(0.3, step_context.session.run(stage_0), 0.1) return step_context.run_with_hooks(fetches=stage_1_1) with monitored_session.MonitoredSession(hooks=[Hook(self)]) as session: self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn)) def test_step_fn_has_the_same_hooks_behavior_without_recovery(self): with ops.Graph().as_default(): var = resource_variable_ops.ResourceVariable(0.0) stage_0 = state_ops.assign_add(var, 0.3) stage_1_0 = state_ops.assign_add(var, 0.7) with ops.control_dependencies([stage_1_0]): stage_1_1 = state_ops.assign_add(var, 0.5) stage_2 = state_ops.assign_add(var, 1.1) class Hook(session_run_hook.SessionRunHook): def __init__(self, testing): self._testing = testing def before_run(self, run_context): return session_run_hook.SessionRunArgs(fetches=stage_1_0) def after_run(self, run_context, run_values): self._testing.assertNear(0.3 + 0.5 + 0.7, run_context.session.run(var), 0.1) self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1, run_context.session.run(stage_2), 0.1) def step_fn(step_context): self.assertNear(0.3, step_context.session.run(stage_0), 0.1) return step_context.run_with_hooks(fetches=stage_1_1) with monitored_session.SingularMonitoredSession( hooks=[Hook(self)]) as session: self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn)) def test_step_fn_with_hooks_and_request_stop(self): with ops.Graph().as_default(): trace_the_hook = {'before_run': False, 'after_run': False} class Hook(session_run_hook.SessionRunHook): def before_run(self, run_context): trace_the_hook['before_run'] = True def after_run(self, run_context, run_values): trace_the_hook['after_run'] = True def step_fn(step_context): step_context.request_stop() with monitored_session.MonitoredSession(hooks=[Hook()]) as session: self.assertEqual(None, session.run_step_fn(step_fn)) self.assertTrue(session.should_stop()) # `step_context.request_stop()` in a step_fn interrupts the flow of # running the hooks. 
self.assertFalse(trace_the_hook['before_run']) self.assertFalse(trace_the_hook['after_run']) def test_recovers_from_an_exception_in_step_fn(self): trace_the_exception = {'run_already': False} with ops.Graph().as_default(): c = array_ops.placeholder(dtypes.float32) v = array_ops.identity(c) def step_fn(step_context): if not trace_the_exception['run_already']: trace_the_exception['run_already'] = True raise errors_impl.AbortedError(None, None, 'Abort') return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2}) with monitored_session.MonitoredSession() as session: self.assertNear(3.2, session.run_step_fn(step_fn), 0.1) self.assertTrue(trace_the_exception['run_already']) def test_recovers_from_an_exception_in_step_fn_after_hooks(self): trace_the_exception = {'run_already': False, 'side_effect_counter': 0} with ops.Graph().as_default(): c = array_ops.placeholder(dtypes.float32) v = array_ops.identity(c) graph_state = variables.VariableV1(0.0) graph_side_effect = state_ops.assign_add(graph_state, 0.31) def step_fn(step_context): trace_the_exception['side_effect_counter'] += 1 step_context.session.run(graph_side_effect) value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2}) if not trace_the_exception['run_already']: trace_the_exception['run_already'] = True raise errors_impl.AbortedError(None, None, 'Abort') return value with self.cached_session() as test_session: with monitored_session.MonitoredSession( CountingSessionCreator(test_session)) as session: session.run(variables.global_variables_initializer()) self.assertNear(3.2, session.run_step_fn(step_fn), 0.1) self.assertTrue(trace_the_exception['run_already']) # Make sure the rest of the body of the step_fn is re-executed upon # AbortedError: self.assertEqual(2, trace_the_exception['side_effect_counter']) self.assertNear(0.62, session.run(graph_state), 0.1) def test_step_fn_doesnt_recover_when_it_wasnt_asked_to(self): trace_the_exception = {'run_already': False} with ops.Graph().as_default(): c = array_ops.placeholder(dtypes.float32) v = array_ops.identity(c) def step_fn(step_context): if not trace_the_exception['run_already']: trace_the_exception['run_already'] = True raise errors_impl.AbortedError(None, None, 'Abort') value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2}) return value with monitored_session.SingularMonitoredSession() as session: with self.assertRaisesRegex(errors_impl.AbortedError, 'Abort'): self.assertNear(3.2, session.run_step_fn(step_fn), 0.1) self.fail() self.assertTrue(trace_the_exception['run_already']) def test_step_fn_exception_from_before_run(self): trace_the_exception = {'run_already': False, 'side_effect_counter': 0} with ops.Graph().as_default(): c = array_ops.placeholder(dtypes.float32) v = array_ops.identity(c) vv = constant_op.constant(3.2) graph_state = variables.VariableV1(0.0) graph_side_effect = state_ops.assign_add(graph_state, 0.31) class Hook(session_run_hook.SessionRunHook): def __init__(self, testing): self._testing = testing def before_run(self, run_context): if not trace_the_exception['run_already']: trace_the_exception['run_already'] = True raise errors_impl.AbortedError(None, None, 'Abort') return session_run_hook.SessionRunArgs(fetches=vv) def after_run(self, run_context, run_values): self._testing.assertNear(3.2, run_values.results, 0.1) def step_fn(step_context): trace_the_exception['side_effect_counter'] += 1 step_context.session.run(graph_side_effect) return step_context.run_with_hooks(fetches=v, feed_dict={c: 1.3}) with self.cached_session() as test_session: 
with monitored_session.MonitoredSession( CountingSessionCreator(test_session), hooks=[Hook(self)]) as session: test_session.run(variables.global_variables_initializer()) self.assertNear(1.3, session.run_step_fn(step_fn), 0.1) self.assertEqual(2, trace_the_exception['side_effect_counter']) self.assertNear(0.62, session.run(graph_state), 0.1) class SingularMonitoredSessionTest(test.TestCase): """Tests SingularMonitoredSession.""" def test_handles_initialization(self): with ops.Graph().as_default(): a_var = variables.VariableV1(0) with monitored_session.SingularMonitoredSession() as session: # If it's not initialized, following statement raises an error. self.assertEqual(0, session.run(a_var)) def test_do_not_handle_aborted_error(self): with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() class _RaiseAbortedHook(session_run_hook.SessionRunHook): def before_run(self, run_context): raise errors_impl.AbortedError(None, None, 'Abort') with monitored_session.SingularMonitoredSession( hooks=[_RaiseAbortedHook()]) as session: with self.assertRaises(errors_impl.AbortedError): self.assertEqual(0, session.run(gstep)) with self.assertRaises(errors_impl.AbortedError): with monitored_session.SingularMonitoredSession( hooks=[_RaiseAbortedHook()]) as session: self.assertEqual(0, session.run(gstep)) def test_exit_cleanly_on_out_of_range_exception(self): # Tests that we stop cleanly when OutOfRange is raised. with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None, 'EOI')) session = monitored_session.SingularMonitoredSession(hooks=[hook]) # session should cleanly exit from the context. with session: self.assertEqual(0, session.run(gstep)) self.assertFalse(session.should_stop()) # Here at step 1, the hook triggers and raises OutOfRange. The # session should go into should_stop() mode. It should raise the # exception. So next step should not be executed. session.run(do_step) self.assertTrue(False) self.assertTrue(session.should_stop()) def test_regular_exception_reported_to_coord_pass_through_run(self): # Tests that regular exceptions reported to the coordinator from a thread # passes through a "run()" call within a "with MonitoredSession" block and # set the session in stop mode. with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() session = monitored_session.SingularMonitoredSession() run_performed_without_error = False with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'): with session: self.assertEqual(0, session.run(gstep)) # Report an exception through the coordinator. try: raise RuntimeError('a thread wants to stop') except RuntimeError as e: session._coordinated_creator.coord.request_stop(e) # Call run() which should perform normally. self.assertEqual(0, session.run(gstep)) run_performed_without_error = True self.assertTrue(run_performed_without_error) def test_stop_cleanly_when_no_exception_in_with_body(self): # Tests that regular exceptions pass through with ops.Graph().as_default(): gstep = training_util.get_or_create_global_step() do_step = state_ops.assign_add(gstep, 1) session = monitored_session.SingularMonitoredSession() with session: self.assertEqual(1, session.run(do_step)) self.assertEqual(2, session.run(do_step)) self.assertFalse(session.should_stop()) # Should have closed. 
    self.assertTrue(session.should_stop())
    self.assertEqual(None, session.raw_session())

  def test_graph(self):
    with ops.Graph().as_default() as g:
      with monitored_session.SingularMonitoredSession() as session:
        self.assertEqual(g, session.graph)

  def test_raw_session(self):
    with ops.Graph().as_default():
      with monitored_session.SingularMonitoredSession() as session:
        self.assertTrue(isinstance(session.raw_session(),
                                   session_lib.Session))


if __name__ == '__main__':
  test.main()
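# --- Illustrative sketch (not part of the test file above) ---
# The tests above exercise the SessionRunHook protocol: before_run() may ask
# the monitored session to fetch extra tensors, and after_run() receives their
# values. A minimal, self-contained example of that protocol, assuming the
# TF1-style API exposed under tf.compat.v1; the hook and variable names below
# are illustrative only.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()


class PrintLossHook(tf.train.SessionRunHook):
    def __init__(self, loss):
        self._loss = loss

    def before_run(self, run_context):
        # Ask the monitored session to also fetch the loss tensor.
        return tf.train.SessionRunArgs(fetches=self._loss)

    def after_run(self, run_context, run_values):
        # run_values.results holds the fetched loss value.
        print("loss:", run_values.results)


x = tf.Variable(3.0)
loss = tf.square(x)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# MonitoredSession initializes variables and invokes the hook on every run().
with tf.train.MonitoredSession(hooks=[PrintLossHook(loss)]) as sess:
    for _ in range(3):
        sess.run(train_op)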
arrow_tests.py
# Copyright (c) 2019 Michael Vilim # # This file is part of the bamboo library. It is currently hosted at # https://github.com/mvilim/bamboo # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import TestCase import numpy as np import io as io from bamboo_tests.test_utils import df_equality # because of a conflict between our compiled arrow and pyarrow shared libraries (TBD) and static linking has another problem, pyarrow and bamboo cannot be both run in the same interpreter, so we have to run these functions in another process to generate arrow data import multiprocessing as mp FIELD_NAME = 'arr' def pyarrow_runner(pipe): while True: r = pipe.recv() if r is None: pipe.close() break else: pipe.send(r()) def convert(arr, create_list=True): import pyarrow as pa sink = pa.BufferOutputStream() batch = pa.RecordBatch.from_arrays([arr], [FIELD_NAME]) writer = pa.RecordBatchStreamWriter(sink, batch.schema) writer.write_batch(batch) writer.close() sink.flush() if create_list: l = np.array(arr).tolist() else: l = None return bytes(sink.getvalue()), l def create_int8(): import pyarrow as pa return convert(pa.array([1, 2], type=pa.int8())) def create_int32(): import pyarrow as pa return convert(pa.array([1, 2], type=pa.int32())) def create_int64(): import pyarrow as pa return convert(pa.array([1, 2], type=pa.int64())) def create_null_int64(): import pyarrow as pa return convert(pa.array([1, None], type=pa.int64())) def create_time32(): import pyarrow as pa return convert(pa.array(np.array([1, 2], dtype='int32'), pa.time32('s'))) def create_null_time32(): import pyarrow as pa return convert(pa.array(np.array([1, None], dtype='int32'), pa.time32('s'))) # arrow complains about the null int def create_float16(): import pyarrow as pa return convert(pa.array([np.float16(1), np.float16(2)], type=pa.float16())) def create_float32(): import pyarrow as pa return convert(pa.array([np.float32(1), np.float32(2)], type=pa.float32())) def create_float64(): import pyarrow as pa return convert(pa.array([np.float64(1), np.float64(2)], type=pa.float64())) def create_null_float64(): import pyarrow as pa return convert(pa.array([np.float64(1), None], type=pa.float64())) def create_binary(): import pyarrow as pa return convert(pa.array('test', type=pa.binary())) def create_bool(): import pyarrow as pa return convert(pa.array([False, True], type=pa.bool_())) def create_dictionary(): import pyarrow as pa indices = pa.array([0, 1, 0, 1, 2, 0, None, 2]) dictionary = pa.array([u'foo', u'bar', u'baz']) return convert(pa.DictionaryArray.from_arrays(indices, dictionary), create_list=False) def create_list_struct(): import pyarrow as pa return convert(pa.array([{'x': 1, 'y': [{'a': 3, 'b': 6}]}, {'x': 2, 'y': [{'a': 4, 'b': 7}, {'a': 5, 'b': 8}]}]), create_list=False) def create_list_of_list(): import pyarrow as pa return convert(pa.array([[1, 2, None], None, [3]]), create_list=False) def create_flatten(): import pyarrow as pa return convert(pa.array([{'x': 1, 'y': [{'a': 3, 'b': 6}]}, {'x': 2, 'y': [{'a': 4, 'b': 7}, {'a': 5, 'b': 
8}]}]), create_list=False) class ArrowTests(TestCase): def pa(self, f): type(self)._parent_pipe.send(f) return type(self)._parent_pipe.recv() @classmethod def stop(cls): cls._parent_pipe.send(None) cls._runner.join() @classmethod def setUpClass(cls): context = mp.get_context('spawn') cls._parent_pipe, child_pipe = context.Pipe() cls._runner = context.Process(target=pyarrow_runner, args=(child_pipe,)) cls._runner.start() @classmethod def tearDownClass(cls): cls.stop() def array_convert(self, b): import bamboo_cpp_bind as bamboo_cpp node = bamboo_cpp.convert_arrow(io.BytesIO(b)) return node.get_list().get_field(FIELD_NAME) def assert_array(self, b, arr): node = self.array_convert(b) self.assertListEqual(node.get_values().tolist(), arr) def test_int8(self): b, arr = self.pa(create_int8) self.assert_array(b, arr) def test_int32(self): b, arr = self.pa(create_int32) self.assert_array(b, arr) def test_int64(self): b, arr = self.pa(create_int64) self.assert_array(b, arr) def test_null_int64(self): b, arr = self.pa(create_null_int64) node = self.array_convert(b) self.assertListEqual(node.get_values().tolist(), [1]) self.assertListEqual(node.get_null_indices().tolist(), [1]) def test_time32(self): with self.assertRaises(RuntimeError) as context: b, arr = self.pa(create_time32) self.assert_array(b, arr) self.assertTrue('not implemented' in str(context.exception)) # arrow does not support null time32 #def test_null_time32(self): # with self.assertRaises(TypeError) as context: # arr = pa.array(np.array([1, None], dtype='int32'), pa.time32('s')) # self.assert_array(arr) def test_float16(self): b, arr = self.pa(create_float16) self.assert_array(b, arr) def test_float32(self): b, arr = self.pa(create_float32) self.assert_array(b, arr) def test_float64(self): b, arr = self.pa(create_float64) self.assert_array(b, arr) def test_null_float64(self): b, arr = self.pa(create_null_float64) node = self.array_convert(b) self.assertListEqual(node.get_values().tolist(), [1]) self.assertListEqual(node.get_null_indices().tolist(), [1]) def test_binary(self): b, arr = self.pa(create_binary) with self.assertRaises(RuntimeError) as context: self.assert_array(b, arr) self.assertTrue('not implemented' in str(context.exception)) def test_bool(self): b, arr = self.pa(create_bool) self.assert_array(b, arr) def test_dictionary(self): b, arr = self.pa(create_dictionary) node = self.array_convert(b) self.assertListEqual(node.get_values().tolist(), ['foo', 'bar', 'foo', 'bar', 'baz', 'foo', 'baz']) self.assertListEqual(node.get_null_indices().tolist(), [6]) self.assertEqual(node.get_size(), 8) def test_list_struct(self): b, arr = self.pa(create_list_struct) node = self.array_convert(b) self.assertEqual(node.get_field('y').get_list().get_size(), 3) self.assertListEqual(node.get_field('y').get_list().get_field('b').get_values().tolist(), [6, 7, 8]) def test_list_of_list(self): b, arr = self.pa(create_list_of_list) node = self.array_convert(b) self.assertEqual(node.get_size(), 3) self.assertListEqual(node.get_index().tolist(), [3, 1]) self.assertListEqual(node.get_null_indices().tolist(), [1]) self.assertEqual(node.get_list().get_size(), 4) self.assertListEqual(node.get_list().get_values().tolist(), [1, 2, 3]) self.assertListEqual(node.get_list().get_null_indices().tolist(), [2]) def test_flatten(self): from bamboo import from_arrow b, arr = self.pa(create_flatten) node = from_arrow(io.BytesIO(b)) df = node.flatten() df_equality(self, {'x': [1, 2, 2], 'a': [3, 4, 5], 'b': [6, 7, 8]}, df)
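# --- Illustrative sketch (not part of arrow_tests.py above) ---
# The test class above generates arrow data in a spawned child process that
# receives callables over a Pipe and sends their results back, so that pyarrow
# never loads into the parent interpreter. A stripped-down, library-free
# version of that pattern (the function names here are illustrative):
import multiprocessing as mp


def _runner(pipe):
    # Run callables received over the pipe; a None sentinel shuts us down.
    while True:
        fn = pipe.recv()
        if fn is None:
            pipe.close()
            break
        pipe.send(fn())


def _answer():
    return 42


if __name__ == '__main__':
    ctx = mp.get_context('spawn')
    parent_pipe, child_pipe = ctx.Pipe()
    runner = ctx.Process(target=_runner, args=(child_pipe,))
    runner.start()
    parent_pipe.send(_answer)   # must be a picklable, module-level callable
    print(parent_pipe.recv())   # -> 42
    parent_pipe.send(None)      # ask the runner to exit
    runner.join()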
client.py
import json
import logging
import socket
import threading

from cli_validator import port_validation, ip_validation

DEFAULT_PORT = 9090
DEFAULT_IP = "127.0.0.1"
END_MESSAGE_FLAG = "CRLF"

# Logging configuration
logging.basicConfig(
    format="%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s",
    handlers=[logging.FileHandler("./logs/client.log")],
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


class Client:
    def __init__(self, server_ip: str, port_number: int) -> None:
        self.server_ip = server_ip
        self.port_number = port_number
        self.sock = None
        self.new_connection()
        # Authenticate
        self.send_auth()
        # Thread that reads data from the server
        t = threading.Thread(target=self.read_message)
        t.daemon = True
        t.start()
        # Handle input coming from the user
        self.user_processing()

    def new_connection(self):
        """Establish a new socket connection."""
        ip, port = self.server_ip, self.port_number
        sock = socket.socket()
        sock.setblocking(1)
        sock.connect((ip, port))
        self.sock = sock
        logging.info(f"Successfully connected to the server {ip}:{port}")

    def send_reg(self, password):
        """Register a new user in the system."""
        print("*New registration*")
        while True:
            input_username = input("Enter your username (nickname) -> ")
            if input_username == "":
                print("The username cannot be empty!")
            else:
                data = json.dumps(
                    {"password": password, "username": input_username},
                    ensure_ascii=False,
                )
                self.sock.send(data.encode())
                logger.info(f"Sending data to the server: '{data}'")
                # Receive data from the server
                response = json.loads(self.sock.recv(1024).decode())
                if not response["result"]:
                    raise ValueError(
                        f"Registration failed, server response {response}; see the server logs for details"
                    )
                logger.info("Registration succeeded")
                break

    def send_auth(self):
        """Client authorization logic."""
        login_iter = 1
        while True:
            # Separate strings to explain the authorization mechanism on the first login
            req_password_str = "Enter the authorization password"
            req_password_str += (
                "\nIf this is your first login, this password will be used for subsequent authorization -> "
                if login_iter == 1
                else " -> "
            )
            user_password = input(req_password_str)
            if user_password != "":
                data = json.dumps({"password": user_password}, ensure_ascii=False)
                # Send the message
                self.sock.send(data.encode())
                logger.info(f"Sending data to the server: '{data}'")
                # Receive data from the server
                response = json.loads(self.sock.recv(1024).decode())
                # If authorization succeeded
                if response["result"]:
                    print(
                        "Authorization successful, you can now enter messages to send:"
                    )
                    break
                # If authorization failed
                elif response["description"] == "wrong auth":
                    print("Wrong password!")
                    # Make a new connection,
                    # because the server drops the connection when authorization fails
                    self.new_connection()
                # If this is the first login from this IP address, registration is required
                elif response["description"] == "registration required":
                    self.new_connection()
                    self.send_reg(user_password)
                    self.new_connection()
                else:
                    raise ValueError(
                        f"Received an unexpected response from the server: {response}"
                    )
            else:
                print("The password cannot be empty")
            login_iter += 1

    def read_message(self):
        """Read incoming messages."""
        data = ""
        while True:
            # Receive data and assemble it piece by piece
            chunk = self.sock.recv(1024)
            data += chunk.decode()
            # If this is the end of the message, we have collected everything
            # and can hand it back to the user
            if END_MESSAGE_FLAG in data:
                logger.info(f"Received data from the server: '{data}'")
                data = data.replace(END_MESSAGE_FLAG, "")
                data = json.loads(data)
                message_text, user_name = data["text"], data["username"]
                print(f"[{user_name}] {message_text}")
                data = ""
            # If we only received part of the data, log it
            else:
                logger.info(f"Received partial data from the server: '{data}'")

    def send_message(self, message: str):
        """Send a message."""
        # Append the end-of-message flag (otherwise I do not know how to send
        # more than 1024 bytes without breaking the connection)
        message += END_MESSAGE_FLAG
        # Send the message
        self.sock.send(message.encode())
        logger.info(f"Sending data to the server: '{message}'")

    def user_processing(self):
        """Handle messages typed by the user."""
        while True:
            msg = input()
            # If the message is "exit"
            if msg == "exit":
                break
            self.send_message(msg)

    def __del__(self):
        if self.sock:
            self.sock.close()
            logger.info("Closed the connection to the server")


def main():
    port_input = input("Enter the server port number -> ")
    port_flag = port_validation(port_input)
    # If the input is invalid
    if not port_flag:
        port_input = DEFAULT_PORT
        print(f"Falling back to the default port {port_input}")

    ip_input = input("Enter the server IP address -> ")
    ip_flag = ip_validation(ip_input)
    # If the input is invalid
    if not ip_flag:
        ip_input = DEFAULT_IP
        print(f"Falling back to the default IP address {ip_input}")

    client = Client(ip_input, int(port_input))


if __name__ == "__main__":
    main()
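# --- Illustrative sketch (not part of client.py above) ---
# read_message() above reassembles a message from 1024-byte chunks and uses
# END_MESSAGE_FLAG as a delimiter. The same idea as a small standalone helper
# (hypothetical, not from the original project): split complete messages off
# the front of a buffer and keep any partial tail for the next recv().
def split_messages(buffer: str, flag: str = "CRLF"):
    """Return (complete_messages, remaining_buffer)."""
    parts = buffer.split(flag)
    # Everything before the last delimiter is complete; the tail may be partial.
    return parts[:-1], parts[-1]


messages, rest = split_messages('{"text": "hi"}CRLF{"text": "par')
assert messages == ['{"text": "hi"}']
assert rest == '{"text": "par'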
Test_setLeds.py
### If you don't have an led strip then use this (Test_setLeds.py) instead of setLeds.py
import threading
import socket
import select

PORT = 4444

bindsocket = socket.socket()
bindsocket.bind(('', PORT))
bindsocket.listen(5)
print("listening...")


def clientInputLoop(sock, fromaddr):
    while True:
        try:
            # Read led data from the socket
            clientData = sock.recv(128).decode('utf-8').strip()
            if clientData == '':
                break
            print(fromaddr, '->', clientData)
        except Exception as e:
            sock.shutdown(2)  # 0 = done receiving, 1 = done sending, 2 = both
            sock.close()
            # Connection error event here, maybe reconnect
            print('connection error:', e)
            return


while True:
    newsocket, fromaddr = bindsocket.accept()
    print('Connection from:', fromaddr)
    t1 = threading.Thread(target=clientInputLoop, args=(newsocket, fromaddr,))
    t1.start()
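# --- Illustrative sketch (not part of Test_setLeds.py above) ---
# A hypothetical companion client for the listener above: connect to the same
# port and send one line of data, which clientInputLoop() will print. The
# payload below is only an example; the real LED data format is defined
# elsewhere (setLeds.py) and may differ.
import socket

with socket.create_connection(('127.0.0.1', 4444)) as client_sock:
    client_sock.sendall(b'255,0,0\n')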
test_sanity_sample.py
""" Copyright (c) 2019-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import json import os import shlex import signal import subprocess import sys import tempfile import threading import time from enum import Enum from enum import auto from pathlib import Path from typing import Dict import pytest import torch # pylint: disable=redefined-outer-name from examples.common.optimizer import get_default_weight_decay from examples.common.sample_config import SampleConfig from examples.common.utils import get_name from examples.common.utils import is_staged_quantization from nncf.api.compression import CompressionLevel from nncf.common.quantization.structs import QuantizerConfig from nncf.config import NNCFConfig from nncf.hw_config import HWConfigType from tests.conftest import EXAMPLES_DIR from tests.conftest import PROJECT_ROOT from tests.conftest import TEST_ROOT NUM_DEVICES = torch.cuda.device_count() if torch.cuda.is_available() else 1 class Command: def __init__(self, cmd, path=None): self.cmd = cmd self.process = None self.exec_time = -1 self.output = [] # store output here self.kwargs = {} self.timeout = False self.path = path # set system/version dependent "start_new_session" analogs if sys.platform == "win32": self.kwargs.update(creationflags=subprocess.CREATE_NEW_PROCESS_GROUP) elif sys.version_info < (3, 2): # assume posix self.kwargs.update(preexec_fn=os.setsid) else: # Python 3.2+ and Unix self.kwargs.update(start_new_session=True) def kill_process_tree(self, pid): try: if sys.platform != "win32": os.killpg(pid, signal.SIGKILL) else: subprocess.call(['taskkill', '/F', '/T', '/PID', str(pid)]) except OSError as err: print(err) def run(self, timeout=3600, assert_returncode_zero=True): if torch.cuda.is_available(): torch.cuda.empty_cache() # See runs_subprocess_in_precommit for more info on why this is needed def target(): start_time = time.time() self.process = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, bufsize=1, cwd=self.path, **self.kwargs) self.timeout = False self.output = [] for line in self.process.stdout: line = line.decode('utf-8') self.output.append(line) sys.stdout.write(line) sys.stdout.flush() self.process.stdout.close() self.process.wait() self.exec_time = time.time() - start_time thread = threading.Thread(target=target) thread.start() thread.join(timeout) if thread.is_alive(): try: print("Error: process taking too long to complete--terminating" + ", [ " + self.cmd + " ]") self.kill_process_tree(self.process.pid) self.exec_time = timeout self.timeout = True thread.join() except OSError as e: print(self.process.pid, "Exception when try to kill task by PID, " + e.strerror) raise returncode = self.process.wait() print("Process returncode = " + str(returncode)) if assert_returncode_zero: assert returncode == 0, "Process exited with a non-zero exit code {}; output:{}".format( returncode, "".join(self.output)) return returncode def get_execution_time(self): return self.exec_time class ConfigFactory: """Allows to modify config file before test 
run""" def __init__(self, base_config, config_path): self.config = base_config self.config_path = str(config_path) def serialize(self): with open(self.config_path, 'w') as f: json.dump(self.config, f) return self.config_path def __getitem__(self, item): return self.config[item] def __setitem__(self, key, value): self.config[key] = value def create_command_line(args, sample_type): python_path = PROJECT_ROOT.as_posix() executable = EXAMPLES_DIR.joinpath(sample_type, 'main.py').as_posix() cli_args = " ".join(key if (val is None or val is True) else "{} {}".format(key, val) for key, val in args.items()) return "PYTHONPATH={path} {python_exe} {main_py} {args}".format( path=python_path, main_py=executable, args=cli_args, python_exe=sys.executable ) SAMPLE_TYPES = ["classification", "semantic_segmentation", "object_detection"] DATASETS = { "classification": ["mock_32x32", "mock_32x32", "mock_32x32", "mock_32x32"], "semantic_segmentation": ["camvid", "camvid"], "object_detection": ["voc"], } CONFIGS = { "classification": [TEST_ROOT.joinpath("data", "configs", "squeezenet1_1_cifar10_rb_sparsity_int8.json"), TEST_ROOT.joinpath("data", "configs", "resnet18_cifar100_bin_xnor.json"), TEST_ROOT.joinpath("data", "configs", "resnet18_cifar10_staged_quant.json"), TEST_ROOT.joinpath("data", "configs", "resnet18_pruning_magnitude.json")], "semantic_segmentation": [TEST_ROOT.joinpath("data", "configs", "unet_camvid_int8.json"), TEST_ROOT.joinpath("data", "configs", "unet_camvid_rb_sparsity.json")], "object_detection": [TEST_ROOT.joinpath("data", "configs", "ssd300_vgg_voc_int8.json")] } BATCHSIZE_PER_GPU = { "classification": [256, 256, 256, 128], "semantic_segmentation": [2, 2], "object_detection": [128], } DATASET_PATHS = { "classification": { x: lambda dataset_root: dataset_root if dataset_root else os.path.join( tempfile.gettempdir(), x) for x in DATASETS["classification"] }, "semantic_segmentation": { DATASETS["semantic_segmentation"][0]: lambda dataset_root: TEST_ROOT.joinpath("data", "mock_datasets", "camvid"), DATASETS["semantic_segmentation"][0]: lambda dataset_root: TEST_ROOT.joinpath("data", "mock_datasets", "camvid") }, "object_detection": { DATASETS["object_detection"][0]: lambda dataset_root: TEST_ROOT.joinpath("data", "mock_datasets", "voc") }, } CONFIG_PARAMS = list() for sample_type in SAMPLE_TYPES: for tpl in list(zip(CONFIGS[sample_type], DATASETS[sample_type], BATCHSIZE_PER_GPU[sample_type])): CONFIG_PARAMS.append((sample_type,) + tpl) def update_compression_algo_dict_with_reduced_bn_adapt_params(algo_dict): if algo_dict["algorithm"] == "rb_sparsity": return if 'initializer' not in algo_dict: algo_dict['initializer'] = {'batchnorm_adaptation': {'num_bn_adaptation_samples': 5, 'num_bn_forget_samples': 5}} else: algo_dict['initializer'].update({'batchnorm_adaptation': {'num_bn_adaptation_samples': 5, 'num_bn_forget_samples': 5}}) @pytest.fixture(params=CONFIG_PARAMS, ids=["-".join([p[0], p[1].name, p[2], str(p[3])]) for p in CONFIG_PARAMS]) def config(request, dataset_dir): sample_type, config_path, dataset_name, batch_size = request.param dataset_path = DATASET_PATHS[sample_type][dataset_name](dataset_dir) with config_path.open() as f: jconfig = json.load(f) if "checkpoint_save_dir" in jconfig.keys(): del jconfig["checkpoint_save_dir"] # Use a reduced number of BN adaptation samples for speed if "compression" in jconfig: if isinstance(jconfig["compression"], list): algos_list = jconfig["compression"] for algo_dict in algos_list: 
update_compression_algo_dict_with_reduced_bn_adapt_params(algo_dict) else: algo_dict = jconfig["compression"] update_compression_algo_dict_with_reduced_bn_adapt_params(algo_dict) jconfig["dataset"] = dataset_name return { "sample_type": sample_type, 'nncf_config': jconfig, "model_name": jconfig["model"], "dataset_path": dataset_path, "batch_size": batch_size, } @pytest.fixture(scope="module") def case_common_dirs(tmp_path_factory): return { "checkpoint_save_dir": str(tmp_path_factory.mktemp("models")) } @pytest.mark.parametrize(" multiprocessing_distributed", (True, False), ids=['distributed', 'dataparallel']) def test_pretrained_model_eval(config, tmp_path, multiprocessing_distributed): config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json') args = { "--mode": "test", "--data": config["dataset_path"], "--config": config_factory.serialize(), "--log-dir": tmp_path, "--batch-size": config["batch_size"] * NUM_DEVICES, "--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue "--dist-url": "tcp://127.0.0.1:8987" } if not torch.cuda.is_available(): args["--cpu-only"] = True elif multiprocessing_distributed: args["--multiprocessing-distributed"] = True runner = Command(create_command_line(args, config["sample_type"])) runner.run() @pytest.mark.parametrize( "multiprocessing_distributed", [ pytest.param(True, marks=pytest.mark.dependency(name="train_distributed")), pytest.param(False, marks=pytest.mark.dependency(name="train_dataparallel"))], ids=['distributed', 'dataparallel']) def test_pretrained_model_train(config, tmp_path, multiprocessing_distributed, case_common_dirs): checkpoint_save_dir = os.path.join(case_common_dirs["checkpoint_save_dir"], "distributed" if multiprocessing_distributed else "data_parallel") config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json') args = { "--mode": "train", "--data": config["dataset_path"], "--config": config_factory.serialize(), "--log-dir": tmp_path, "--batch-size": config["batch_size"] * NUM_DEVICES, "--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue "--epochs": 2, "--checkpoint-save-dir": checkpoint_save_dir, "--dist-url": "tcp://127.0.0.1:8989" } if not torch.cuda.is_available(): args["--cpu-only"] = True elif multiprocessing_distributed: args["--multiprocessing-distributed"] = True runner = Command(create_command_line(args, config["sample_type"])) runner.run() last_checkpoint_path = os.path.join(checkpoint_save_dir, get_name(config_factory.config) + "_last.pth") assert os.path.exists(last_checkpoint_path) assert torch.load(last_checkpoint_path)['compression_level'] in (CompressionLevel.FULL, CompressionLevel.PARTIAL) @pytest.mark.parametrize( "multiprocessing_distributed", [ pytest.param(True, marks=pytest.mark.dependency(depends=["train_distributed"])), pytest.param(False, marks=pytest.mark.dependency(depends=["train_dataparallel"]))], ids=['distributed', 'dataparallel']) def test_trained_model_eval(config, tmp_path, multiprocessing_distributed, case_common_dirs): config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json') ckpt_path = os.path.join(case_common_dirs["checkpoint_save_dir"], "distributed" if multiprocessing_distributed else "data_parallel", get_name(config_factory.config) + "_last.pth") args = { "--mode": "test", "--data": config["dataset_path"], "--config": config_factory.serialize(), "--log-dir": tmp_path, "--batch-size": config["batch_size"] * NUM_DEVICES, "--workers": 0, # Workaround for the PyTorch 
MultiProcessingDataLoader issue "--weights": ckpt_path, "--dist-url": "tcp://127.0.0.1:8987" } if not torch.cuda.is_available(): args["--cpu-only"] = True elif multiprocessing_distributed: args["--multiprocessing-distributed"] = True runner = Command(create_command_line(args, config["sample_type"])) runner.run() def get_resuming_checkpoint_path(config_factory, multiprocessing_distributed, checkpoint_save_dir): return os.path.join(checkpoint_save_dir, "distributed" if multiprocessing_distributed else "data_parallel", get_name(config_factory.config) + "_last.pth") @pytest.mark.parametrize( "multiprocessing_distributed", [ pytest.param(True, marks=pytest.mark.dependency(depends=["train_distributed"])), pytest.param(False, marks=pytest.mark.dependency(depends=["train_dataparallel"]))], ids=['distributed', 'dataparallel']) def test_resume(config, tmp_path, multiprocessing_distributed, case_common_dirs): checkpoint_save_dir = os.path.join(str(tmp_path), "models") config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json') ckpt_path = get_resuming_checkpoint_path(config_factory, multiprocessing_distributed, case_common_dirs["checkpoint_save_dir"]) if "max_iter" in config_factory.config: config_factory.config["max_iter"] += 2 args = { "--mode": "train", "--data": config["dataset_path"], "--config": config_factory.serialize(), "--log-dir": tmp_path, "--batch-size": config["batch_size"] * NUM_DEVICES, "--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue "--epochs": 3, "--checkpoint-save-dir": checkpoint_save_dir, "--resume": ckpt_path, "--dist-url": "tcp://127.0.0.1:8986" } if not torch.cuda.is_available(): args["--cpu-only"] = True elif multiprocessing_distributed: args["--multiprocessing-distributed"] = True runner = Command(create_command_line(args, config["sample_type"])) runner.run() last_checkpoint_path = os.path.join(checkpoint_save_dir, get_name(config_factory.config) + "_last.pth") assert os.path.exists(last_checkpoint_path) assert torch.load(last_checkpoint_path)['compression_level'] in (CompressionLevel.FULL, CompressionLevel.PARTIAL) @pytest.mark.parametrize( "multiprocessing_distributed", [ pytest.param(True, marks=pytest.mark.dependency(depends=["train_distributed"])), pytest.param(False, marks=pytest.mark.dependency(depends=["train_dataparallel"]))], ids=['distributed', 'dataparallel']) def test_export_with_resume(config, tmp_path, multiprocessing_distributed, case_common_dirs): config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json') ckpt_path = get_resuming_checkpoint_path(config_factory, multiprocessing_distributed, case_common_dirs["checkpoint_save_dir"]) onnx_path = os.path.join(str(tmp_path), "model.onnx") args = { "--mode": "test", "--config": config_factory.serialize(), "--resume": ckpt_path, "--to-onnx": onnx_path } if not torch.cuda.is_available(): args["--cpu-only"] = True runner = Command(create_command_line(args, config["sample_type"])) runner.run() assert os.path.exists(onnx_path) def test_export_with_pretrained(tmp_path): config = SampleConfig() config.update({ "model": "resnet18", "dataset": "imagenet", "input_info": { "sample_size": [2, 3, 299, 299] }, "num_classes": 1000, "compression": {"algorithm": "magnitude_sparsity"} }) config_factory = ConfigFactory(config, tmp_path / 'config.json') onnx_path = os.path.join(str(tmp_path), "model.onnx") args = { "--mode": "test", "--config": config_factory.serialize(), "--pretrained": '', "--to-onnx": onnx_path } if not torch.cuda.is_available(): 
args["--cpu-only"] = True runner = Command(create_command_line(args, "classification")) runner.run() assert os.path.exists(onnx_path) @pytest.mark.parametrize(('algo', 'ref_weight_decay'), (('rb_sparsity', 0), ('const_sparsity', 1e-4), ('magnitude_sparsity', 1e-4), ('quantization', 1e-4))) def test_get_default_weight_decay(algo, ref_weight_decay): config = NNCFConfig() config.update({"compression": {"algorithm": algo}}) assert ref_weight_decay == get_default_weight_decay(config) def test_cpu_only_mode_produces_cpu_only_model(config, tmp_path, mocker): config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json') args = { "--data": config["dataset_path"], "--config": config_factory.serialize(), "--log-dir": tmp_path, "--batch-size": config["batch_size"] * NUM_DEVICES, "--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue "--epochs": 1, "--cpu-only": True } # to prevent starting a not closed mlflow session due to memory leak of config and SafeMLFLow happens with a # mocked train function mocker.patch("examples.common.utils.SafeMLFLow") arg_list = [key if (val is None or val is True) else "{} {}".format(key, val) for key, val in args.items()] command_line = " ".join(arg_list) if config["sample_type"] == "classification": import examples.classification.main as sample mocked_printing = mocker.patch('examples.classification.main.print_statistics') if is_staged_quantization(config['nncf_config']): mocker.patch("examples.classification.staged_quantization_worker.train_epoch_staged") mocker.patch("examples.classification.staged_quantization_worker.validate") import examples.classification.staged_quantization_worker as staged_worker mocked_printing = mocker.patch('examples.classification.staged_quantization_worker.print_statistics') staged_worker.validate.return_value = (0, 0) else: mocker.patch("examples.classification.main.train_epoch") mocker.patch("examples.classification.main.validate") sample.validate.return_value = (0, 0) elif config["sample_type"] == "semantic_segmentation": import examples.semantic_segmentation.main as sample mocked_printing = mocker.patch('examples.semantic_segmentation.main.print_statistics') import examples.semantic_segmentation.train mocker.spy(examples.semantic_segmentation.train.Train, "__init__") elif config["sample_type"] == "object_detection": import examples.object_detection.main as sample mocker.spy(sample, "train") mocked_printing = mocker.patch('examples.object_detection.main.print_statistics') sample.main(shlex.split(command_line)) if not config["sample_type"] == "object_detection": assert mocked_printing.call_count == 2 else: assert mocked_printing.call_count == 3 # pylint: disable=no-member if config["sample_type"] == "classification": if is_staged_quantization(config['nncf_config']): import examples.classification.staged_quantization_worker as staged_worker model_to_be_trained = staged_worker.train_epoch_staged.call_args[0][2] # model else: model_to_be_trained = sample.train_epoch.call_args[0][1] # model elif config["sample_type"] == "semantic_segmentation": model_to_be_trained = examples.semantic_segmentation.train.Train.__init__.call_args[0][1] # model elif config["sample_type"] == "object_detection": model_to_be_trained = sample.train.call_args[0][0] # net for p in model_to_be_trained.parameters(): assert not p.is_cuda class SampleType(Enum): CLASSIFICATION = auto() SEMANTIC_SEGMENTATION = auto() OBJECT_DETECTION = auto() class TestCaseDescriptor: config_name: str quantization_algo_params: Dict = {} sample_type: 
SampleType dataset_dir: Path dataset_name: str is_real_dataset: bool = False batch_size: int n_weight_quantizers: int n_activation_quantizers: int def batch(self, batch_size: int): self.batch_size = batch_size return self def get_config_path(self): return TEST_ROOT.joinpath("data", "configs", "hawq", self.config_name) def config(self, config_name: str): self.config_name = config_name return self def staged(self): self.quantization_algo_params = { "activations_quant_start_epoch": 0 } return self def sample(self, sample_type: SampleType): self.sample_type = sample_type return self def real_dataset(self, dataset_name: str): self.dataset_name = dataset_name self.is_real_dataset = True return self def mock_dataset(self, dataset_name: str): self.dataset_name = dataset_name self.dataset_dir = TEST_ROOT.joinpath("data", "mock_datasets", dataset_name) return self def num_weight_quantizers(self, n: int): self.n_weight_quantizers = n return self def num_activation_quantizers(self, n: int): self.n_activation_quantizers = n return self def __str__(self): return '_'.join([self.config_name, 'staged' if self.quantization_algo_params else '']) def get_config_update(self) -> Dict: sample_params = self.get_sample_params() return { **sample_params, 'target_device': 'VPU', 'compression': { 'algorithm': 'quantization', 'initializer': { 'precision': self.get_precision_section(), 'range': { "num_init_samples": 2 }, "batchnorm_adaptation": { "num_bn_adaptation_samples": 1, "num_bn_forget_samples": 1 } }, 'params': self.quantization_algo_params, } } def get_precision_section(self) -> Dict: raise NotImplementedError def get_sample_params(self) -> Dict: return {"dataset": self.dataset_name} def setup_spy(self, mocker): raise NotImplementedError def validate_spy(self): raise NotImplementedError class HAWQDescriptor(TestCaseDescriptor): batch_size_init: int = 0 get_qsetup_spy = None hessian_trace_estimator_spy = None def batch_for_init(self, batch_size_init: int): self.batch_size_init = batch_size_init return self def get_sample_params(self): result = super().get_sample_params() result.update({'batch_size_init': self.batch_size_init} if self.batch_size_init else {}) return result def get_precision_section(self) -> Dict: return {"type": "hawq", "num_data_points": 3, "iter_number": 1} def __str__(self): bs = f'_bs{self.batch_size_init}' if self.batch_size_init else '' return super().__str__() + '_hawq' + bs def setup_spy(self, mocker): from nncf.quantization.init_precision import HAWQPrecisionInitializer self.get_qsetup_spy = mocker.spy(HAWQPrecisionInitializer, "get_quantizer_setup_for_qconfig_sequence") from nncf.quantization.hessian_trace import HessianTraceEstimator self.hessian_trace_estimator_spy = mocker.spy(HessianTraceEstimator, "__init__") def validate_spy(self): qconfig_sequence = self.get_qsetup_spy.call_args[0][1] assert len(qconfig_sequence) == self.n_weight_quantizers all_precisions = {qc.num_bits for qc in qconfig_sequence} # with default compression ratio = 1.5 all precisions should be different from the default one assert all_precisions != {QuantizerConfig().num_bits} init_data_loader = self.hessian_trace_estimator_spy.call_args[0][5] expected_batch_size = self.batch_size_init if self.batch_size_init else self.batch_size assert init_data_loader.batch_size == expected_batch_size class AutoQDescriptor(TestCaseDescriptor): subset_ratio_: float = 1.0 BITS = [2, 4, 8] debug_dump: bool = False def subset_ratio(self, subset_ratio_: float): self.subset_ratio_ = subset_ratio_ return self def dump_debug(self, 
debug_dump: bool): self.debug_dump = debug_dump return self def get_precision_section(self) -> Dict: return {"type": "autoq", "bits": AutoQDescriptor.BITS, "iter_number": 2, "compression_ratio": 0.15, "eval_subset_ratio": self.subset_ratio_, "dump_init_precision_data": self.debug_dump} def __str__(self): sr = f'_sr{self.subset_ratio_}' if self.subset_ratio_ else '' dd = '_dump_debug' if self.debug_dump else '' return super().__str__() + '_autoq' + sr + dd def setup_spy(self, mocker): from nncf.quantization.algo import QuantizationBuilder self.builder_spy = mocker.spy(QuantizationBuilder, 'build_controller') def validate_spy(self): ctrl = self.builder_spy.spy_return final_bits = [qm.num_bits for qm in ctrl.all_quantizations.values()] assert set(final_bits) != {QuantizerConfig().num_bits} assert all([bit in AutoQDescriptor.BITS for bit in final_bits]) def resnet18_desc(x: TestCaseDescriptor): return x.config("resnet18_cifar10_mixed_int.json").sample(SampleType.CLASSIFICATION). \ mock_dataset('mock_32x32').batch(3).num_weight_quantizers(21).num_activation_quantizers(27) def inception_v3_desc(x: TestCaseDescriptor): return x.config("inception_v3_cifar10_mixed_int.json").sample(SampleType.CLASSIFICATION). \ mock_dataset('mock_32x32').batch(3).num_weight_quantizers(95).num_activation_quantizers(105) def ssd300_vgg_desc(x: TestCaseDescriptor): return x.config("ssd300_vgg_voc_mixed_int.json").sample(SampleType.OBJECT_DETECTION). \ mock_dataset('voc').batch(3).num_weight_quantizers(35).num_activation_quantizers(27) def unet_desc(x: TestCaseDescriptor): return x.config("unet_camvid_mixed_int.json").sample(SampleType.SEMANTIC_SEGMENTATION). \ mock_dataset('camvid').batch(3).num_weight_quantizers(23).num_activation_quantizers(23) def icnet_desc(x: TestCaseDescriptor): return x.config("icnet_camvid_mixed_int.json").sample(SampleType.SEMANTIC_SEGMENTATION). 
\ mock_dataset('camvid').batch(3).num_weight_quantizers(64).num_activation_quantizers(81) TEST_CASE_DESCRIPTORS = [ inception_v3_desc(HAWQDescriptor()), inception_v3_desc(HAWQDescriptor()).staged(), resnet18_desc(HAWQDescriptor()), resnet18_desc(HAWQDescriptor()).staged(), resnet18_desc(HAWQDescriptor()).batch_for_init(2), resnet18_desc(HAWQDescriptor()).batch_for_init(2).staged(), ssd300_vgg_desc(HAWQDescriptor()), ssd300_vgg_desc(HAWQDescriptor()).batch_for_init(2), unet_desc(HAWQDescriptor()), unet_desc(HAWQDescriptor()).batch_for_init(2), icnet_desc(HAWQDescriptor()), inception_v3_desc(AutoQDescriptor()).batch(2), inception_v3_desc(AutoQDescriptor()).staged(), resnet18_desc(AutoQDescriptor()).batch(2), resnet18_desc(AutoQDescriptor()).batch(2).staged().dump_debug(True), resnet18_desc(AutoQDescriptor()).subset_ratio(0.2).batch(2), resnet18_desc(AutoQDescriptor()).subset_ratio(0.2).staged(), ssd300_vgg_desc(AutoQDescriptor()).batch(2).dump_debug(True), unet_desc(AutoQDescriptor()).dump_debug(True), icnet_desc(AutoQDescriptor()) ] @pytest.fixture(params=TEST_CASE_DESCRIPTORS, ids=[str(d) for d in TEST_CASE_DESCRIPTORS]) def desc(request, dataset_dir): desc: TestCaseDescriptor = request.param config_path = desc.get_config_path() with config_path.open() as file: json_config = json.load(file) json_config.update(desc.get_config_update()) desc.config = json_config if desc.is_real_dataset: desc.dataset_dir = Path( dataset_dir if dataset_dir else os.path.join(tempfile.gettempdir(), desc.dataset_name)) return desc def test_precision_init(desc: TestCaseDescriptor, tmp_path, mocker): config_factory = ConfigFactory(desc.config, tmp_path / 'config.json') args = { "--data": str(desc.dataset_dir), "--config": config_factory.serialize(), "--log-dir": tmp_path, "--batch-size": desc.batch_size, "--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue } if not torch.cuda.is_available(): args["--cpu-only"] = True arg_list = [key if (val is None or val is True) else "{} {}".format(key, val) for key, val in args.items()] command_line = " ".join(arg_list) # Need to mock SafeMLFLow to prevent starting a not closed mlflow session due to memory leak of config and # SafeMLFLow, which happens with a mocked train function if desc.sample_type == SampleType.CLASSIFICATION: import examples.classification.main as sample mocker.patch("examples.classification.staged_quantization_worker.train_staged") mocker.patch("examples.classification.main.train") mocker.patch("examples.classification.main.SafeMLFLow") mocker.patch("examples.classification.staged_quantization_worker.SafeMLFLow") elif desc.sample_type == SampleType.SEMANTIC_SEGMENTATION: import examples.semantic_segmentation.main as sample mocker.patch("examples.semantic_segmentation.main.train") mocker.patch("examples.semantic_segmentation.main.SafeMLFLow") elif desc.sample_type == SampleType.OBJECT_DETECTION: import examples.object_detection.main as sample mocker.patch("examples.object_detection.main.train") mocker.patch("examples.object_detection.main.SafeMLFLow") desc.setup_spy(mocker) sample.main(shlex.split(command_line)) desc.validate_spy() @pytest.mark.parametrize('target_device', [x.value for x in HWConfigType]) def test_sample_propagates_target_device_cl_param_to_nncf_config(mocker, tmp_path, target_device): config_dict = { "input_info": { "sample_size": [1, 1, 32, 32], }, "compression": { "algorithm": "quantization" }, } config_factory = ConfigFactory(config_dict, tmp_path / 'config.json') args = { "--data": str(tmp_path), "--config": 
config_factory.serialize(), "--log-dir": tmp_path, "--batch-size": 1, "--target-device": target_device, } if not torch.cuda.is_available(): args["--cpu-only"] = True arg_list = [key if (val is None or val is True) else "{} {}".format(key, val) for key, val in args.items()] command_line = " ".join(arg_list) import examples.classification.main as sample start_worker_mock = mocker.patch("examples.classification.main.start_worker") sample.main(shlex.split(command_line)) config = start_worker_mock.call_args[0][1].nncf_config assert config["target_device"] == target_device
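# --- Illustrative sketch (not part of test_sanity_sample.py above) ---
# Command.run() above wraps Popen in a worker thread, joins it with a timeout,
# and kills the whole process group if the join times out. A stripped-down,
# POSIX-only version of that watchdog pattern (simplified; no output capture
# or Windows handling):
import os
import signal
import subprocess
import threading


def run_with_timeout(cmd, timeout=60):
    proc = None

    def target():
        nonlocal proc
        # start_new_session=True puts the child in its own process group,
        # so the whole tree can be killed at once.
        proc = subprocess.Popen(cmd, shell=True, start_new_session=True)
        proc.wait()

    worker = threading.Thread(target=target)
    worker.start()
    worker.join(timeout)
    if worker.is_alive():
        os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
        worker.join()
    return proc.returncode


# Example: run_with_timeout("sleep 1", timeout=5) returns 0.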
threadLifecycle.py
import threading
import time


# A very simple method for our thread to execute
def threadWorker():
    # It is only at the point where the thread starts executing
    # that its state goes from 'Runnable' to 'Running'.
    print("My Thread has entered the 'Running' State")
    # If we call the time.sleep() method then our thread
    # goes into a not-runnable state. We can do no further work
    # on this particular thread.
    time.sleep(10)
    # The thread then completes its tasks and terminates.
    print("My Thread is terminating")


# At this point in time, the thread has no state;
# it hasn't been allocated any system resources.
myThread = threading.Thread(target=threadWorker)

# When we call myThread.start(), Python allocates the necessary system
# resources in order for our thread to run and then calls the thread's
# run method. It goes from the 'Starting' state to 'Runnable' but not 'Running'.
myThread.start()

# Here we join the thread and when this method is called
# our thread goes into a 'Dead' state. It has finished the
# job that it was intended to do.
myThread.join()
print("My Thread has entered a 'Dead' state")
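# --- Illustrative companion sketch (not part of threadLifecycle.py above) ---
# is_alive() lets you observe the state transitions the comments above
# describe: False before start(), True while runnable/running, False once the
# thread has terminated.
import threading
import time


def work():
    time.sleep(0.5)


t = threading.Thread(target=work)
print(t.is_alive())   # False: created, no system resources allocated yet
t.start()
print(t.is_alive())   # True: runnable/running
t.join()
print(t.is_alive())   # False: terminated ('dead')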
host_callback_test.py
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial import itertools import logging import os import re import threading import time from typing import Callable, Optional, Sequence import unittest from unittest import skip, SkipTest from absl.testing import absltest from absl.testing import parameterized import jax from jax import ad_checkpoint from jax import core from jax.config import config from jax import dtypes from jax.experimental import host_callback as hcb from jax.experimental import PartitionSpec as P from jax.experimental import maps from jax.experimental import pjit from jax import lax from jax import numpy as jnp from jax._src import test_util as jtu from jax import tree_util from jax._src.lib import xla_client from jax._src.lib import xla_bridge xops = xla_client.ops import numpy as np config.parse_flags_with_absl() FLAGS = config.FLAGS class _TestingOutputStream(object): """Use as `output_stream` for tests.""" def __init__(self): self._output = [] self._test_method_name = None def write(self, what: str) -> None: print(f"output_stream[{self._test_method_name}]: {what}", end="") self._output.append(what) @property def output(self): return "".join(self._output) @property def output_sorted_by_device(self): # Assume that the output is a sequence of strings including metadata # and data, with metadata containing `device: xxx` by_device = [] # each element is a pair (device, str_list) for s in self._output: m = re.match(r".*device: (\S+)", s) if m: by_device.append((m.group(1), [])) assert by_device, f"output does not include 'device:': {self._output}" by_device[-1][1].append(s) sorted_by_device = sorted(by_device, key=lambda x: x[0]) return "\n".join(itertools.chain(*[s[1] for s in sorted_by_device])) def __str__(self): return "TestingOutputStream" def reset(self): self._output = [] testing_stream = _TestingOutputStream() def fun1(a): """Function used for several `id_tap` tests.""" y = hcb.id_print(a * 2., what="a * 2", output_stream=testing_stream) y = hcb.id_print(y * 3., what="y * 3", output_stream=testing_stream, result=y) return y ** 2 # Some computation to make the gradient interesting def fun1_equiv(a): # Numerical equivalent of fun1 return (a * 2.) ** 2 def maybe_print(do_print: bool, arg, what: str, tap_with_device: Optional[bool] = False): """Conditionally print on testing_string""" if do_print: return hcb.id_print(arg, what=what, output_stream=testing_stream, tap_with_device=tap_with_device) else: return arg def local_devices(): # Tests require using not more than 2 devices. return jax.local_devices()[:2] ignore_jit_of_pmap_warning = partial( jtu.ignore_warning, message=".*jit-of-pmap.*") def assertMultiLineStrippedEqual(tst: jtu.JaxTestCase, expected: str, what: str): """A variant that preprocesses the string to eliminate non-determinism in floating point values, and several uninteresting id_tap primitive params. 
""" # Sometimes we get floating points in the output; we round them def repl_floats(match_group): matched = match_group.group(0) if matched == ".": return matched x = np.around(float(matched), decimals=2) return f"{x:.2f}" what = re.sub(r"\-?\d*\.[\-\def]*", repl_floats, what) what = re.sub(r"output_stream=[^\]\n,]*,?", "", what) what = re.sub(r"threshold=[^\]\n,]*,?", "", what) what = re.sub(r"bwd=[^\]\n]*", "", what) what = re.sub(r"out_trees=[^\]\n]*", "", what) what = re.sub(r"fwd_jaxpr_thunk=[^\]\n]*", "", what) what = re.sub(r"jvp_jaxpr_thunk=[^\]\n]*", "", what) # Empty lines what = re.sub(r"^\s*\n", "", what, flags=re.MULTILINE) def repl_func(match_group): matched = match_group.group(3) if "function _print_consumer" in matched: return match_group.group(1) + "=_print" else: return match_group.group(1) + "=..." what = re.sub(r"((tap_func_)|(callback))=([^\]\n,]*),?", repl_func, what) tst.assertMultiLineStrippedEqual(expected, what) def helper_set_hlo_dump(): flags_str = os.getenv("XLA_FLAGS", "") import shutil dump_dir = "/tmp/xla_dump" os.environ["XLA_FLAGS"] = f"{flags_str} --xla_dump_to={dump_dir}" if os.path.isdir(dump_dir): logging.warning("Deleting old XLA dump directory %s", dump_dir) shutil.rmtree(dump_dir) logging.warning("Setting XLA dump directory %s", dump_dir) # Clear any cached backends so new CPU backend will pick up the env var. xla_bridge.get_backend.cache_clear() def helper_print_optimized_hlo(fun, *args): backend = xla_bridge.get_backend() c = jax.xla_computation(fun, backend='cpu')(*args) print(re.sub(r", metadata.*", "", backend.compile(c).hlo_modules()[0].to_string())) def helper_log_ir(name, f_jax, *args, num_partitions=None, strip_metadata=False): print(f"Jaxpr[{name}]: {jax.make_jaxpr(f_jax)(*args)}") jax_comp = jax.xla_computation(f_jax, backend='cpu')(*args) print(f"HLO[{name}]: {jax_comp.as_hlo_text()}") backend = xla_bridge.get_backend() if num_partitions is not None: num_replicas = 1 device_assignment = np.arange(num_partitions * num_replicas) device_assignment = np.reshape(device_assignment, (-1, num_partitions)) use_spmd_partitioning = num_partitions > 1 compile_options = xla_bridge.get_compile_options( num_replicas=num_replicas, num_partitions=num_partitions, device_assignment=device_assignment, use_spmd_partitioning=use_spmd_partitioning, ) else: compile_options = None jax_optimized_hlo = backend.compile( jax_comp, compile_options).hlo_modules()[0].to_string() if strip_metadata: jax_optimized_hlo = re.sub(r", metadata.*", "", jax_optimized_hlo) print(f"Optimized HLO[{name}] for " f"platform {backend.platform}: {jax_optimized_hlo}") prev_xla_flags = None def setUpModule(): global prev_xla_flags # This will control the CPU devices. On TPU we always have 2 devices prev_xla_flags = jtu.set_host_platform_device_count(2) # Reset to previous configuration in case other test modules will be run. def tearDownModule(): prev_xla_flags() def assertMultiDeviceOutputEqual(tst: jtu.JaxTestCase, expected_2CPUs: str): """Check that the multi-device output is equal to the expected. The tests run with 2 devices if available, otherwise 1 device. We adjust the expected output here for 1 device. Args: expected_2CPUs: the expected output for 2 CPUs. If there is only one device, this is trimmed to the first device. 
If the current device_under_test is not a CPU, then we change the names """ expected = expected_2CPUs if len(local_devices()) == 1: start_device_1 = expected.find('device: cpu:1') if start_device_1 >= 0: expected = expected[0:start_device_1] def replace_device_name(m) -> str: return str(local_devices()[int(m.group(1))]) expected = re.sub(r'cpu:(\d+)', replace_device_name, expected) what = testing_stream.output_sorted_by_device return assertMultiLineStrippedEqual(tst, expected, what) class HostCallbackTapTest(jtu.JaxTestCase): def setUp(self): super().setUp() if jtu.device_under_test() == "gpu" and jax.device_count() > 1: raise SkipTest("host_callback broken on multi-GPU platforms (#6447)") testing_stream.reset() testing_stream._test_method_name = self._testMethodName self.old_flags = os.getenv("XLA_FLAGS", "") def tearDown(self) -> None: if os.getenv("XLA_FLAGS") != self.old_flags: os.environ["XLA_FLAGS"] = self.old_flags xla_bridge.get_backend.cache_clear() hcb.barrier_wait("HostCallbackTapTest.tearDown") super().tearDown() def test_tap_eval(self): self.assertAllClose((5. * 2.) ** 2, fun1(5.)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ what: a * 2 10.00 what: y * 3 30.00""", testing_stream.output) def test_tap_with_tuple_results(self): def func2(x): x1, y1 = hcb.id_print((x * 2., x * 3.), output_stream=testing_stream) return x1 + y1 self.assertEqual(3. * (2. + 3.), func2(3.)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ ( 6.00 9.00 )""", testing_stream.output) def test_tap_with_dict_results(self): def func2(x): res = hcb.id_print(dict(a=x * 2., b=x * 3.), output_stream=testing_stream) return res["a"] + res["b"] self.assertEqual(3. * (2. + 3.), func2(3.)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ { a=6.00 b=9.00 }""", testing_stream.output) def test_tap_with_result(self): def func2(x): x1 = hcb.id_print((x * 2., x * 3.), result=x * 4., output_stream=testing_stream) return x1 self.assertEqual(3. * 4., func2(3.)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ ( 6.00 9.00 )""", testing_stream.output) def test_tap_with_result_no_arg(self): def tap_func(arg, transforms): testing_stream.write(f"called tap_func with {arg}") def func2(x): x1 = hcb.id_tap(tap_func, None, result=x) return x1 self.assertEqual(3., func2(3.)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, "called tap_func with None", testing_stream.output) def test_tap_result_unused(self): def tap_func(arg, transforms): testing_stream.write(f"called tap_func with {arg}") def func2(x): hcb.id_tap(tap_func, None) return x self.assertEqual(3., func2(3.)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, "called tap_func with None", testing_stream.output) def test_tap_with_device(self): def func2(x): x1 = hcb.id_print((x * 2., x * 3.), result=x * 4., output_stream=testing_stream, tap_with_device=True) return x1 self.assertEqual(3. * 4., func2(3.)) hcb.barrier_wait() assertMultiDeviceOutputEqual(self, """ device: cpu:0 ( 6.00 9.00 )""") def test_tap_eval_exception(self): if not FLAGS.jax_host_callback_outfeed: raise SkipTest("TODO: implement error handling for customcall") # Simulate a tap error def tap_err(*args, **kwargs): raise ValueError("Some user message") def func(x): x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream) x2 = hcb.id_tap(tap_err, x1 + 1) x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream) return x3 with self.assertRaisesRegex( hcb.CallbackException, re.compile("There were exceptions during callback processing. 
Last one was:.*" "ValueError: Some user message", re.DOTALL)): func(0) hcb.barrier_wait() # We should have received everything before the error assertMultiLineStrippedEqual(self, """ what: x1 1 what: x3 3""", testing_stream.output) def test_tap_empty(self): """Tap empty arrays.""" hcb.id_print((), output_stream=testing_stream) hcb.id_print((1., np.ones((2, 0))), what="second", output_stream=testing_stream) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ ( ) what: second ( 1.00 [] )""", testing_stream.output) def test_tap_jit_simple(self): jit_fun1 = jax.jit(lambda x: 3. * hcb.id_print( 2. * x, what="here", output_stream=testing_stream)) self.assertAllClose(6. * 5., jit_fun1(5.)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ what: here 10.00""", testing_stream.output) def test_tap_jit_no_invars(self): def func(): # jitted function does not take arguments return hcb.id_print(42, output_stream=testing_stream) self.assertAllClose(42, jax.jit(func)()) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ 42""", testing_stream.output) def test_tap_jit_multiple_invars(self): def func(x1, x2): return hcb.id_print(x1 + x2, output_stream=testing_stream) self.assertAllClose(42, jax.jit(func)(40, 2)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ 42""", testing_stream.output) def test_tap_jit_constant(self): def func(x): return hcb.id_print(42, result=x, output_stream=testing_stream) self.assertAllClose(5, jax.jit(func)(5)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ 42""", testing_stream.output) def test_tap_jit_sequence1(self): def func(x): x1 = hcb.id_print(x, where="1", output_stream=testing_stream) return hcb.id_print(x1 + 1, where="2", output_stream=testing_stream) logging.info("%s: %s", self._testMethodName, jax.make_jaxpr(func)(1)) logging.info("%s: %s", self._testMethodName, jax.xla_computation(func, backend='cpu')(1).as_hlo_text()) self.assertEqual(2, jax.jit(func)(1)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ where: 1 1 where: 2 2""", testing_stream.output) def test_tap_jit2(self): """A sequence of JIT.""" def func(x): x1 = hcb.id_print(x, where="1", output_stream=testing_stream) x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream) return x2 self.assertEqual(2, jax.jit(func)(1)) self.assertEqual(11, jax.jit(func)(10)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ where: 1 1 where: 2 2 where: 1 10 where: 2 11""", testing_stream.output) def test_tap_jit_result_unused(self): """We can id_print even if we don't use the result.""" def func(x): hcb.id_print(x, where="1", output_stream=testing_stream) hcb.id_print(x + 1, where="2", output_stream=testing_stream) return x + 1 self.assertEqual(2, jax.jit(func)(1)) self.assertEqual(11, jax.jit(func)(10)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ where: 1 1 where: 2 2 where: 1 10 where: 2 11""", testing_stream.output) def test_tap_jit_nested(self): def func(x): x1 = hcb.id_print(x, where="1", output_stream=testing_stream) def func_nested(x): x2 = hcb.id_print(x + 1, where="nested", output_stream=testing_stream) return x2 x3 = jax.jit(func_nested)(x1) return hcb.id_print(x3 + 1, where="3", output_stream=testing_stream) self.assertEqual(3, jax.jit(func)(1)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ where: 1 1 where: nested 2 where: 3 3""", testing_stream.output) def test_tap_jit_devices(self): """Running on multiple devices.""" logging.info("%s: has devices %s", self._testMethodName, local_devices()) def func(x, device_id): x1 = 
hcb.id_print(x, dev=str(device_id), output_stream=testing_stream) x2 = hcb.id_print(x1 + 1, dev=str(device_id), output_stream=testing_stream) return x2 for d in local_devices(): self.assertEqual(112, jax.jit(func, device=d, static_argnums=1)(111, d.id)) hcb.barrier_wait() logging.info("%s: found output %s", self._testMethodName, testing_stream.output) self.assertEqual( len(local_devices()), len(re.findall(r"111", testing_stream.output))) self.assertEqual( len(local_devices()), len(re.findall(r"112", testing_stream.output))) @parameterized.named_parameters( jtu.cases_from_list( dict( testcase_name=f"_with_jit_{with_jit}", with_jit=with_jit) for with_jit in [True, False])) def test_tap_pytree(self, with_jit=False): def func(x, what=""): """Returns some pytrees depending on x""" if what == "pair_1_x": return (1, x) elif what == "pair_x_2x": return (x, 2 * x) elif what == "dict": return dict(a=2 * x, b=3 * x) else: assert False tap_count = 0 def tap_func(a, _, *, what=""): nonlocal tap_count tap_count += 1 self.assertEqual(func(5, what), a) transform = jax.jit if with_jit else lambda f: f for what in ("pair_1_x", "pair_x_2x", "dict"): transformed = transform( lambda x: hcb.id_tap( partial(tap_func, what=what), func(x, what), result=func(x * 2, what)) )(5) self.assertEqual(func(10, what), transformed) hcb.barrier_wait() # Wait for receivers to be done self.assertEqual(3, tap_count) @parameterized.named_parameters( jtu.cases_from_list( dict( testcase_name=f"_concurrent_{concurrent}", concurrent=concurrent) for concurrent in [True, False])) def test_tap_multiple(self, concurrent=False): """Call id_tap multiple times, concurrently or in sequence. """ if concurrent and jtu.device_under_test() in ["cpu", "gpu"]: # TODO(necula): if there is device side concurrency, outfeeds from # different computations can be interleaved. For example, it seems that # on GPU if multiple host threads run a jit computation, the multiple # computations are interleaved on the GPU. This can result in the outfeed # trains being interleaved, which will trigger an error. # The solution is to fix on GPU the receiving logic so that we can outfeed # the train as one tuple, and receive it one piece as a time. Then the # trains should be atomic. # See also b/160692602. raise SkipTest("concurrent id_tap not supported on CPU, GPU") received = set() count = 5 def pause_tap(idx, _): received.add(int(idx)) logging.info("Starting do_tap %s. Sleeping 1sec ...", idx) time.sleep(0.3) logging.info("Finish do_tap %s", idx) def do_tap(idx): jax.jit(lambda idx: hcb.id_tap(pause_tap, idx))(idx) if concurrent: threads = [ threading.Thread( name=f"enqueue_tap_{idx}", target=do_tap, args=(idx,)) for idx in range(count) ] [t.start() for t in threads] [t.join() for t in threads] else: for idx in range(count): do_tap(idx) hcb.barrier_wait() self.assertEqual(received, set(range(count))) # TODO(necula): see comment for test_multiple_tap. Here we disable also # on TPU, because the barrier_wait runs on all devices, including on the CPU # where it would run into concurrency problems. @skip("Concurrency not supported") def test_tap_multiple_barriers(self): """Call barrier_wait concurrently.""" def pause_tap(*args, **kwargs): logging.info("pause_tap waiting") time.sleep(0.3) logging.info("pause_tap done") def long_run(x): return hcb.id_tap(pause_tap, x) jax.jit(long_run)(5.) 
def try_barrier(idx): logging.info("Starting test barrier %s", idx) hcb.barrier_wait() logging.info("Finished test barrier %s", idx) threads = [ threading.Thread( name=f"barrier_{idx}", target=try_barrier, args=(idx,)) for idx in range(3) ] [t.start() for t in threads] [t.join() for t in threads] @parameterized.named_parameters( jtu.cases_from_list( dict( testcase_name=f"_with_jit_{with_jit}", with_jit=with_jit) for with_jit in [True, False])) def test_tap_cond(self, with_jit=False): """A conditional""" def func(x): x1 = hcb.id_print(x, where="1", output_stream=testing_stream) x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream) x4 = lax.cond(x % 2 == 0, lambda x: hcb.id_print(x, where="cond_t", output_stream=testing_stream), lambda x: hcb.id_print(-1, where="cond_f", result=x, output_stream=testing_stream), x2 + 1) x5 = hcb.id_print(x4 + 1, where="end", output_stream=testing_stream) return x5 transform = jax.jit if with_jit else lambda f: f self.assertEqual(4, transform(func)(1)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ where: 1 1 where: 2 2 where: cond_f -1 where: end 4""", testing_stream.output) @parameterized.named_parameters( jtu.cases_from_list( dict(testcase_name=f"_with_jit_{with_jit}", with_jit=with_jit) for with_jit in [True, False])) def test_tap_while_cond(self, with_jit=False): def func(x): x1 = hcb.id_print(x, where="1", output_stream=testing_stream) x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream) def body(x): x3 = hcb.id_print(x, where="w_b_1", output_stream=testing_stream) x4 = lax.cond(x % 2 == 0, lambda x: hcb.id_print(x, where="w_b_t", output_stream=testing_stream), lambda x: hcb.id_print(-1, where="w_b_f", result=x, output_stream=testing_stream), x3 + 1) return hcb.id_print(x4, where="w_b_2", output_stream=testing_stream) x10 = lax.while_loop(lambda x: x <= 3, body, x2) res = hcb.id_print(x10, where="end", output_stream=testing_stream) return res transform = jax.jit if with_jit else lambda f: f self.assertEqual(4, transform(func)(1)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ where: 1 1 where: 2 2 where: w_b_1 2 where: w_b_t 3 where: w_b_2 3 where: w_b_1 3 where: w_b_f -1 where: w_b_2 4 where: end 4""", testing_stream.output) def test_tap_jit_while_pred_tap(self): """While with printing in the conditional.""" def func(x): x1 = hcb.id_print(x, where="1") x10 = lax.while_loop(lambda x: hcb.id_print(x < 3, where="w_p", output_stream=testing_stream), lambda x: hcb.id_print(x + 1, where="w_b", output_stream=testing_stream), x1) res = hcb.id_print(x10, where="3", output_stream=testing_stream) return res self.assertEqual(3, jax.jit(func)(1)) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ where: w_p True where: w_b 2 where: w_p True where: w_b 3 where: w_p False where: 3 3""", testing_stream.output) @parameterized.named_parameters( jtu.cases_from_list( dict( testcase_name=f"_with_jit_{with_jit}", with_jit=with_jit) for with_jit in [True, False])) def test_tap_scan_cond(self, with_jit=True): def func(x): x1 = hcb.id_print(x, where="1", output_stream=testing_stream) x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream) def body(c, x): x3 = hcb.id_print(x, where="s_1", output_stream=testing_stream) x4 = lax.cond(x % 2 == 0, lambda x: hcb.id_print(x, where="s_t", output_stream=testing_stream), lambda x: hcb.id_print(-1, where="s_f", result=x, output_stream=testing_stream), x3 + 1) return (c, hcb.id_print(x4, where="s_2", output_stream=testing_stream)) _, x10 = lax.scan(body, x2, jnp.arange(3)) 
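      # The taps inside the scan body fire once per scan step, so the expected
      # output checked below contains an s_1/s_t-or-s_f/s_2 group for each of
      # the three elements of jnp.arange(3).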
res = hcb.id_print(x10, where="10", output_stream=testing_stream) return res if with_jit: func = jax.jit(func) res = func(1) self.assertAllClose(jnp.array([1, 2, 3]), res) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ where: 1 1 where: 2 2 where: s_1 0 where: s_t 1 where: s_2 1 where: s_1 1 where: s_f -1 where: s_2 2 where: s_1 2 where: s_t 3 where: s_2 3 where: 10 [1 2 3]""", testing_stream.output) testing_stream.reset() @parameterized.named_parameters( jtu.cases_from_list( dict( testcase_name=f"_shape_{shape}_dtype_{np.dtype(dtype).name}_nr_args={nr_args}", shape=shape, dtype=dtype, nr_args=nr_args) for nr_args in [1, 2] for shape in [(), (2,), (2, 3), (2, 3, 4)] for dtype in jtu.dtypes.all)) def test_tap_jit_dtypes(self, nr_args=2, dtype=jnp.int16, shape=(2,)): if dtype in (jnp.complex64, jnp.complex128, jnp.bool_): raise SkipTest(f"host_callback not implemented for {dtype}.") if dtype == np.bool_: args = [self.rng().choice(a=[True, False], size=shape)] else: args = [jnp.arange(np.prod(shape), dtype=dtype).reshape(shape)] if nr_args > 1: args = args * nr_args jit_fun1 = jax.jit(lambda xs: hcb.id_print( xs, a_new_test="************", testcase_name=f"shape_{shape}_dtype_{dtype}_nr_args={nr_args}")) res = jit_fun1(args) self.assertAllClose(args, res, check_dtypes=True) def test_tap_jit_large(self): arg = jnp.arange(10000, dtype=jnp.int32).reshape((10, 10, 5, -1)) jax.jit(hcb.id_print)(arg) def test_tap_jit_several_together(self): arg = jnp.arange(50, dtype=jnp.int32).reshape((10, 5)) jax.jit(lambda x, y: hcb.id_print((x, y, x * 2.)))(arg, jnp.ones(100, dtype=jnp.int32)) def test_tap_jit_interleaving(self): # Several jit's without data dependencies; they may interfere count = 0 # Count tap invocations nr_arrays = 5 def tap_func(arg, _): nonlocal count assert len(arg) == nr_arrays count += 1 # This is the function that we'll run multiple times def func(x, count): for i in range(count): x = hcb.id_tap(tap_func, [x + i for i in range(nr_arrays)])[-1] return x x = jnp.array(1, dtype=np.int32) res = 0 for _ in range(10): # No dependencies between the jit invocations res += jax.jit(lambda x: func(x, 10))(x) hcb.barrier_wait() self.assertEqual(100, count) def test_tap_jit_tap_exception(self): if not FLAGS.jax_host_callback_outfeed: raise SkipTest("TODO: implement error handling for customcall") # Simulate a tap error def tap_err(*args, **kwargs): raise NotImplementedError def func(x): x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream) x2 = hcb.id_tap(tap_err, x1 + 1) x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream) return x3 res = jax.jit(func)(0) # No error yet with self.assertRaises(hcb.CallbackException): hcb.barrier_wait() # Even though the receiver thread raised, the main thread should still # return 3. 
self.assertEqual(3, res) # We should have received all others assertMultiLineStrippedEqual(self, """ what: x1 1 what: x3 3""", testing_stream.output) def test_tap_while(self): """Executing while, even without JIT uses compiled code""" y = jnp.ones(5) # captured const def func(x): return lax.while_loop( lambda c: c[1] < 5, lambda c: (y, hcb.id_print(c[1], output_stream=testing_stream) + 1), (x, 1)) func(y) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ 1 2 3 4""", testing_stream.output) def test_tap_jvp(self): jvp_fun1 = lambda x, xt: jax.jvp(fun1, (x,), (xt,)) res_primals, res_tangents = jvp_fun1(jnp.float32(5.), jnp.float32(0.1)) self.assertAllClose(100., res_primals, check_dtypes=False) self.assertAllClose(4., res_tangents, check_dtypes=False) hcb.barrier_wait() if FLAGS.jax_host_callback_ad_transforms: assertMultiLineStrippedEqual(self, """ transforms: ['jvp'] what: a * 2 ( 10.00 0.20 ) transforms: ['jvp'] what: y * 3 ( 30.00 0.60 )""", testing_stream.output) else: assertMultiLineStrippedEqual(self, """ what: a * 2 10.00 what: y * 3 30.00""", testing_stream.output) def test_tap_grad_primal_unused(self): # The output of id_print is not needed for backwards pass def func(x): return 2. * hcb.id_print(x * 3., what="x * 3", output_stream=testing_stream) grad_func = jax.grad(func) arg = jnp.float32(5.) jaxpr = str(jax.make_jaxpr(grad_func)(arg)) # making the Jaxpr does not print anything hcb.barrier_wait() treedef = tree_util.tree_structure(arg) if FLAGS.jax_host_callback_ad_transforms: assertMultiLineStrippedEqual(self, f""" {{ lambda ; a:f32[]. let b:f32[] = mul a 3.00 c:f32[] = outside_call[ arg_treedef={treedef} callback=... identity=True transforms=() ] b _:f32[] = mul c 2.00 d:f32[] = mul 1.00 2.00 e:f32[] = outside_call[ arg_treedef={treedef} callback=... identity=True transforms=(('jvp',), ('transpose',)) ] d f:f32[] = mul e 3.00 in (f,) }}""", jaxpr) else: assertMultiLineStrippedEqual(self, f""" {{ lambda ; a:f32[]. let b:f32[] = mul a 3.00 c:f32[] = outside_call[ arg_treedef={treedef} callback=... identity=True ] b _:f32[] = mul c 2.00 d:f32[] = mul 1.00 2.00 e:f32[] = mul d 3.00 in (e,) }}""", jaxpr) assertMultiLineStrippedEqual(self, "", testing_stream.output) testing_stream.reset() res_grad = grad_func(arg) hcb.barrier_wait() self.assertAllClose(6., res_grad, check_dtypes=False) if FLAGS.jax_host_callback_ad_transforms: assertMultiLineStrippedEqual(self, """ what: x * 3 15.00 transforms: ['jvp', 'transpose'] what: x * 3 2.00""", testing_stream.output) else: assertMultiLineStrippedEqual(self, """ what: x * 3 15.00""", testing_stream.output) def test_tap_grad_simple(self): def func(x): y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream) return x * hcb.id_print(y * 3., what="y * 3", output_stream=testing_stream) grad_func = jax.grad(func) res_grad = grad_func(jnp.float32(5.)) self.assertAllClose(2. * 5. * 6., res_grad, check_dtypes=False) hcb.barrier_wait() if FLAGS.jax_host_callback_ad_transforms: assertMultiLineStrippedEqual(self, """ what: x * 2 10.00 what: y * 3 30.00 transforms: ['jvp', 'transpose'] what: y * 3 5.00 transforms: ['jvp', 'transpose'] what: x * 2 15.00""", testing_stream.output) else: assertMultiLineStrippedEqual(self, """ what: x * 2 10.00 what: y * 3 30.00""", testing_stream.output) def test_tap_grad_grad(self): def func(x): y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream) return x * (y * 3.) grad_func = jax.grad(jax.grad(func)) # making the Jaxpr does not print anything _ = jax.make_jaxpr(grad_func)(5.) 
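    # Tracing alone must not trigger the tap: after this barrier the testing
    # stream is expected to be empty, and output should appear only once
    # grad_func is actually executed below.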
hcb.barrier_wait() assertMultiLineStrippedEqual(self, "", testing_stream.output) res_grad = grad_func(jnp.float32(5.)) self.assertAllClose(12., res_grad, check_dtypes=False) hcb.barrier_wait() if FLAGS.jax_host_callback_ad_transforms: assertMultiLineStrippedEqual(self, """ what: x * 2 10.00 transforms: ['jvp', 'transpose'] what: x * 2 15.00 transforms: ['jvp', 'transpose', 'jvp', 'transpose'] what: x * 2 2.00 transforms: ['jvp', 'transpose'] what: x * 2 3.00""", testing_stream.output) else: assertMultiLineStrippedEqual(self, """ what: x * 2 10.00""", testing_stream.output) def test_tap_grad_pytree(self): def func(x): x4, x5 = hcb.id_print((x * 2., x * 3.), what="pair", result=(x * 4., x * 5.), output_stream=testing_stream) return x4 + 2. * x5 x = jnp.float32(5.) grad_func = jax.grad(func) print(jax.make_jaxpr(grad_func)(x)) res_grad = grad_func(x) self.assertAllClose(14., res_grad, check_dtypes=False) hcb.barrier_wait() if FLAGS.jax_host_callback_ad_transforms: assertMultiLineStrippedEqual(self, """ what: pair ( 10.00 15.00 ) transforms: ['jvp', 'transpose'] what: pair ( 0.00 0.00 )""", testing_stream.output) else: assertMultiLineStrippedEqual(self, """ what: pair ( 10.00 15.00 )""", testing_stream.output) def test_tap_jvp_float0(self): def f(x, yint): x, yint = hcb.id_tap(lambda arg, _: arg, (x, yint)) return x * yint res = jax.jvp(f, (2., 3), (0.2, np.zeros((), dtypes.float0))) self.assertAllClose((6., 0.6), res) def test_tap_grad_float0(self): def func(x, yint): x, yint = hcb.id_print((x, yint), what="pair", output_stream=testing_stream) return x * yint grad_func = jax.grad(func) res_grad = grad_func(jnp.float32(5.), jnp.int32(2)) self.assertAllClose(2., res_grad, check_dtypes=False) hcb.barrier_wait() if FLAGS.jax_host_callback_ad_transforms: assertMultiLineStrippedEqual(self, """ what: pair ( 5.00 2 ) transforms: ['jvp', 'transpose'] what: pair ( 2.00 False )""", testing_stream.output) else: assertMultiLineStrippedEqual(self, """ what: pair ( 5.00 2 )""", testing_stream.output) def test_tap_grad_float0_result(self): # https://github.com/google/jax/issues/7340 # x is a Tuple[f32[2], s32[3]] x = (np.array([.7, .8], dtype=np.float32), np.array([11, 12, 13], dtype=np.int32)) def f_jax(x): x = hcb.id_print(x, result=x, output_stream=testing_stream) # result= is important return (3. 
* x[0], x[1]) def f_jax_vjp(x): res, pullback = jax.vjp(f_jax, x) g, = pullback((np.ones(x[0].shape, dtype=x[0].dtype), np.zeros(x[1].shape, dtype=dtypes.float0))) return g g = f_jax_vjp(x) self.assertAllClose(np.array([3., 3.], dtype=np.float32), g[0]) self.assertEqual(dtypes.float0, g[1].dtype) hcb.barrier_wait() if FLAGS.jax_host_callback_ad_transforms: assertMultiLineStrippedEqual(self, """ ( [0.70 0.80] [11 12 13] ) transforms: ['jvp', 'transpose'] ( [0.00 0.00] [False False False] )""", testing_stream.output) else: assertMultiLineStrippedEqual(self, """ ( [0.70 0.80] [11 12 13] )""", testing_stream.output) def test_tap_higher_order_grad_float0_result(self): # https://github.com/google/jax/issues/7340 # x is a Tuple[f32[2], s32[3]] x = (np.array([.7, .8], dtype=np.float32), np.array([11, 12, 13], dtype=np.int32)) def f_jax(x): x = hcb.id_print(x, result=x, output_stream=testing_stream) # result= is important return (jnp.sin(x[0]), x[1]) def wrap_vjp(f, args, res_f_of_args): # Given a function "f" and "args" return the f_vjp and args_vjp def make_ct(res): res_dtype = np.result_type(res) if res_dtype == dtypes.float0: return res ct_dtype = core.primal_dtype_to_tangent_dtype(res_dtype) return np.ones(np.shape(res), dtype=ct_dtype) cts = tree_util.tree_map(make_ct, res_f_of_args) def f_vjp(args, cts): res, pullback = jax.vjp(f, *args) return pullback(cts) return (f_vjp, (args, cts)) res = f_jax(x) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ ( [0.70 0.80] [11 12 13] )""", testing_stream.output) testing_stream.reset() # 1st order f_jax_vjp1, args_vjp1 = wrap_vjp(f_jax, (x,), res) res_vjp1 = f_jax_vjp1(*args_vjp1) hcb.barrier_wait() if FLAGS.jax_host_callback_ad_transforms: assertMultiLineStrippedEqual(self, """ ( [0.70 0.80] [11 12 13] ) transforms: ['jvp', 'transpose'] ( [0.00 0.00] [False False False] )""", testing_stream.output) else: assertMultiLineStrippedEqual(self, """ ( [0.70 0.80] [11 12 13] )""", testing_stream.output) testing_stream.reset() # 2nd order f_jax_vjp2, args_vjp2 = wrap_vjp(f_jax_vjp1, args_vjp1, res_vjp1) res_vjp2 = f_jax_vjp2(*args_vjp2) # 3rd order f_jax_vjp3, args_vjp3 = wrap_vjp(f_jax_vjp2, args_vjp2, res_vjp2) _ = f_jax_vjp3(*args_vjp3) def test_tap_vmap(self): vmap_fun1 = jax.vmap(fun1) vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)]) vmap_fun1(vargs) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ transforms: [('batch', {'batch_dims': (0,)})] what: a * 2 [ 8.00 10.00] transforms: [('batch', {'batch_dims': (0,)})] what: y * 3 [24.00 30.00]""", testing_stream.output) def test_tap_vmap_not_batched(self): x = 3. 
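    # x stays a closed-over constant while y is the mapped argument, so the
    # tap's batching transform below reports batch_dims (None, 0).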
def func(y): # x is not mapped, y is mapped _, y = hcb.id_print((x, y), output_stream=testing_stream) return x + y vmap_func = jax.vmap(func) vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)]) _ = vmap_func(vargs) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ transforms: [('batch', {'batch_dims': (None, 0)})] ( 3.00 [4.00 5.00] )""", testing_stream.output) def test_tap_vmap_vmap(self): # A 2D tensor with x[i, j] = i + j using 2 vmap def sum(x, y): return hcb.id_print(x + y, output_stream=testing_stream) def sum_rows(xv, y): return jax.vmap(sum, in_axes=(0, None))(xv, y) def sum_all(xv, yv): return jax.vmap(sum_rows, in_axes=(None, 0))(xv, yv) xv = jnp.arange(5, dtype=np.int32) yv = jnp.arange(3, dtype=np.int32) # assertMultiLineStrippedEqual(self, "", str(jax.make_jaxpr(sum_all)(xv, yv))) _ = sum_all(xv, yv) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ transforms: [('batch', {'batch_dims': (0,)}), ('batch', {'batch_dims': (0,)})] [[0 1 2 3 4] [1 2 3 4 5] [2 3 4 5 6]]""", testing_stream.output) def test_tap_vmap_while(self): """Vmap of while.""" def func(x): # like max(x, 2) x1 = hcb.id_print(x, where="before:x", output_stream=testing_stream) x2 = lax.while_loop( lambda x: x < 2, lambda x: hcb.id_print( x + 1, where="body:x+1", output_stream=testing_stream), x1) res = hcb.id_print(x2, where="after:x", output_stream=testing_stream) return res inputs = np.arange(5, dtype=np.int32) self.assertAllClose( np.array([2, 2, 2, 3, 4]), jax.jit(jax.vmap(func))(inputs), check_dtypes=False) hcb.barrier_wait() assertMultiLineStrippedEqual( self, """ transforms: [('batch', {'batch_dims': (0,)})] where: before:x [0 1 2 3 4] transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1 [1 2 3 4 5] transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1 [2 3 3 4 5] transforms: [('batch', {'batch_dims': (0,)})] where: after:x [2 2 2 3 4]""", testing_stream.output) def test_tap_vmap_while_tap_cond(self): """Vmap of while, with a tap in the conditional.""" def func(x): # like max(x, 2) x1 = hcb.id_print(x, where="1", output_stream=testing_stream) x2 = lax.while_loop(lambda x: hcb.id_print(x < 2, where="w_c", output_stream=testing_stream), lambda x: hcb.id_print(x + 1, where="w_b", output_stream=testing_stream), x1) res = hcb.id_print(x2, where="3", output_stream=testing_stream) return res inputs = np.arange(5, dtype=np.int32) res = jax.jit(jax.vmap(func))(inputs) hcb.barrier_wait() self.assertAllClose(np.array([2, 2, 2, 3, 4]), res, check_dtypes=False) assertMultiLineStrippedEqual(self, """ transforms: [('batch', {'batch_dims': (0,)})] where: 1 [0 1 2 3 4] transforms: [('batch', {'batch_dims': (0,)})] where: w_c [ True True False False False] transforms: [('batch', {'batch_dims': (0,)})] where: w_b [1 2 3 4 5] transforms: [('batch', {'batch_dims': (0,)})] where: w_c [ True False False False False] transforms: [('batch', {'batch_dims': (0,)})] where: w_b [2 3 3 4 5] transforms: [('batch', {'batch_dims': (0,)})] where: w_c [False False False False False] transforms: [('batch', {'batch_dims': (0,)})] where: 3 [2 2 2 3 4]""", testing_stream.output) def test_tap_transforms_old_doc(self): if not FLAGS.jax_host_callback_ad_transforms: raise unittest.SkipTest("disabled for new behavior") # Examples from the documentation def power3(x): y = x * x # Print both 'x' and 'x^2'. Must pack as a tuple. _, y = hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream) return y * x print(f"impl = {power3(3.)}") hcb.barrier_wait() expected = """ what: x,x^2 ( 3. 9. 
)""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() print(f"vmap = {jax.vmap(power3)(np.arange(3.))}") hcb.barrier_wait() expected = """ transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2 ( [0. 1. 2.] [0. 1. 4.] )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() print(f"jvp = {jax.jvp(power3, (3.,), (0.1,))}") hcb.barrier_wait() expected = """ transforms: ['jvp'] what: x,x^2 ( ( 3. 9. ) ( 0.1 0.6 ) )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() print(f"grad = {jax.grad(power3)(3.)}") hcb.barrier_wait() expected = """ what: x,x^2 ( 3. 9. ) transforms: ['jvp', 'transpose'] what: x,x^2 ( 0. 3. )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() print(f"vmap o grad {jax.vmap(jax.grad(power3))(np.array([2., 3.]))}") hcb.barrier_wait() expected = """ transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2 ( [2. 3.] [4. 9.] ) transforms: ['jvp', 'transpose', ('batch', {'batch_dims': (None, 0)})] what: x,x^2 ( 0. [2. 3.] )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) def test_tap_transforms_doc(self): # Examples from the documentation if FLAGS.jax_host_callback_ad_transforms: raise unittest.SkipTest("disabled for old behavior") def power3(x): y = x * x # Print both 'x' and 'x^2'. Must pack as a tuple. hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream) return y * x print(f"impl = {power3(3.)}") hcb.barrier_wait() expected = """ what: x,x^2 ( 3. 9. )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() print(f"jvp = {jax.jvp(power3, (3.,), (0.1,))}") hcb.barrier_wait() expected = """ what: x,x^2 ( 3. 9. )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() @jax.custom_jvp def print_tangents(arg): return None @print_tangents.defjvp def print_tangents_jvp(primals, tangents): arg_dot, = tangents hcb.id_print(arg_dot, what="tangents", output_stream=testing_stream) return primals, tangents def power3_with_tangents(x): y = x * x # Print both 'x' and 'x^2'. Must pack as a tuple. hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream) print_tangents((x, y)) return y * x print(f"jvp = {jax.jvp(power3_with_tangents, (3.,), (0.1,))}") hcb.barrier_wait() expected = """ what: x,x^2 ( 3. 9. ) what: tangents ( 0.1 0.6 )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() print(f"grad = {jax.grad(power3)(3.)}") hcb.barrier_wait() # Only the primals by default expected = """ what: x,x^2 ( 3. 9. )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() @jax.custom_vjp def print_cotangents(arg): # Must return the argument for which we want the cotangent. return arg # f_fwd: a -> (b, residual) def print_cotangents_fwd(arg): return print_cotangents(arg), None # f_bwd: (residual, CT b) -> [CT a] def print_cotangents_bwd(residual, ct_b): hcb.id_print(ct_b, what="cotangents", output_stream=testing_stream) return ct_b, print_cotangents.defvjp(print_cotangents_fwd, print_cotangents_bwd) def power3_with_cotangents(x): y = x * x # Print both 'x' and 'x^2'. Must pack as a tuple. 
hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream) # Must use the output of print_cotangents (x1, y1) = print_cotangents((x, y)) return y1 * x1 print(f"grad = {jax.grad(power3_with_cotangents)(3.)}") hcb.barrier_wait() expected = """ what: x,x^2 ( 3. 9. ) what: cotangents ( 9. 3. )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() # TODO: grad of grad print(f"vmap = {jax.vmap(power3)(np.array([2., 3.]))}") hcb.barrier_wait() expected = """ transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2 ( [2. 3.] [4. 9.] )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() print(f"vmap o grad {jax.vmap(jax.grad(power3))(np.array([2., 3.]))}") hcb.barrier_wait() expected = """ transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2 ( [2. 3.] [4. 9.] )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() print(f"vmap o grad {jax.vmap(jax.grad(power3_with_cotangents))(np.array([2., 3.]))}") hcb.barrier_wait() expected = """ transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2 ( [2. 3.] [4. 9.] ) transforms: [('batch', {'batch_dims': (0, 0)})] what: cotangents ( [4. 9.] [2. 3.] )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() print(f"grad o remat = {jax.grad(lambda x: power3(ad_checkpoint.checkpoint(power3)(x)))(3.)}") hcb.barrier_wait() expected = """ what: x,x^2 ( 3. 9. ) what: x,x^2 ( 27. 729. ) what: x,x^2 ( 3. 9. )""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() def test_tap_pmap(self): if len(local_devices()) < 2: raise SkipTest("test requires at least 2 devices") def power3(x): y = x * x # Print both 'x' and 'x^2'. Must pack as a tuple. _, y = hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream, tap_with_device=True) return y * x pmap_power3 = jax.pmap(power3, devices=local_devices()) xv = np.array([3, 4], dtype=np.int32) res = pmap_power3(xv) hcb.barrier_wait() self.assertAllClose(xv * xv * xv, res, check_dtypes=False) # Assertion text is for 2 devices (also works for 1 device) assertMultiDeviceOutputEqual( self, """ device: cpu:0 what: x,x^2 ( 3 9 ) device: cpu:1 what: x,x^2 ( 4 16 )""") def test_tap_pmap_vmap(self): # A matrix M[ij] = i * 10 + j nr_devices = len(local_devices()) shape = (nr_devices, 3) matrix = np.fromfunction(lambda i, j: 10. * i + j, shape, dtype=np.int32) def fun1(x, do_print=False): # x: i32 return maybe_print(do_print, x * 2, "x * 2", tap_with_device=True) pmap_vmap_fun1 = jax.pmap( jax.vmap(partial(fun1, do_print=True)), devices=local_devices()) res = pmap_vmap_fun1(matrix) hcb.barrier_wait() expected_res = jax.pmap( jax.vmap(partial(fun1, do_print=False)), devices=local_devices())( matrix) self.assertAllClose(expected_res, res, check_dtypes=False) # Assertion text is for 2 devices (also works for 1 device) assertMultiDeviceOutputEqual(self, """ device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2 [0.00 2.00 4.00] device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2 [20.00 22.00 24.00]""") def test_tap_pmap_pmap_vmap(self): # A matrix M[ijk] = i * 100 + j * 10 + k nr_devices = len(local_devices()) if nr_devices % 2 != 0: raise SkipTest("test works only on even number of devices") shape = (2, nr_devices // 2, 3) matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. 
* j + k, shape, dtype=np.float32) def fun1(x, do_print=False): # x: f32 y = maybe_print(do_print, x * 2., "x * 2", tap_with_device=True) return y ** 2 pmap_fun1 = jax.pmap( jax.pmap(jax.vmap(partial(fun1, do_print=True))), devices=local_devices()) res = pmap_fun1(matrix) hcb.barrier_wait() expected_res = jax.pmap( jax.pmap(jax.vmap(partial(fun1, do_print=False))), devices=local_devices())( matrix) self.assertAllClose(expected_res, res, check_dtypes=False) # Assertion text is for 2 devices (also works for 1 device) assertMultiDeviceOutputEqual(self, """ device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2 [0.00 2.00 4.00] device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2 [200.00 202.00 204.00]""") @ignore_jit_of_pmap_warning() def test_tap_pmap_pmap_extra(self): """pmap of a pmap surrounded by extra code.""" # A matrix M[ij] = i * 10 + j nr_devices = len(local_devices()) if nr_devices != 2: raise SkipTest("test works only on 2 devices") shape = (2, 1, 3) matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape, dtype=np.float32) def fun(xv, do_print=False): # This will be printed on all devices, with shape [1, 3] xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True) res = jax.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv) # This will be printed on all devices, with shape [1, 3] return maybe_print(do_print, res + 1., "after", tap_with_device=True) res = jax.pmap(partial(fun, do_print=True))(matrix) self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False) hcb.barrier_wait() # Assertion text is for 2 devices (also works for 1 device) assertMultiDeviceOutputEqual(self, """ device: cpu:0 what: before [[1.00 2.00 3.00]] device: cpu:0 what: inside [2.00 4.00 6.00] device: cpu:0 what: after [[3.00 5.00 7.00]] device: cpu:1 what: before [[101.00 102.00 103.00]] device: cpu:1 what: inside [202.00 204.00 206.00] device: cpu:1 what: after [[203.00 205.00 207.00]]""") def test_tap_jvp_pmap_vmap(self): # A matrix M[ijk] = i * 100 + j * 10 * k nr_devices = len(local_devices()) shape = (nr_devices, 2, 3) matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. 
* j + k, shape, dtype=np.float32) def fun(xv, do_print=False): # x: f32[3] return jax.jvp(jax.pmap(jax.vmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True))), (xv,), (.1 * jnp.ones_like(xv),)) res = fun(matrix, do_print=True) hcb.barrier_wait() expected_res = fun(matrix, do_print=False) self.assertAllClose(expected_res, res, check_dtypes=False) # Assertion text is for 2 devices (also works for 1 device) # Device 0 will get to execute jax.jvp(jax.vmap(...)) for matrix[0, :, :] if FLAGS.jax_host_callback_ad_transforms: assertMultiDeviceOutputEqual(self, """ device: cpu:0 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2 ( [[ 0.00 2.00 4.00] [20.00 22.00 24.00]] [[0.20 0.20 0.20] [0.20 0.20 0.20]] ) device: cpu:1 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2 ( [[200.00 202.00 204.00] [220.00 222.00 224.00]] [[0.20 0.20 0.20] [0.20 0.20 0.20]] )""") else: assertMultiDeviceOutputEqual(self, """ device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2 [[ 0.00 2.00 4.00] [20.00 22.00 24.00]] device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2 [[200.00 202.00 204.00] [220.00 222.00 224.00]]""") def test_tap_vmap_pmap(self): # A matrix M[ijk] = i * 100 + j * 10 * k nr_devices = len(local_devices()) shape = (2, nr_devices, 3) matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape, dtype=np.float32) def fun(xv, do_print=False): # x: f32[3] return jax.vmap(jax.pmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)))(xv) res = fun(matrix, do_print=True) hcb.barrier_wait() expected_res = fun(matrix, do_print=False) self.assertAllClose(expected_res, res, check_dtypes=False) # Assertion text is for 2 devices (also works for 1 device) # Device 0 will get to execute jax.jvp(jax.vmap(...)) for matrix[:, 0, :] assertMultiDeviceOutputEqual(self, """ device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2 [[ 0.00 2.00 4.00] [200.00 202.00 204.00]] device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2 [[ 20.00 22.00 24.00] [220.00 222.00 224.00]]""") @ignore_jit_of_pmap_warning() def test_tap_jit_pmap_extra(self): """jit of a pmap surrounded by extra code.""" # A matrix M[ij] = i * 10 + j nr_devices = len(local_devices()) assert nr_devices in (1, 2) shape = (nr_devices, 3) matrix = np.fromfunction(lambda i, j: 10. 
* i + j, shape, dtype=np.float32) def fun(xv, do_print=False): # This will be printed on all devices with shape (nr_devices, 3) xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True) res = jax.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv) # This will be printed on all devices with shape (nr_devices, 3) return maybe_print(do_print, res + 1., "after", tap_with_device=True) res = jax.jit(partial(fun, do_print=True))(matrix) self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False) hcb.barrier_wait() if len(local_devices()) == 2: assertMultiDeviceOutputEqual(self, """ device: cpu:0 what: before [[ 1.00 2.00 3.00] [11.00 12.00 13.00]] device: cpu:0 what: inside [2.00 4.00 6.00] device: cpu:0 what: after [[ 3.00 5.00 7.00] [23.00 25.00 27.00]] device: cpu:1 what: before [[ 1.00 2.00 3.00] [11.00 12.00 13.00]] device: cpu:1 what: inside [22.00 24.00 26.00] device: cpu:1 what: after [[ 3.00 5.00 7.00] [23.00 25.00 27.00]]""") else: assert len(local_devices()) == 1 assertMultiDeviceOutputEqual(self, """ device: cpu:0 what: before [[1.00 2.00 3.00]] device: cpu:0 what: inside [2.00 4.00 6.00] device: cpu:0 what: after [[3.00 5.00 7.00]]""") @unittest.skip("cond of pmap does not work in JAX. Issue #5178.") def test_tap_cond_pmap(self): # A matrix M[ij] = i * 10 + j nr_devices = len(local_devices()) shape = (nr_devices, 3) matrix = np.fromfunction(lambda i, j: 10. * i + j, shape, dtype=np.float32) def fun1(x, do_print=False): return maybe_print(do_print, x * 2., "x * 2") def fun2(cond, xv, do_print=False): return lax.cond(cond, jax.pmap(partial(fun1, do_print=do_print)), lambda xv: xv, xv) res = fun2(True, matrix) self.assertAllClose(fun2(True, matrix, do_print=False), res, check_dtypes=False) hcb.barrier_wait() assertMultiLineStrippedEqual(self, """ TBD""", testing_stream.output) @jtu.skip_on_devices("cpu", "gpu") # TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall def test_tap_pjit(self): devices = np.array(local_devices()) nr_devices = len(devices) if nr_devices < 2: raise SkipTest("test requires at least 2 devices") print(f"test_tap_pjit is running on devices {devices}.") # x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...] # y: i32[3, 4] x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3] y = jnp.ones((3, 4), np.int32) @partial(jax.named_call, name="fun1") # for xprof debugging def fun1(x, do_print=False): z = jnp.dot(x, y) return maybe_print(do_print, z, "z", tap_with_device=True) res0 = fun1(x, do_print=False) pjit_fun1 = pjit.pjit( partial(fun1, do_print=True), in_axis_resources=(P("d"),), out_axis_resources=P("d")) with maps.mesh(devices, ["d"]): # Print the internal IR helper_log_ir( f"{self._testMethodName}.pjit", pjit_fun1, x, num_partitions=nr_devices) res = pjit_fun1(x) self.assertAllClose(res0, res) hcb.barrier_wait("before check") # Assertion text is for 2 devices (also works for 1 device) # Note that a single call is made. assertMultiDeviceOutputEqual( self, """ device: cpu:0 what: z [[ 3 3 3 3] [33 33 33 33]]""") def test_tap_scan_custom_jvp(self): """custom JVP, inside scan. This exercises the custom_jvp_call_jaxpr primitives.""" @jax.custom_jvp def f(x): return x * hcb.id_print(x, output_stream=testing_stream, what="x") @f.defjvp def f_jvp(primals, tangents): x, = primals x_dot, = tangents primal_out = f(x) tangent_out = 3. 
* x * hcb.id_print(x_dot, output_stream=testing_stream, what="x_dot") return primal_out, tangent_out def g(x): # Sum f(x_i) return lax.scan(lambda carry, inp: (carry + f(inp), 0.), np.full(x.shape[1:], 0.), # Like x w/o leading dim x)[0] arg = np.full((2,), 0.7) self.assertAllClose(0.7 * 0.7 * 2, g(arg)) hcb.barrier_wait() self.assertMultiLineStrippedEqual(""" what: x 0.7 what: x 0.7""", testing_stream.output) testing_stream.reset() self.assertAllClose(np.array([2.1, 2.1]), jax.grad(g)(arg), check_dtypes=False) hcb.barrier_wait() self.assertMultiLineStrippedEqual(""" what: x 0.7 what: x 0.7 transforms: ['transpose'] what: x_dot 2.1 transforms: ['transpose'] what: x_dot 2.1""", testing_stream.output) def test_tap_scan_custom_vjp(self): """custom VJP, inside scan. This exercises the custom_vjp_call_jaxpr primitives.""" @jax.custom_vjp def f(x): return x * hcb.id_print(x, output_stream=testing_stream, what="x") # f_fwd: a -> (b, residual) def f_fwd(x): return f(x), 3. * x # f_bwd: (residual, CT b) -> [CT a] def f_bwd(residual, ct_b): return residual * hcb.id_print(ct_b, output_stream=testing_stream, what="ct_b"), f.defvjp(f_fwd, f_bwd) def g(x): # Sum f(x_i) return lax.scan(lambda carry, inp: (carry + f(inp), 0.), np.full(x.shape[1:], 0.), # Like x w/o leading dim x)[0] arg = np.full((2,), 0.7) self.assertAllClose(0.7 * 0.7 * 2, g(arg)) hcb.barrier_wait() self.assertMultiLineStrippedEqual(""" what: x 0.7 what: x 0.7""", testing_stream.output) testing_stream.reset() self.assertAllClose(np.array([2.1, 2.1]), jax.grad(g)(arg), check_dtypes=False) hcb.barrier_wait() self.assertMultiLineStrippedEqual(""" what: x 0.7 what: x 0.7 what: ct_b 1. what: ct_b 1.""", testing_stream.output) def test_tap_mask(self): @partial(jax.mask, in_shapes=['n'], out_shape='') def padded_sum(x): three_x = hcb.id_print((x, 2 * x), result=3 * x, what="x", output_stream=testing_stream) return jnp.sum(three_x) x = np.arange(5.) self.assertAllClose(9., padded_sum([x], dict(n=3))) hcb.barrier_wait() self.assertMultiLineStrippedEqual(""" transforms: [('mask', {'logical_shapes': 5})] what: x ( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )""", testing_stream.output) testing_stream.reset() # With VMAP xv = np.arange(10.).reshape((2, 5)) # logical_shape = 5 self.assertAllClose( np.array([9., 78.]), # batch_size = 2, n=3 and 4 for the two elements jax.vmap(padded_sum)([xv], dict(n=np.array([3., 4.])))) hcb.barrier_wait() self.assertMultiLineStrippedEqual(""" transforms: [('mask', {'logical_shapes': 5}), ('batch', {'batch_dims': (0, 0, 0, 0)})] what: x ( ( [[0. 1. 2. 3. 4.] [5. 6. 7. 8. 9.]] [[ 0. 2. 4. 6. 8.] [10. 12. 14. 16. 18.]] ) ( ( [3. 4.] ) ( [3. 4.] ) ) )""", testing_stream.output) testing_stream.reset() # With JVP self.assertAllClose((9., 0.9), jax.jvp(lambda arg: padded_sum([arg], dict(n=3)), (x,), (x * 0.1,))) hcb.barrier_wait() if FLAGS.jax_host_callback_ad_transforms: self.assertMultiLineStrippedEqual(""" transforms: [('mask', {'logical_shapes': 5}), 'jvp'] what: x ( ( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) ) ( ( [0. 0.1 0.2 0.3 0.4] [0. 0.2 0.4 0.6 0.8] ) ( ( False ) ( False ) ) ) )""", testing_stream.output) else: self.assertMultiLineStrippedEqual(""" transforms: [('mask', {'logical_shapes': 5})] what: x ( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] 
) ( ( 3 ) ( 3 ) ) )""", testing_stream.output) testing_stream.reset() # Now with JIT self.assertAllClose(9., jax.jit(padded_sum)([x], dict(n=3))) hcb.barrier_wait() self.assertMultiLineStrippedEqual(""" transforms: [('mask', {'logical_shapes': 5})] what: x ( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )""", testing_stream.output) def test_tap_callback_delay(self): hcb.callback_extra = lambda dev: time.sleep(1) def func(x): for i in range(5): x = hcb.id_print(x * i, what="x times i") return x jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3))) def test_tap_callback_delay_barrier(self): hcb.callback_extra = lambda dev: time.sleep(2) def func(x): for i in range(1, 4): x = hcb.id_print(x * i, what=f"x times {i}", output_stream=testing_stream) return x jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3))) # Wait for the results hcb.barrier_wait("first") expected = """ what: x times 1 [[0. 1. 2.] [3. 4. 5.]] what: x times 2 [[ 0. 2. 4.] [ 6. 8. 10.]] what: x times 3 [[ 0. 6. 12.] [18. 24. 30.]]""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) testing_stream.reset() # Call again jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3))) hcb.barrier_wait("second") self.assertMultiLineStrippedEqual(expected, testing_stream.output) def test_tap_error_bad_consumer_id(self): """Try to use reserved consumer ID 0. Check that we get the proper error from the runtime.""" if not hcb._use_outfeed(jtu.device_under_test()): raise SkipTest("test works only for outfeed") comp = xla_client.XlaBuilder(self._testMethodName) token = hcb.xops.CreateToken(comp) hcb._initialize_outfeed_receiver() # Needed if this is the sole test with self.assertRaisesRegex(RuntimeError, "Consumer ID cannot be a reserved value: 0"): hcb._callback_handler_data.receiver.add_outfeed( comp, token, 0, [xops.Constant(comp, np.zeros((2, 3), dtype=np.float32))]) def test_tap_error_different_shapes(self): """Try to register different shapes for the same consumer ID.""" if not hcb._use_outfeed(jtu.device_under_test()): raise SkipTest("test works only for outfeed") comp = xla_client.XlaBuilder(self._testMethodName) token = hcb.xops.CreateToken(comp) hcb._initialize_outfeed_receiver() # Needed if this is the sole test hcb._callback_handler_data.receiver.add_outfeed( comp, token, 123, [xops.Constant(comp, np.zeros((2, 3), dtype=np.float32))]) with self.assertRaisesRegex( RuntimeError, ".*does not match previous shape element_type.*"): hcb._callback_handler_data.receiver.add_outfeed( comp, token, 123, [xops.Constant(comp, np.zeros((2, 3), dtype=np.int32))]) with self.assertRaisesRegex( RuntimeError, ".*does not match previous shape element_type.*"): hcb._callback_handler_data.receiver.add_outfeed( comp, token, 123, [xops.Constant(comp, np.zeros((2,), dtype=np.float32))]) def test_tap_id_tap_removed_kwargs(self): def func(x, transforms, y): pass with self.assertRaisesRegex(TypeError, r"Support for \*\*kwargs in ``id_tap``"): hcb.id_tap(func, 1, y=2) def test_tap_odeint(self): # TODO: find a smaller repro for bug #4015 # Seems to be xla_call(scan(xla_call)), all under grad. 
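    # The check below only verifies that an id_print inside the ODE dynamics
    # survives jax.grad(odeint(...)) without raising; no tap output is asserted.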
from jax.experimental.ode import odeint def f(x, t, k): x = hcb.id_print(x) return -k * x def loss(k=1.0): t = jnp.linspace(0, 0.001, num=2) xs = odeint(f, 1.0, t, k) return xs[-1] jax.grad(loss)(1.0) # should not fail def test_tap_remat_0(self): def f(i, k): x = hcb.id_print(k + i, output_stream=testing_stream) return k * x def loss(k): return lax.fori_loop(0, 2, jax.remat(f), k) print(loss(3)) hcb.barrier_wait() expected = """ 3 10""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) @parameterized.named_parameters( jtu.cases_from_list( dict(testcase_name=f"_use_remat={use_remat}_{grad_func}_use_result={use_result}", use_result=use_result, use_remat=use_remat, grad_func=grad_func) for use_result in [True, False] for grad_func in ["grad", "value_and_grad"] for use_remat in ["old", "new", "none"])) def test_tap_remat(self, use_result=False, grad_func="grad", use_remat="new"): def f(x): id_print_result = hcb.id_print(x, output_stream=testing_stream) if use_result: x = id_print_result return 3. * x grad_f = jax.grad if grad_func == "grad" else jax.value_and_grad if use_remat == "old": trans_f = jax.remat(f) elif use_remat == "new": trans_f = ad_checkpoint.checkpoint(f) else: assert use_remat == "none" trans_f = f print(jax.make_jaxpr(grad_f(trans_f))(2.)) grad_f(trans_f)(2.) hcb.barrier_wait() if use_remat == "none": if use_result: if FLAGS.jax_host_callback_ad_transforms: expected = """ 2. transforms: ['jvp', 'transpose'] 3.""" else: # GOOD: whether or not we use_result, in absence of # jax_host_callback_ad_transforms we get the same callback. expected = "2." else: expected = "2." else: # use_remat if use_result: if FLAGS.jax_host_callback_ad_transforms: expected = """ 2. 2. transforms: ['jvp', 'transpose'] 3.""" else: expected = """ 2. 2.""" else: if use_remat == "old": # TODO: we should see two callbacks expected = "" else: # Good: we see two callbacks, whether or not we use the result. expected = """ 2. 
2.""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) def test_tap_named_call(self): def tap_scalar(init, do_print=False): @partial(jax.named_call, name="step") def step(acc, step_nr): acc = acc + step_nr maybe_print(do_print, step_nr, what="step_nr") return acc, None return lax.scan(step, init, np.arange(2)) self.assertAllClose(tap_scalar(3., do_print=False), tap_scalar(3., do_print=True)) hcb.barrier_wait() expected = """ what: step_nr 0 what: step_nr 1""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) class HostCallbackCallTest(jtu.JaxTestCase): """Tests for hcb.call""" def setUp(self): super().setUp() if jtu.device_under_test() == "gpu" and jax.device_count() > 1: raise SkipTest("host_callback broken on multi-GPU platforms (#6447)") testing_stream.reset() testing_stream._test_method_name = self._testMethodName def tearDown(self) -> None: hcb.barrier_wait("HostCallbackCallTest.tearDown") super().tearDown() def call_log_testing_stream(self, func, arg, *, result_shape, name=""): """Call `func` and log inputs and outputs to the testing stream""" def call_log(arg): def val2str(v): return np.array2string(np.array(arg)) testing_stream.write(f"Call {name}({val2str(arg)})\n") res = func(arg) testing_stream.write(f" = {val2str(res)}\n") return res return hcb.call(call_log, arg, result_shape=result_shape) def test_call_simple(self): def f_outside(x): return 2 * x def fun(x): y = hcb.call(f_outside, x + 1, result_shape=x) return 3 * (1 + y) arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4)) self.assertAllClose(3 * (1 + 2 * (arg + 1)), fun(arg)) @parameterized.named_parameters( jtu.cases_from_list( dict(testcase_name=f"_{np.dtype(dtype).name}", dtype=dtype) for dtype in jtu.dtypes.all if dtype != np.bool_)) def test_call_types(self, dtype=np.float64): def f_outside(x): # Use x + x to ensure that the result type is the same return x + x def fun(x): return hcb.call(f_outside, x + x, result_shape=x) arg = np.arange(24, dtype=dtype).reshape((2, 3, 4)) self.assertAllClose(arg + arg + arg + arg, fun(arg), check_dtypes=True) def test_call_types_bool(self, dtype=np.float64): def f_outside(x): return np.invert(x) def fun(x): return hcb.call(f_outside, x, result_shape=x) arg = self.rng().choice(a=[True, False], size=(2, 3, 4)) self.assertAllClose(np.invert(arg), fun(arg)) def test_call_tuples(self): def f_outside(args): x, y = args return y, x # Swap the tuple def fun(x): xy = hcb.call(f_outside, (x, x + 1), result_shape=(x, x)) return 2 * xy[0] + 3 * xy[1] arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4)) self.assertAllClose(2 * (arg + 1) + 3 * arg, fun(arg)) def test_call_empty_arg(self): """Call with empty array.""" result = np.ones((2,), dtype=np.float32) def f_outside(_): return result def fun(x): return x + hcb.call(f_outside, (), result_shape=jax.ShapeDtypeStruct(result.shape, result.dtype)) self.assertAllClose(2. 
+ result, fun(2.)) def test_call_empty_result(self): """Call returning empty array.""" result_shape = (2, 0) def f_outside(_): return np.ones(result_shape, dtype=np.float32) def fun(x): return x + hcb.call(f_outside, 1., result_shape=jax.ShapeDtypeStruct(result_shape, np.float32)) self.assertAllClose(f_outside(0.), fun(2.)) def test_call_empty_result_inside_pytree(self): """Call returning a tuple with an empty array and a non-empty one.""" result_shape_0 = (2, 0) result_shape_2 = (0,) def f_outside(_): return (np.ones(result_shape_0, dtype=np.float32), np.ones((1,), dtype=np.float32), np.ones(result_shape_2, dtype=np.float32)) def fun(x): res = hcb.call(f_outside, 1., result_shape=(jax.ShapeDtypeStruct(result_shape_0, np.float32), jax.ShapeDtypeStruct((1,), np.float32), jax.ShapeDtypeStruct(result_shape_2, np.float32))) self.assertEqual(result_shape_0, res[0].shape) self.assertEqual(result_shape_2, res[2].shape) return x + res[1] self.assertAllClose(2 + np.ones((1,), dtype=np.float32), fun(2.)) def test_call_empty_result_all_pytree(self): """Call returning a tuple of empty arrays.""" result_shape = (2, 0) def f_outside(_): return (np.ones(result_shape, dtype=np.float32), np.ones(result_shape, dtype=np.float32)) def fun(x): res = hcb.call(f_outside, 1., result_shape=(jax.ShapeDtypeStruct(result_shape, np.float32), jax.ShapeDtypeStruct(result_shape, np.float32))) return x + res[0] + res[1] self.assertAllClose(np.ones(result_shape, dtype=np.float32), fun(2.)) def test_call_no_result(self): def f_outside(arg): self.call_log_testing_stream(lambda x: None, arg, result_shape=None, name="outside") return arg self.assertAllClose((3., 4.), f_outside((3., 4.))) hcb.barrier_wait() expected = """ Call outside([3. 4.]) = [3. 4.]""" self.assertMultiLineStrippedEqual(expected, testing_stream.output) def test_call_cond(self): def f_outside(args): x, y = args return x * y def loop(x, use_outside=True): def body(i, acc): return lax.cond(i % 2 == 1, lambda _: (hcb.call(f_outside, (acc, i), result_shape=acc) if use_outside else f_outside((acc, i))), lambda _: acc, None) return lax.fori_loop(0, 18, body, x) res_inside = loop(1.2, use_outside=False) self.assertAllClose(res_inside, jax.jit(loop)(1.2)) def test_call_jit_scan_call(self): def f_outside(x): return x def loop(x, use_outside=True): def body(carry, i): if use_outside: return carry + hcb.call(f_outside, i, result_shape=i), None else: return carry + i, None return lax.scan(body, 0, x) x = np.arange(5, dtype=np.int32) res_outside = jax.jit(partial(loop, use_outside=True))(x) self.assertAllClose(res_outside, loop(x, use_outside=False)) def test_call_doc_example1(self): """Examples from the documentation: simplest, call a function""" def host_eig(x): return np.linalg.eigvals(x) shape = (2, 5, 4, 4) m = np.ones(shape, dtype=np.float32) def fun(m): eig_m = hcb.call(host_eig, m, result_shape=jax.ShapeDtypeStruct(m.shape[:-1], m.dtype)) return eig_m expected_res = np.linalg.eigvals(m) self.assertAllClose(expected_res, fun(m)) def test_call_doc_example_hlo(self): """Examples from the documentation: simplest, call a function.""" def fun1(m): return jnp.sin(hcb.call(lambda x: np.cos, jnp.cos(m), result_shape=m)) m = np.ones((2,), np.float32) helper_print_optimized_hlo(fun1, m) def fun2(m): x = hcb.call(lambda x: None, 2, result_shape=()) return x m = np.ones((2,), np.float32) helper_print_optimized_hlo(fun2, m) def test_call_with_device(self): def callback_func(x, device=None): testing_stream.write(f"device: {device}\n Called with {x}") return x def func(x): return 
hcb.call(callback_func, x, result_shape=x, call_with_device=True) self.assertEqual(3., func(3.)) assertMultiDeviceOutputEqual(self, """ device: cpu:0 Called with 3.00""") def test_call_pmap(self): # Works for 1 or 2 devices def callback_func(x, device=None): testing_stream.write(f"device: {device}\n Called with {x}") return x * np.array(3, np.int32) def fun(x): # x: i32 return hcb.call(callback_func, x * 2, result_shape=x, call_with_device=True) xv = jnp.arange(len(local_devices()), dtype=jnp.int32) res = jax.pmap(fun)(xv) self.assertAllClose(jax.pmap(lambda x: x * 6)(xv), res) # Assertion text is for 2 devices (also works for 1 device) assertMultiDeviceOutputEqual(self, """ device: cpu:0 Called with 0 device: cpu:1 Called with 2""") def test_call_vmap(self): def f_outside(x): return x def fun(x): return hcb.call(f_outside, x, result_shape=x) with self.assertRaisesRegex(NotImplementedError, "batching rules are implemented only for id_tap, not for call"): jax.vmap(fun)(np.ones((2, 3))) @jtu.skip_on_devices("cpu", "gpu") # TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall def test_call_pjit(self): devices = np.array(local_devices()) nr_devices = len(devices) if nr_devices < 2: raise SkipTest("test requires at least 2 devices") print(f"test_call_pjit is running on devices {devices}.") # x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...] # y: i32[3, 4] x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3] y = jnp.ones((3, 4), np.int32) def callback_x5_func(x, device=None): testing_stream.write(f"device: {device}\n Called with {x}") return x * np.array(5, np.int32) def fun(x): xy = jnp.dot(x, y) return hcb.call( callback_x5_func, xy, result_shape=xy, call_with_device=True) pjit_fun = pjit.pjit( fun, in_axis_resources=(P("d"),), out_axis_resources=P("d")) with maps.mesh(devices, ["d"]): # Print the internal IR helper_log_ir( f"{self._testMethodName}.pjit", pjit_fun, x, num_partitions=nr_devices) res = pjit_fun(x) expected_res = jnp.dot(x, y) * np.array(5, np.int32) self.assertAllClose(expected_res, res, check_dtypes=False) hcb.barrier_wait("before assertion") # Assertion text is for 2 devices (also works for 1 device) assertMultiDeviceOutputEqual( self, """ device: cpu:0 Called with [[ 3 3 3 3] [33 33 33 33]]""") def test_call_error_bad_result_shape(self): with self.assertRaisesRegex( ValueError, "The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"): hcb.call(lambda x: x, 3., result_shape="string") with self.assertRaisesRegex( ValueError, "The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"): hcb.call(lambda x: x, 3., result_shape=lambda x: x) hcb.barrier_wait("wait for error") def helper_check_callback_errors(self, thunk: Callable, expected_exc_txt: str): """Calls thunk() and checks for expected exceptions. """ if jtu.device_under_test() == "cpu": # On CPU the runtime crashes, and the tests are all aborted raise SkipTest("TODO: CPU runtime crashes on unexpected infeed") elif jtu.device_under_test() == "gpu": # On GPU we get a nice error back to Python with self.assertRaisesRegex( RuntimeError, "RET_CHECK failure .* Mismatch between infeed source buffer shape s8.12345."): thunk() elif jtu.device_under_test() == "tpu": # On TPU we get no error!!! raise SkipTest("TODO: TPU runtime does not check infeed, and just computes with garbage") # Both on GPU and TPU we also get an error during the barrier_wait at the # end of the test. Run a barrier_wait now, to consume that error. 
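    # That cleanup barrier is itself expected to raise hcb.CallbackException
    # wrapping the text of the last callback error, which is what the assertion
    # below matches.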
    with self.assertRaisesRegex(
        hcb.CallbackException,
        re.compile(
            "There were exceptions during callback processing.*Last one was:.*" +
            expected_exc_txt, re.DOTALL)):
      hcb.barrier_wait("Waiting for error")

  def test_call_error_callback_throws_exception(self):
    def f_outside(x):
      raise ValueError("user exception")

    def fun(x):
      return hcb.call(f_outside, x, result_shape=x)

    self.helper_check_callback_errors(lambda: fun(3.),
                                      "ValueError: user exception")

  def test_call_error_callback_returns_unexpected_shape(self):
    def fun(x):
      return hcb.call(lambda x: (x, x), x, result_shape=x)

    self.helper_check_callback_errors(
        lambda: fun(3.),
        "Callback func .* should have returned a result with pytree")

  def test_call_error_then_compute(self):
    # Continue computation on device after error
    def f_outside(x):
      raise ValueError("user exception")

    def fun(x):
      x1 = hcb.call(f_outside, x, result_shape=x)
      return x1

    arg = np.arange(3, dtype=np.int32)
    self.helper_check_callback_errors(
        lambda: self.assertAllClose(arg, fun(arg)),
        "ValueError: user exception")


def call_jax_other_device(jax_outside_fun, arg, *, device):
  """Calls a JAX function on a specific device with simple support for reverse AD.

  Functions whose name starts with "jax_outside" are called on another device,
  by way of hcb.call.
  """

  def run_jax_outside_fun(arg):
    return jax.jit(jax_outside_fun)(jax.device_put(arg, device))

  @jax.custom_vjp
  def make_call(arg):
    return hcb.call(run_jax_outside_fun, arg,
                    result_shape=jax.eval_shape(jax_outside_fun, arg))

  # Define the fwd and bwd custom_vjp functions
  def make_call_vjp_fwd(arg):
    # Return the primal argument as the residual. Use `make_call` for the
    # primal computation to enable higher-order AD.
    return make_call(arg), arg  # Return the primal argument as the residual

  def make_call_vjp_bwd(res, ct_res):
    arg = res  # residual is the primal argument

    def jax_outside_vjp_fun(arg_and_ct):
      arg, ct = arg_and_ct
      _, f_vjp = jax.vjp(jax_outside_fun, arg)
      ct_in, = f_vjp(ct)
      return ct_in

    return (call_jax_other_device(jax_outside_vjp_fun, (arg, ct_res),
                                  device=device),)

  make_call.defvjp(make_call_vjp_fwd, make_call_vjp_bwd)
  return make_call(arg)


class CallJaxTest(jtu.JaxTestCase):
  """Tests using `call_jax_other_device`."""

  def setUp(self):
    if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
      raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
    if jtu.device_under_test() != "cpu":
      assert jax.devices("cpu")
      self.outside_device = jax.devices("cpu")[0]
    else:
      if len(jax.devices("cpu")) == 1:
        raise SkipTest("Test needs at least two devices. On CPU use XLA_FLAGS=--xla_force_host_platform_device_count=2")
      self.outside_device = jax.devices("cpu")[1]
    super().setUp()

  def test_jax_impl(self):
    def f_jax(x):
      return jnp.sin(x)

    def f_outside(x):
      return call_jax_other_device(f_jax, x, device=self.outside_device)

    self.assertAllClose(f_jax(3.), f_outside(3.))
    self.assertAllClose(f_jax(3.), jax.jit(f_outside)(3.))

  def test_jax_impl_pytree(self):
    def f_jax(x):
      # x : dict(a=..., b=...) and output is a list of two elements
      return [jnp.sin(x["a"]), jnp.sin(x["b"])]

    def f_outside(x):
      return call_jax_other_device(f_jax, x, device=self.outside_device)

    x = dict(a=3., b=4.)
    res_jax = f_jax(x)
    # print(f"outside_jaxpr = {jax.make_jaxpr(f_outside)(x)}")
    res_outside = f_outside(x)
    self.assertAllClose(res_jax, res_outside)

  def test_jax_grad(self):
    def f_jax(x):
      return 2. * jnp.sin(x)

    def f_outside(x):
      return 2. * call_jax_other_device(jnp.sin, x, device=self.outside_device)

    res_jax = jax.grad(f_jax)(3.)
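    # f_outside routes jnp.sin through call_jax_other_device, whose custom_vjp
    # issues a second hcb.call on the outside device for the cotangents, so its
    # gradient should agree with the purely local f_jax.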
self.assertAllClose(res_jax, jax.grad(f_outside)(3.)) def test_jax_grad_pytree(self): def f_jax(x): # x : dict(a=..., b=...) and output is a float return 3. * jnp.sin(x["a"]) + jnp.sin(x["b"]) def f_outside(x): return call_jax_other_device(f_jax, x, device=self.outside_device) x = dict(a=3., b=4.) res_jax = jax.grad(f_jax)(x) self.assertAllClose(res_jax, jax.grad(f_outside)(x)) def test_jax_grad_of_grad(self): def f_jax(x): return 2. * x * x * x def f_outside(x): return 2. * call_jax_other_device(lambda x: x * x * x, x, device=self.outside_device) res_jax = jax.grad(jax.grad(f_jax))(5.) res_outside = jax.grad(jax.grad(f_outside))(5.) self.assertAllClose(res_jax, res_outside) class OutfeedRewriterTest(jtu.JaxTestCase): def setUp(self): if jtu.device_under_test() == "gpu" and jax.device_count() > 1: raise SkipTest("host_callback broken on multi-GPU platforms (#6447)") super().setUp() def assertRewrite(self, expected: str, func: Callable, args: Sequence, has_input_token=True, has_output_token=True): """Check that the rewrite of func(*args) matches expected.""" jaxpr = jax.make_jaxpr(func)(*args) rewritten = hcb._rewrite_closed_jaxpr(jaxpr, # noqa: F841 has_input_token, has_output_token) # Since it is somewhat annoying to update the Jaxpr assertions when we change # the Jaxpr printing, we do not check these by default. It is recommended that # before making changes to the code generation and Jaxpr rewriting, turn on # the checking, update the expected Jaxpr, and then make the changes. # assertMultiLineStrippedEqual(self, expected, str(rewritten)) del rewritten def test_no_outfeed(self): self.assertRewrite(""" { lambda ; a. let b = mul a a c = add a b in (c,) }""", lambda x: x + x * x, [0], has_input_token=False, has_output_token=False) self.assertRewrite(""" { lambda ; a d e. let b = mul a a c = add a b in (c,) }""", lambda x: x + x * x, [0], has_output_token=False) self.assertRewrite(""" { lambda ; a d e. let b = mul a a c = add a b in (c, d, e) }""", lambda x: x + x * x, [0]) def test_simple_outfeed(self): self.assertRewrite(""" { lambda ; a d e. let b = add a a c f g = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] b d e in (c, f, g) }""", lambda x: hcb.id_print(x + x), [0]) def test_simple_outfeed_without_input_token(self): self.assertRewrite(""" { lambda ; a b. let e = create_token a b f = create_token a b c = add a b d g h = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] c e f in (d,) }""", lambda x1, x2: hcb.id_print(x1 + x2), [1, 2], has_input_token=False, has_output_token=False) def test_simple_outfeed_without_input_token_nor_invars(self): self.assertRewrite(""" { lambda ; . let b = create_token c = create_token a d e = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] 42 b c in (a,) }""", lambda: hcb.id_print(42), [], has_input_token=False, has_output_token=False) def test_multiple_tap_without_dependencies(self): def f(x): hcb.id_print(x, what="x") hcb.id_print(x + 1, what="x + 1") return 2 self.assertRewrite(""" { lambda ; a c d. let _ e f = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] a c d b = add a 1 _ g h = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] b e f in (2, g, h) }""", f, [1]) def test_cond(self): y = jnp.ones(5) # captured const def func(x, z): return lax.cond(z > 0, (1, 2), lambda a: (a[0], jnp.zeros(5)), z, lambda a: (hcb.id_print(a), y)) self.assertRewrite(""" { lambda a ; b c h i. 
let d = gt c 0 e = convert_element_type[ new_dtype=int32 ] d f g j k = cond[ branches=( { lambda ; a b c d f g. let e h i = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] d f g in (e, a, h, i) } { lambda ; f_ a b c g h. let d = broadcast_in_dim[ broadcast_dimensions=( ) shape=(5,) ] 0.00 in (a, d, g, h) } ) linear=(False, False, False, False, False, False) ] e a 1 2 c h i in (f, g, j, k) }""", func, [y, 5]) def test_while(self): ct_body = jnp.ones(5, np.float32) # captured const for the body ct_cond = jnp.ones(5, np.float32) # captured const for the conditional def func(x): # x: f32[5] # c: (f32[5], f32) return lax.while_loop(lambda c: c[1] < jnp.sum(c[0] + ct_cond), lambda c: (ct_body, hcb.id_print(c[1]) + 1.), (x, np.float32(1.))) self.assertRewrite(""" { lambda a b ; c f g. let d e h i = while[ body_jaxpr={ lambda ; a b c f g. let d h i = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] c f g e = add d 1.00 in (a, e, h, i) } body_nconsts=1 cond_jaxpr={ lambda ; a b c g h. let d = add b a e = reduce_sum[ axes=(0,) ] d f = lt c e in (f,) } cond_nconsts=1 ] a b c 1.00 f g in (d, e, h, i) }""", func, [ct_body]) def test_while_pred_outfeed(self): """A while with outfeed in the pred.""" ct_body = jnp.ones(5) # captured const for the body ct_cond = jnp.ones(2) # captured const for the conditional def func(x): return lax.while_loop(lambda c: hcb.id_print(ct_cond, result=c[1]) < 5, lambda c: (ct_body, hcb.id_print(c[1]) + 1), (x, 1)) self.assertRewrite(""" { lambda a b ; c f g. let j k l = xla_call[ call_jaxpr={ lambda ; a b c g h. let d i j = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] a g h e = id_tap_dep c d f = lt e 5 in (f, i, j) } donated_invars=(False, False, False, False, False) name=cond_before ] a c 1 f g bf d e h i = while[ body_jaxpr={ lambda ; r s t u v w x. let y z ba bb = xla_call[ call_jaxpr={ lambda ; a b c f g. let d h i = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] c f g e = add d 1 in (a, e, h, i) } donated_invars=(False, False, False, False, False) name=body ] s u v w x bc bd be = xla_call[ call_jaxpr={ lambda ; a b c g h. let d i j = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] a g h e = id_tap_dep c d f = lt e 5 in (f, i, j) } donated_invars=(False, False, False, False, False) name=cond_body ] r y z ba bb in (bc, y, z, bd, be) } body_nconsts=2 cond_jaxpr={ lambda ; m n o p q. let in (m,) } cond_nconsts=0 ] a b j c 1 k l in (d, e, h, i) }""", func, [ct_body]) def test_scan(self): y = jnp.ones(5) # captured const def func(x): return lax.scan(lambda c, a: (hcb.id_print(c), y), (1, 2), x) self.assertRewrite(""" { lambda a ; b f g. let c d h i e = scan[ jaxpr={ lambda ; a b c g h d. let e f i j = outside_call[ arg_treedef=PyTreeDef(tuple, [*,*]) callback=... has_token=True identity=True ] b c g h in (e, f, i, j, a) } length=5 linear=(False, False, False, False, False, False) num_carry=4 num_consts=1 reverse=False unroll=1 ] a 1 2 f g b in (c, d, e, h, i) }""", func, [y]) def test_scan_custom_jvp(self): """custom JVP, inside scan. This exercises the custom_jvp_call_jaxpr primitives.""" @jax.custom_jvp def f(x): return x * hcb.id_print(x) @f.defjvp def f_jvp(primals, tangents): x, = primals x_dot, = tangents primal_out = f(x) tangent_out = 3. 
* x * hcb.id_print(x_dot) return primal_out, tangent_out def g(x): # Sum f(x_i) return lax.scan(lambda carry, inp: (carry + f(inp), 0.), np.full(x.shape[1:], 0.), # Like x w/o leading dim x)[0] arg = np.full((5,), 0.7) self.assertRewrite(""" { lambda ; a c d. let b e f _ = scan[ jaxpr={ lambda ; a e f b. let c g h = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e. let b f g = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] a d e c = mul a b in (c, f, g) } num_consts=0 ] b e f d = add a c in (d, g, h, 0.00) } length=5 linear=(False, False, False, False) num_carry=3 num_consts=0 reverse=False unroll=1 ] 0.00 c d a in (b, e, f) }""", g, [arg]) self.assertRewrite(""" { lambda ; a d e. let _ _ f g _ b = scan[ jaxpr={ lambda ; a b h i c d. let e j k = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e. let b f g = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] a d e c = mul a b in (c, f, g) } num_consts=0 ] c h i f = add a e g = mul c 3.00 in (f, *, j, k, 0.00, g) } length=5 linear=(False, True, False, False, False, True) num_carry=4 num_consts=0 reverse=False unroll=1 ] 0.00 * d e a * _ _ h i _ c = scan[ jaxpr={ lambda ; a b g h c d. let e = mul b d f i j = outside_call[ arg_treedef=* callback=... has_token=True identity=True transforms=(('transpose',),) ] e g h in (*, b, i, j, *, f) } length=5 linear=(True, True, False, False, True, False) num_carry=4 num_consts=0 reverse=True unroll=1 ] * 1.00 f g * b in (c, h, i) }""", jax.grad(g), [arg]) def test_scan_custom_vjp(self): """custom VJP, inside scan. This exercises the custom_vjp_call_jaxpr primitives.""" @jax.custom_vjp def f(x): return x * hcb.id_print(x) # f_fwd: a -> (b, residual) def f_fwd(x): return f(x), 3. * x # f_bwd: (residual, CT b) -> [CT a] def f_bwd(residual, ct_b): return residual * hcb.id_print(ct_b), f.defvjp(f_fwd, f_bwd) def g(x): # Sum f(x_i) return lax.scan(lambda carry, inp: (carry + f(inp), 0.), np.full(x.shape[1:], 0.), # Like x w/o leading dim x)[0] arg = np.full((2,), 0.7) self.assertRewrite(""" { lambda ; a c d. let b e f _ = scan[ jaxpr={ lambda ; a e f b. let c g h = custom_vjp_call_jaxpr[ fun_jaxpr={ lambda ; a d e. let b f g = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] a d e c = mul a b in (c, f, g) } num_consts=0 ] b e f d = add a c in (d, g, h, 0.00) } length=2 linear=(False, False, False, False) num_carry=3 num_consts=0 reverse=False unroll=1 ] 0.00 c d a in (b, e, f) }""", g, [arg]) self.assertRewrite(""" { lambda ; a d e. let _ _ f g _ b = scan[ jaxpr={ lambda ; a b h i c d. let e j k = custom_vjp_call_jaxpr[ fun_jaxpr={ lambda ; a d e. let b f g = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] a d e c = mul a b in (c, f, g) } num_consts=0 ] c h i f = add a e g = mul c 3.00 in (f, *, j, k, 0.00, g) } length=2 linear=(False, True, False, False, False, True) num_carry=4 num_consts=0 reverse=False unroll=1 ] 0.00 * d e a * _ _ h i _ c = scan[ jaxpr={ lambda ; a b g h c d. let e i j = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] b g h f = mul d e in (*, b, i, j, *, f) } length=2 linear=(True, True, False, False, True, False) num_carry=4 num_consts=0 reverse=True unroll=1 ] * 1.00 f g * b in (c, h, i) }""", jax.grad(g), [arg]) def test_remat_loop(self): def f(k, x): x = hcb.id_print(k + x) return -k * x def loss(k): return lax.fori_loop(0, 1, jax.remat(f), k) self.assertRewrite(""" { lambda ; a c d. let _ _ b e f = while[ body_jaxpr={ lambda ; a b c f g. 
let d = add a 1 e h i = remat_call[ call_jaxpr={ lambda ; a b g h. let c = add a b d i j = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] c g h e = neg a f = mul e d in (f, i, j) } concrete=False name=f ] a c f g in (d, b, e, h, i) } body_nconsts=0 cond_jaxpr={ lambda ; a b c e f. let d = lt a b in (d,) } cond_nconsts=0 ] 0 1 a c d in (b, e, f) }""", loss, [2]) def test_named_call(self): def tap_scalar(init, do_print=False): @partial(jax.named_call, name="step") def step(acc, step_nr): acc = acc + step_nr maybe_print(do_print, step_nr, what="step_nr") return acc, None return lax.scan(step, init, np.arange(2, dtype=np.int32)) self.assertRewrite(""" { lambda a ; b d e. let c = scan[ jaxpr={ lambda ; a b. let c = named_call[ call_jaxpr={ lambda ; a b. let c = add a b in (c,) } name=step ] a b in (c,) } length=2 linear=(False, False) num_carry=1 num_consts=0 reverse=False unroll=1 ] b a in (c, d, e) }""", tap_scalar, [np.int32(3)]) def test_pmap(self): def f(xv): jax.pmap(lambda x: jnp.sin(hcb.id_print(x, tap_with_device=True)), axis_name="i")(xv) self.assertRewrite(""" { lambda ; a b c. let _ d e = xla_pmap[ axis_name=i axis_size=1 backend=None call_jaxpr={ lambda ; a d e. let b f g = outside_call[ arg_treedef=* callback=... has_token=True identity=True ] a d e c = sin b in (c, f, g) } devices=None donated_invars=(False, False, False) global_arg_shapes=(None,) global_axis_size=None in_axes=(0, 0, 0) name=<lambda> out_axes=(0, 0, 0) ] a b c in (d, e) }""", f, [np.array([2.], dtype=np.float32)]) if __name__ == "__main__": absltest.main(testLoader=jtu.JaxTestLoader())
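# --- Illustrative sketch (not part of the original test suite) ---
# A minimal, hypothetical usage of `call_jax_other_device` defined above. It assumes
# at least one CPU device is available (as CallJaxTest.setUp does) and shows that
# reverse-mode AD flows through the custom_vjp wired up inside that helper.
def _example_call_jax_other_device():
    outside_device = jax.devices("cpu")[0]

    def f(x):
        # jnp.sin is evaluated on `outside_device` by way of hcb.call.
        return 2. * call_jax_other_device(jnp.sin, x, device=outside_device)

    # The gradient equals 2. * jnp.cos(3.); the VJP computation also runs on
    # `outside_device`, via a nested call_jax_other_device.
    return jax.grad(f)(3.)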
test_cache.py
# This file is part of the MapProxy project. # Copyright (C) 2010 Omniscale <http://omniscale.de> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import os import re import shutil import tempfile import threading import time from io import BytesIO from collections import defaultdict import pytest from mapproxy.cache.base import TileLocker from mapproxy.cache.file import FileCache from mapproxy.cache.tile import Tile, TileManager from mapproxy.client.http import HTTPClient from mapproxy.client.wms import WMSClient from mapproxy.compat.image import Image from mapproxy.grid import TileGrid, resolution_range from mapproxy.image import ImageSource from mapproxy.image.opts import ImageOptions from mapproxy.layer import ( BlankImage, CacheMapLayer, DirectMapLayer, MapBBOXError, MapExtent, MapLayer, MapQuery, ResolutionConditional, SRSConditional, ) from mapproxy.request.wms import WMS111MapRequest from mapproxy.source import InvalidSourceQuery, SourceError from mapproxy.source.tile import TiledSource from mapproxy.source.wms import WMSSource from mapproxy.srs import SRS from mapproxy.test.http import assert_query_eq, wms_query_eq, query_eq, mock_httpd from mapproxy.test.image import create_debug_img, is_png, tmp_image from mapproxy.util.coverage import BBOXCoverage TEST_SERVER_ADDRESS = ('127.0.0.1', 56413) GLOBAL_GEOGRAPHIC_EXTENT = MapExtent((-180, -90, 180, 90), SRS(4326)) tmp_lock_dir = None def setup(): global tmp_lock_dir tmp_lock_dir = tempfile.mkdtemp() def teardown(): shutil.rmtree(tmp_lock_dir) class counting_set(object): def __init__(self, items): self.data = defaultdict(int) for item in items: self.data[item] += 1 def add(self, item): self.data[item] += 1 def __repr__(self): return 'counting_set(%r)' % dict(self.data) def __eq__(self, other): return self.data == other.data class MockTileClient(object): def __init__(self): self.requested_tiles = [] def get_tile(self, tile_coord, format=None): self.requested_tiles.append(tile_coord) return ImageSource(create_debug_img((256, 256))) class TestTiledSourceGlobalGeodetic(object): def setup(self): self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) self.client = MockTileClient() self.source = TiledSource(self.grid, self.client) def test_match(self): self.source.get_map(MapQuery([-180, -90, 0, 90], (256, 256), SRS(4326))) self.source.get_map(MapQuery([0, -90, 180, 90], (256, 256), SRS(4326))) assert self.client.requested_tiles == [(0, 0, 1), (1, 0, 1)] def test_wrong_size(self): with pytest.raises(InvalidSourceQuery): self.source.get_map(MapQuery([-180, -90, 0, 90], (512, 256), SRS(4326))) def test_wrong_srs(self): with pytest.raises(InvalidSourceQuery): self.source.get_map(MapQuery([-180, -90, 0, 90], (512, 256), SRS(4326))) class MockFileCache(FileCache): def __init__(self, *args, **kw): super(MockFileCache, self).__init__(*args, **kw) self.stored_tiles = set() self.loaded_tiles = counting_set([]) def store_tile(self, tile): assert tile.coord not in self.stored_tiles self.stored_tiles.add(tile.coord) if self.cache_dir != '/dev/null': 
FileCache.store_tile(self, tile) def load_tile(self, tile, with_metadata=False): self.loaded_tiles.add(tile.coord) return FileCache.load_tile(self, tile, with_metadata) def is_cached(self, tile): return tile.coord in self.stored_tiles def create_cached_tile(tile, cache, timestamp=None): loc = cache.tile_location(tile, create_dir=True) with open(loc, 'wb') as f: f.write(b'foo') if timestamp: os.utime(loc, (timestamp, timestamp)) @pytest.fixture def file_cache(tmpdir): return FileCache(cache_dir=tmpdir.join('cache').strpath, file_ext='png') @pytest.fixture def tile_locker(tmpdir): return TileLocker(tmpdir.join('lock').strpath, 10, "id") @pytest.fixture def mock_tile_client(): return MockTileClient() @pytest.fixture def mock_file_cache(): return MockFileCache('/dev/null', 'png') class TestTileManagerStaleTiles(object): @pytest.fixture def tile_mgr(self, file_cache, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) client = MockTileClient() source = TiledSource(grid, client) tile_mgr = TileManager(grid, file_cache, [source], 'png', locker=tile_locker) return tile_mgr def test_is_stale_missing(self, tile_mgr): assert not tile_mgr.is_stale(Tile((0, 0, 1))) def test_is_stale_not_expired(self, tile_mgr, file_cache): create_cached_tile(Tile((0, 0, 1)), file_cache) assert not tile_mgr.is_stale(Tile((0, 0, 1))) def test_is_stale_expired(self, tile_mgr, file_cache): create_cached_tile(Tile((0, 0, 1)), file_cache, timestamp=time.time()-3600) tile_mgr._expire_timestamp = time.time() assert tile_mgr.is_stale(Tile((0, 0, 1))) class TestTileManagerRemoveTiles(object): @pytest.fixture def tile_mgr(self, file_cache, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) client = MockTileClient() source = TiledSource(grid, client) image_opts = ImageOptions(format='image/png') return TileManager(grid, file_cache, [source], 'png', image_opts=image_opts, locker=tile_locker) def test_remove_missing(self, tile_mgr): tile_mgr.remove_tile_coords([(0, 0, 0), (0, 0, 1)]) def test_remove_existing(self, tile_mgr, file_cache): create_cached_tile(Tile((0, 0, 1)), file_cache) assert tile_mgr.is_cached(Tile((0, 0, 1))) tile_mgr.remove_tile_coords([(0, 0, 0), (0, 0, 1)]) assert not tile_mgr.is_cached(Tile((0, 0, 1))) class TestTileManagerTiledSource(object): @pytest.fixture def tile_mgr(self, tile_locker, mock_file_cache, mock_tile_client): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source = TiledSource(grid, mock_tile_client) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source], 'png', image_opts=image_opts, locker=tile_locker, ) def test_create_tiles(self, tile_mgr, mock_file_cache, mock_tile_client): tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert sorted(mock_tile_client.requested_tiles) == [(0, 0, 1), (1, 0, 1)] class TestTileManagerDifferentSourceGrid(object): @pytest.fixture def tile_mgr(self, mock_file_cache, mock_tile_client, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source_grid = TileGrid(SRS(4326), bbox=[0, -90, 180, 90]) source = TiledSource(source_grid, mock_tile_client) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source], 'png', image_opts=image_opts, locker=tile_locker, ) def test_create_tiles(self, tile_mgr, mock_file_cache, mock_tile_client): tile_mgr.creator().create_tiles([Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(1, 0, 1)]) assert 
mock_tile_client.requested_tiles == [(0, 0, 0)] def test_create_tiles_out_of_bounds(self, tile_mgr): with pytest.raises(InvalidSourceQuery): tile_mgr.creator().create_tiles([Tile((0, 0, 0))]) class MockSource(MapLayer): def __init__(self, *args): MapLayer.__init__(self, *args) self.requested = [] def _image(self, size): return create_debug_img(size) def get_map(self, query): self.requested.append((query.bbox, query.size, query.srs)) return ImageSource(self._image(query.size)) @pytest.fixture def mock_source(): return MockSource() class TestTileManagerSource(object): @pytest.fixture def tile_mgr(self, mock_file_cache, mock_source, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [mock_source], 'png', image_opts=image_opts, locker=tile_locker, ) def test_create_tile(self, tile_mgr, mock_file_cache, mock_source): tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert sorted(mock_source.requested) == \ [((-180.0, -90.0, 0.0, 90.0), (256, 256), SRS(4326)), ((0.0, -90.0, 180.0, 90.0), (256, 256), SRS(4326))] class MockWMSClient(object): def __init__(self): self.requested = [] def retrieve(self, query, format): self.requested.append((query.bbox, query.size, query.srs)) return create_debug_img(query.size) @pytest.fixture def mock_wms_client(): return MockWMSClient() class TestTileManagerWMSSource(object): @pytest.fixture def tile_mgr(self, mock_file_cache, tile_locker, mock_wms_client): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source = WMSSource(mock_wms_client) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source], 'png', meta_size=[2, 2], meta_buffer=0, image_opts=image_opts, locker=tile_locker, ) def test_same_lock_for_meta_tile(self, tile_mgr): assert tile_mgr.lock(Tile((0, 0, 1))).lock_file == \ tile_mgr.lock(Tile((1, 0, 1))).lock_file def test_locks_for_meta_tiles(self, tile_mgr): assert tile_mgr.lock(Tile((0, 0, 2))).lock_file != \ tile_mgr.lock(Tile((2, 0, 2))).lock_file def test_create_tile_first_level(self, tile_mgr, mock_file_cache, mock_wms_client): tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert mock_wms_client.requested == \ [((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))] def test_create_tile(self, tile_mgr, mock_file_cache, mock_wms_client): tile_mgr.creator().create_tiles([Tile((0, 0, 2))]) assert mock_file_cache.stored_tiles == \ set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2)]) assert sorted(mock_wms_client.requested) == \ [((-180.0, -90.0, 0.0, 90.0), (512, 512), SRS(4326))] def test_create_tiles(self, tile_mgr, mock_file_cache, mock_wms_client): tile_mgr.creator().create_tiles([Tile((0, 0, 2)), Tile((2, 0, 2))]) assert mock_file_cache.stored_tiles == \ set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2), (2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]) assert sorted(mock_wms_client.requested) == \ [((-180.0, -90.0, 0.0, 90.0), (512, 512), SRS(4326)), ((0.0, -90.0, 180.0, 90.0), (512, 512), SRS(4326))] def test_load_tile_coords(self, tile_mgr, mock_file_cache, mock_wms_client): tiles = tile_mgr.load_tile_coords(((0, 0, 2), (2, 0, 2))) assert tiles[0].coord == (0, 0, 2) assert isinstance(tiles[0].source, ImageSource) assert tiles[1].coord == (2, 0, 2) assert isinstance(tiles[1].source, ImageSource) assert mock_file_cache.stored_tiles == \ 
set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2), (2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]) assert sorted(mock_wms_client.requested) == \ [((-180.0, -90.0, 0.0, 90.0), (512, 512), SRS(4326)), ((0.0, -90.0, 180.0, 90.0), (512, 512), SRS(4326))] class TestTileManagerWMSSourceConcurrent(TestTileManagerWMSSource): @pytest.fixture def tile_mgr(self, mock_file_cache, tile_locker, mock_wms_client): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source = WMSSource(mock_wms_client) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source], 'png', meta_size=[2, 2], meta_buffer=0, image_opts=image_opts, locker=tile_locker, concurrent_tile_creators=2, ) class TestTileManagerWMSSourceMinimalMetaRequests(object): @pytest.fixture def tile_mgr(self, mock_file_cache, mock_wms_client, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source = WMSSource(mock_wms_client) return TileManager(grid, mock_file_cache, [source], 'png', meta_size=[2, 2], meta_buffer=10, minimize_meta_requests=True, locker=tile_locker, ) def test_create_tile_single(self, tile_mgr, mock_file_cache, mock_wms_client): # not enabled for single tile requests tile_mgr.creator().create_tiles([Tile((0, 0, 2))]) assert mock_file_cache.stored_tiles == \ set([(0, 0, 2), (0, 1, 2), (1, 0, 2), (1, 1, 2)]) assert sorted(mock_wms_client.requested) == \ [((-180.0, -90.0, 3.515625, 90.0), (522, 512), SRS(4326))] def test_create_tile_multiple(self, tile_mgr, mock_file_cache, mock_wms_client): tile_mgr.creator().create_tiles([Tile((4, 0, 3)), Tile((4, 1, 3)), Tile((4, 2, 3))]) assert mock_file_cache.stored_tiles == \ set([(4, 0, 3), (4, 1, 3), (4, 2, 3)]) assert sorted(mock_wms_client.requested) == \ [((-1.7578125, -90, 46.7578125, 46.7578125), (276, 778), SRS(4326))] def test_create_tile_multiple_fragmented(self, tile_mgr, mock_file_cache, mock_wms_client): tile_mgr.creator().create_tiles([Tile((4, 0, 3)), Tile((5, 2, 3))]) assert mock_file_cache.stored_tiles == \ set([(4, 0, 3), (4, 1, 3), (4, 2, 3), (5, 0, 3), (5, 1, 3), (5, 2, 3)]) assert sorted(mock_wms_client.requested) == \ [((-1.7578125, -90, 91.7578125, 46.7578125), (532, 778), SRS(4326))] class SlowMockSource(MockSource): supports_meta_tiles = True def get_map(self, query): time.sleep(0.1) return MockSource.get_map(self, query) class TestTileManagerLocking(object): @pytest.fixture def slow_source(self): return SlowMockSource() @pytest.fixture def mock_file_cache(self, tmpdir): return MockFileCache(tmpdir.strpath, 'png') @pytest.fixture def tile_mgr(self, mock_file_cache, slow_source, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [slow_source], 'png', meta_size=[2, 2], meta_buffer=0, image_opts=image_opts, locker=tile_locker, ) def test_get_single(self, tile_mgr, mock_file_cache, slow_source): tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert slow_source.requested == \ [((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))] def test_concurrent(self, tile_mgr, mock_file_cache, slow_source): def do_it(): tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) threads = [threading.Thread(target=do_it) for _ in range(3)] [t.start() for t in threads] [t.join() for t in threads] assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert mock_file_cache.loaded_tiles == counting_set([(0, 0, 1), (1, 0, 1), (0, 0, 
1), (1, 0, 1)]) assert slow_source.requested == \ [((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))] assert os.path.exists(mock_file_cache.tile_location(Tile((0, 0, 1)))) class TestTileManagerMultipleSources(object): @pytest.fixture def source_base(self): return MockSource() @pytest.fixture def source_overlay(self): return MockSource() @pytest.fixture def tile_mgr(self, mock_file_cache, tile_locker, source_base, source_overlay): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source_base, source_overlay], 'png', image_opts=image_opts, locker=tile_locker, ) def test_get_single(self, tile_mgr, mock_file_cache, source_base, source_overlay): tile_mgr.creator().create_tiles([Tile((0, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1)]) assert source_base.requested == \ [((-180.0, -90.0, 0.0, 90.0), (256, 256), SRS(4326))] assert source_overlay.requested == \ [((-180.0, -90.0, 0.0, 90.0), (256, 256), SRS(4326))] class SolidColorMockSource(MockSource): def __init__(self, color='#ff0000'): MockSource.__init__(self) self.color = color def _image(self, size): return Image.new('RGB', size, self.color) class TestTileManagerMultipleSourcesWithMetaTiles(object): @pytest.fixture def source_base(self): src = SolidColorMockSource(color='#ff0000') src.supports_meta_tiles = True return src @pytest.fixture def source_overlay(self): src = MockSource() src.supports_meta_tiles = True return src @pytest.fixture def tile_mgr(self, mock_file_cache, tile_locker, source_base, source_overlay): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) image_opts = ImageOptions(format='image/png') return TileManager(grid, mock_file_cache, [source_base, source_overlay], 'png', image_opts=image_opts, meta_size=[2, 2], meta_buffer=0, locker=tile_locker, ) def test_merged_tiles(self, tile_mgr, mock_file_cache, source_base, source_overlay): tiles = tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))]) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert source_base.requested == \ [((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))] assert source_overlay.requested == \ [((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))] hist = tiles[0].source.as_image().histogram() # lots of red (base), but not everything (overlay) assert 55000 < hist[255] < 60000 # red = 0xff assert 55000 < hist[256] # green = 0x00 assert 55000 < hist[512] # blue = 0x00 def test_sources_with_mixed_support_for_meta_tiles(self, mock_file_cache, source_base, source_overlay, tile_locker): source_base.supports_meta_tiles = False grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) with pytest.raises(ValueError): TileManager(grid, file_cache, [source_base, source_overlay], 'png', meta_size=[2, 2], meta_buffer=0, locker=tile_locker) def test_sources_with_no_support_for_meta_tiles(self, mock_file_cache, source_base, source_overlay, tile_locker): source_base.supports_meta_tiles = False source_overlay.supports_meta_tiles = False grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) tile_mgr = TileManager(grid, mock_file_cache, [source_base, source_overlay], 'png', meta_size=[2, 2], meta_buffer=0, locker=tile_locker) assert tile_mgr.meta_grid is None class TestTileManagerBulkMetaTiles(object): @pytest.fixture def source_base(self): src = SolidColorMockSource(color='#ff0000') src.supports_meta_tiles = False return src @pytest.fixture def source_overlay(self): src = MockSource() src.supports_meta_tiles = False return src 
@pytest.fixture def tile_mgr(self, mock_file_cache, source_base, source_overlay, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90], origin='ul') return TileManager(grid, mock_file_cache, [source_base, source_overlay], 'png', meta_size=[2, 2], meta_buffer=0, locker=tile_locker, bulk_meta_tiles=True, ) def test_bulk_get(self, tile_mgr, mock_file_cache, source_base, source_overlay): tiles = tile_mgr.creator().create_tiles([Tile((0, 0, 2))]) assert len(tiles) == 2*2 assert mock_file_cache.stored_tiles == set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2)]) for requested in [source_base.requested, source_overlay.requested]: assert set(requested) == set([ ((-180.0, 0.0, -90.0, 90.0), (256, 256), SRS(4326)), ((-90.0, 0.0, 0.0, 90.0), (256, 256), SRS(4326)), ((-180.0, -90.0, -90.0, 0.0), (256, 256), SRS(4326)), ((-90.0, -90.0, 0.0, 0.0), (256, 256), SRS(4326)), ]) def test_bulk_get_error(self, tile_mgr, source_base): tile_mgr.sources = [source_base, ErrorSource()] try: tile_mgr.creator().create_tiles([Tile((0, 0, 2))]) except Exception as ex: assert ex.args[0] == "source error" def test_bulk_get_multiple_meta_tiles(self, tile_mgr, mock_file_cache): tiles = tile_mgr.creator().create_tiles([Tile((1, 0, 2)), Tile((2, 0, 2))]) assert len(tiles) == 2*2*2 assert mock_file_cache.stored_tiles, set([ (0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2), (2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2), ]) class TestTileManagerBulkMetaTilesConcurrent(TestTileManagerBulkMetaTiles): @pytest.fixture def tile_mgr(self, mock_file_cache, source_base, source_overlay, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90], origin='ul') return TileManager( grid, mock_file_cache, [source_base, source_overlay], 'png', meta_size=[2, 2], meta_buffer=0, locker=tile_locker, bulk_meta_tiles=True, concurrent_tile_creators=2, ) class ErrorSource(MapLayer): def __init__(self, *args): MapLayer.__init__(self, *args) self.requested = [] def get_map(self, query): self.requested.append((query.bbox, query.size, query.srs)) raise Exception("source error") default_image_opts = ImageOptions(resampling='bicubic') class TestCacheMapLayer(object): @pytest.fixture def layer(self, mock_file_cache, mock_wms_client, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) source = WMSSource(mock_wms_client) image_opts = ImageOptions(resampling='nearest') tile_mgr = TileManager(grid, mock_file_cache, [source], 'png', meta_size=[2, 2], meta_buffer=0, image_opts=image_opts, locker=tile_locker) return CacheMapLayer(tile_mgr, image_opts=default_image_opts) def test_get_map_small(self, layer, mock_file_cache): result = layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png')) assert mock_file_cache.stored_tiles == set([(0, 0, 1), (1, 0, 1)]) assert result.size == (300, 150) def test_get_map_large(self, layer, mock_file_cache): # gets next resolution layer result = layer.get_map(MapQuery((-180, -90, 180, 90), (600, 300), SRS(4326), 'png')) assert mock_file_cache.stored_tiles == \ set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2), (2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]) assert result.size == (600, 300) def test_transformed(self, layer, mock_file_cache): result = layer.get_map(MapQuery( (-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913), 'png')) assert mock_file_cache.stored_tiles == \ set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2), (2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]) assert result.size == (500, 500) def test_single_tile_match(self, layer, mock_file_cache): result = 
layer.get_map(MapQuery( (0.001, 0, 90, 90), (256, 256), SRS(4326), 'png', tiled_only=True)) assert mock_file_cache.stored_tiles == \ set([(3, 0, 2), (2, 0, 2), (3, 1, 2), (2, 1, 2)]) assert result.size == (256, 256) def test_single_tile_no_match(self, layer): with pytest.raises(MapBBOXError): layer.get_map( MapQuery((0.1, 0, 90, 90), (256, 256), SRS(4326), 'png', tiled_only=True) ) def test_get_map_with_res_range(self, mock_file_cache, mock_wms_client, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) res_range = resolution_range(1000, 10) source = WMSSource(mock_wms_client, res_range=res_range) image_opts = ImageOptions(resampling='nearest') tile_mgr = TileManager(grid, mock_file_cache, [source], 'png', meta_size=[2, 2], meta_buffer=0, image_opts=image_opts, locker=tile_locker) layer = CacheMapLayer(tile_mgr, image_opts=default_image_opts) with pytest.raises(BlankImage): result = layer.get_map(MapQuery( (-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913), 'png')) assert mock_file_cache.stored_tiles == set() result = layer.get_map(MapQuery( (0, 0, 10000, 10000), (50, 50), SRS(900913), 'png')) assert mock_file_cache.stored_tiles == \ set([(512, 257, 10), (513, 256, 10), (512, 256, 10), (513, 257, 10)]) assert result.size == (50, 50) class TestCacheMapLayerWithExtent(object): @pytest.fixture def source(self, mock_wms_client): return WMSSource(mock_wms_client) @pytest.fixture def layer(self, mock_file_cache, source, tile_locker): grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90]) image_opts = ImageOptions(resampling='nearest', format='png') tile_mgr = TileManager(grid, mock_file_cache, [source], 'png', meta_size=[1, 1], meta_buffer=0, image_opts=image_opts, locker=tile_locker) layer = CacheMapLayer(tile_mgr, image_opts=default_image_opts) layer.extent = BBOXCoverage([0, 0, 90, 45], SRS(4326)).extent return layer def test_get_outside_extent(self, layer): with pytest.raises(BlankImage): layer.get_map(MapQuery((-180, -90, 0, 0), (300, 150), SRS(4326), 'png')) def test_get_map_small(self, layer, mock_file_cache, mock_wms_client): result = layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png')) assert mock_file_cache.stored_tiles == set([(1, 0, 1)]) # source requests one tile (no meta-tiling configured) assert mock_wms_client.requested == [((0.0, -90.0, 180.0, 90.0), (256, 256), SRS('EPSG:4326'))] assert result.size == (300, 150) def test_get_map_small_with_source_extent(self, source, layer, mock_file_cache, mock_wms_client): source.extent = BBOXCoverage([0, 0, 90, 45], SRS(4326)).extent result = layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png')) assert mock_file_cache.stored_tiles == set([(1, 0, 1)]) # source requests one tile (no meta-tiling configured) limited to source.extent assert mock_wms_client.requested == [((0, 0, 90, 45), (128, 64), (SRS(4326)))] assert result.size == (300, 150) class TestDirectMapLayer(object): @pytest.fixture def layer(self, mock_wms_client): source = WMSSource(mock_wms_client) return DirectMapLayer(source, GLOBAL_GEOGRAPHIC_EXTENT) def test_get_map(self, layer, mock_wms_client): result = layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png')) assert mock_wms_client.requested == [((-180, -90, 180, 90), (300, 150), SRS(4326))] assert result.size == (300, 150) def test_get_map_mercator(self, layer, mock_wms_client): result = layer.get_map(MapQuery( (-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913), 'png')) assert 
mock_wms_client.requested == \ [((-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913))] assert result.size == (500, 500) class TestDirectMapLayerWithSupportedSRS(object): @pytest.fixture def layer(self, mock_wms_client): source = WMSSource(mock_wms_client) return DirectMapLayer(source, GLOBAL_GEOGRAPHIC_EXTENT) def test_get_map(self, layer, mock_wms_client): result = layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png')) assert mock_wms_client.requested == [((-180, -90, 180, 90), (300, 150), SRS(4326))] assert result.size == (300, 150) def test_get_map_mercator(self, layer, mock_wms_client): result = layer.get_map(MapQuery( (-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913), 'png')) assert mock_wms_client.requested == \ [((-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500), SRS(900913))] assert result.size == (500, 500) class MockHTTPClient(object): def __init__(self): self.requested = [] def open(self, url, data=None): self.requested.append(url) w = int(re.search(r'width=(\d+)', url, re.IGNORECASE).group(1)) h = int(re.search(r'height=(\d+)', url, re.IGNORECASE).group(1)) format = re.search(r'format=image(/|%2F)(\w+)', url, re.IGNORECASE).group(2) transparent = re.search(r'transparent=(\w+)', url, re.IGNORECASE) transparent = True if transparent and transparent.group(1).lower() == 'true' else False result = BytesIO() create_debug_img((int(w), int(h)), transparent).save(result, format=format) result.seek(0) result.headers = {'Content-type': 'image/'+format} return result @pytest.fixture def mock_http_client(): return MockHTTPClient() class TestWMSSourceTransform(object): @pytest.fixture def source(self, mock_http_client): req_template = WMS111MapRequest(url='http://localhost/service?', param={ 'format': 'image/png', 'layers': 'foo' }) client = WMSClient(req_template, http_client=mock_http_client) return WMSSource(client, supported_srs=[SRS(4326)], image_opts=ImageOptions(resampling='bilinear')) def test_get_map(self, source, mock_http_client): source.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326))) assert query_eq(mock_http_client.requested[0], "http://localhost/service?" "layers=foo&width=300&version=1.1.1&bbox=-180,-90,180,90&service=WMS" "&format=image%2Fpng&styles=&srs=EPSG%3A4326&request=GetMap&height=150") def test_get_map_transformed(self, source, mock_http_client): source.get_map(MapQuery( (556597, 4865942, 1669792, 7361866), (300, 150), SRS(900913))) assert wms_query_eq(mock_http_client.requested[0], "http://localhost/service?" "layers=foo&width=300&version=1.1.1" "&bbox=4.99999592195,39.9999980766,14.999996749,54.9999994175&service=WMS" "&format=image%2Fpng&styles=&srs=EPSG%3A4326&request=GetMap&height=450") class TestWMSSourceWithClient(object): @pytest.fixture def req_template(self): return WMS111MapRequest( url='http://%s:%d/service?' 
% TEST_SERVER_ADDRESS, param={'format': 'image/png', 'layers': 'foo'}, ) @pytest.fixture def client(self, req_template): return WMSClient(req_template) @pytest.fixture def source(self, client): return WMSSource(client) def test_get_map(self, source): with tmp_image((512, 512)) as img: expected_req = ({'path': r'/service?LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326&styles=' '&VERSION=1.1.1&BBOX=0.0,10.0,10.0,20.0&WIDTH=512'}, {'body': img.read(), 'headers': {'content-type': 'image/png'}}) with mock_httpd(TEST_SERVER_ADDRESS, [expected_req]): q = MapQuery((0.0, 10.0, 10.0, 20.0), (512, 512), SRS(4326)) result = source.get_map(q) assert isinstance(result, ImageSource) assert result.size == (512, 512) assert is_png(result.as_buffer(seekable=True)) assert result.as_image().size == (512, 512) def test_get_map_non_image_content_type(self, source): with tmp_image((512, 512)) as img: expected_req = ({'path': r'/service?LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326&styles=' '&VERSION=1.1.1&BBOX=0.0,10.0,10.0,20.0&WIDTH=512'}, {'body': img.read(), 'headers': {'content-type': 'text/plain'}}) with mock_httpd(TEST_SERVER_ADDRESS, [expected_req]): q = MapQuery((0.0, 10.0, 10.0, 20.0), (512, 512), SRS(4326)) try: source.get_map(q) except SourceError as e: assert 'no image returned' in e.args[0] else: assert False, 'no SourceError raised' def test_basic_auth(self, req_template, client, source): http_client = HTTPClient(req_template.url, username='foo', password='bar@') client.http_client = http_client def assert_auth(req_handler): assert 'Authorization' in req_handler.headers auth_data = req_handler.headers['Authorization'].split()[1] auth_data = base64.b64decode(auth_data.encode('utf-8')).decode('utf-8') assert auth_data == 'foo:bar@' return True expected_req = ({'path': r'/service?LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326' '&VERSION=1.1.1&BBOX=0.0,10.0,10.0,20.0&WIDTH=512&STYLES=', 'require_basic_auth': True, 'req_assert_function': assert_auth}, {'body': b'no image', 'headers': {'content-type': 'image/png'}}) with mock_httpd(TEST_SERVER_ADDRESS, [expected_req]): q = MapQuery((0.0, 10.0, 10.0, 20.0), (512, 512), SRS(4326)) source.get_map(q) TESTSERVER_URL = 'http://%s:%d' % TEST_SERVER_ADDRESS class TestWMSSource(object): @pytest.fixture def source(self, mock_http_client): req = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo', param={'layers':'foo'}) wms = WMSClient(req, http_client=mock_http_client) return WMSSource(wms, supported_srs=[SRS(4326)], image_opts=ImageOptions(resampling='bilinear')) def test_request(self, source, mock_http_client): req = MapQuery((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326), 'png') source.get_map(req) assert len(mock_http_client.requested) == 1 assert_query_eq(mock_http_client.requested[0], TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=256&SRS=EPSG%3A4326' '&VERSION=1.1.1&BBOX=-180.0,-90.0,180.0,90.0&WIDTH=512&STYLES=') def test_transformed_request(self, source, mock_http_client): req = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(900913), 'png') resp = source.get_map(req) assert len(mock_http_client.requested) == 1 assert wms_query_eq(mock_http_client.requested[0], TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326' '&VERSION=1.1.1&WIDTH=512&STYLES=' 
'&BBOX=-1.79663056824,-1.7963362121,1.79663056824,1.7963362121') img = resp.as_image() assert img.mode in ('P', 'RGB') def test_similar_srs(self, mock_http_client): # request in 3857 and source supports only 900913 # 3857 and 900913 are equal but the client requests must use 900913 req = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo', param={'layers':'foo', 'transparent': 'true'}) wms = WMSClient(req, http_client=mock_http_client) source = WMSSource(wms, supported_srs=[SRS(900913)], image_opts=ImageOptions(resampling='bilinear')) req = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(3857), 'png') source.get_map(req) assert len(mock_http_client.requested) == 1 assert_query_eq(mock_http_client.requested[0], TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A900913' '&VERSION=1.1.1&WIDTH=512&STYLES=&transparent=true' '&BBOX=-200000,-200000,200000,200000') def test_transformed_request_transparent(self, mock_http_client): req = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo', param={'layers':'foo', 'transparent': 'true'}) wms = WMSClient(req, http_client=mock_http_client) source = WMSSource(wms, supported_srs=[SRS(4326)], image_opts=ImageOptions(resampling='bilinear')) req = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(900913), 'png') resp = source.get_map(req) assert len(mock_http_client.requested) == 1 assert wms_query_eq(mock_http_client.requested[0], TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng' '&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326' '&VERSION=1.1.1&WIDTH=512&STYLES=&transparent=true' '&BBOX=-1.79663056824,-1.7963362121,1.79663056824,1.7963362121') img = resp.as_image() assert img.mode in ('P', 'RGBA') img = img.convert('RGBA') assert img.getpixel((5, 5))[3] == 0 class MockLayer(object): def __init__(self): self.requested = [] def get_map(self, query): self.requested.append((query.bbox, query.size, query.srs)) @pytest.mark.parametrize('case,map_query,low_requested', [ ['low', MapQuery((0, 0, 10000, 10000), (100, 100), SRS(3857)), True], ['high', MapQuery((0, 0, 100, 100), (100, 100), SRS(3857)), False], ['match', MapQuery((0, 0, 10, 10), (100, 100), SRS(3857)), False], ['low_transform', MapQuery((0, 0, 0.1, 0.1), (100, 100), SRS(4326)), True], ['high_transform', MapQuery((0, 0, 0.005, 0.005), (100, 100), SRS(4326)), False], ]) def test_resolution_conditional_layers(case, map_query, low_requested): low = MockLayer() high = MockLayer() layer = ResolutionConditional(low, high, 10, SRS(3857), GLOBAL_GEOGRAPHIC_EXTENT) layer.get_map(map_query) assert bool(low.requested) == low_requested assert bool(high.requested) != low_requested def test_srs_conditional_layers(): l4326 = MockLayer() l900913 = MockLayer() l32632 = MockLayer() layer = SRSConditional([ (l4326, (SRS('EPSG:4326'),)), (l900913, (SRS('EPSG:900913'), SRS('EPSG:31467'))), (l32632, (SRSConditional.PROJECTED,)), ], GLOBAL_GEOGRAPHIC_EXTENT) # srs match assert layer._select_layer(SRS(4326)) == l4326 assert layer._select_layer(SRS(900913)) == l900913 assert layer._select_layer(SRS(31467)) == l900913 # type match (projected) assert layer._select_layer(SRS(31466)) == l32632 assert layer._select_layer(SRS(32633)) == l32632 # fallback is first layer assert layer._select_layer(SRS(4258)) == l4326 @pytest.mark.parametrize('case,map_query,is_direct,is_l3857,is_l4326', [ ['high_3857', MapQuery((0, 0, 100, 100), (100, 100), SRS(900913)), True, False, False], ['high_4326', MapQuery((0, 0, 0.0001, 
0.0001), (100, 100), SRS(4326)), True, False, False],
    ['low_4326', MapQuery((0, 0, 10, 10), (100, 100), SRS(4326)), False, False, True],
    ['low_3857', MapQuery((0, 0, 10000, 10000), (100, 100), SRS(31467)), False, True, False],
    ['low_projected', MapQuery((0, 0, 10000, 10000), (100, 100), SRS(31467)), False, True, False],
])
def test_nested_conditional_layers(case, map_query, is_direct, is_l3857, is_l4326):
    direct = MockLayer()
    l3857 = MockLayer()
    l4326 = MockLayer()

    layer = ResolutionConditional(
        SRSConditional([
            (l3857, (SRS('EPSG:3857'),)),
            (l4326, (SRS('EPSG:4326'),))
        ], GLOBAL_GEOGRAPHIC_EXTENT),
        direct,
        10, SRS(3857),
        GLOBAL_GEOGRAPHIC_EXTENT
    )

    layer.get_map(map_query)

    assert bool(direct.requested) == is_direct
    assert bool(l3857.requested) == is_l3857
    assert bool(l4326.requested) == is_l4326
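# --- Illustrative sketch (not part of the original test module) ---
# A small, hypothetical helper showing how the pieces exercised above compose:
# grid + mock cache + tiled source -> TileManager. The `lock_dir` argument is an
# assumption supplied by the caller (the fixtures above use pytest's tmpdir).
def _example_tile_manager(lock_dir):
    grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
    cache = MockFileCache('/dev/null', 'png')
    source = TiledSource(grid, MockTileClient())
    locker = TileLocker(lock_dir, 10, "example")
    tile_mgr = TileManager(grid, cache, [source], 'png', locker=locker)
    tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))])
    return cache.stored_tiles  # {(0, 0, 1), (1, 0, 1)}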
protocols.py
import asyncio import collections import dataclasses import logging import queue import socketserver import threading import time from typing import Any, Callable, Dict, NamedTuple, Optional, Set, Tuple, Union from .captures import Capture, CaptureEntry from .messages import OscBundle, OscMessage osc_in_logger = logging.getLogger("supriya.osc.in") osc_out_logger = logging.getLogger("supriya.osc.out") udp_in_logger = logging.getLogger("supriya.udp.in") udp_out_logger = logging.getLogger("supriya.udp.out") class OscProtocolOffline(Exception): pass class OscProtocolAlreadyConnected(Exception): pass class OscCallback(NamedTuple): pattern: Tuple[Union[str, int, float], ...] procedure: Callable failure_pattern: Optional[Tuple[Union[str, int, float], ...]] = None once: bool = False @dataclasses.dataclass class HealthCheck: request_pattern: str response_pattern: str callback: Callable timeout: float = 1.0 backoff_factor: float = 1.5 max_attempts: int = 5 class OscProtocol: ### INITIALIZER ### def __init__(self): self.callbacks: Dict[Any, Any] = {} self.captures: Set[Capture] = set() self.healthcheck = None self.healthcheck_osc_callback = None self.attempts = 0 self.ip_address = None self.is_running: bool = False self.port = None ### PRIVATE METHODS ### def _add_callback(self, callback: OscCallback): patterns = [callback.pattern] if callback.failure_pattern: patterns.append(callback.failure_pattern) for pattern in patterns: callback_map = self.callbacks for item in pattern: callbacks, callback_map = callback_map.setdefault(item, ([], {})) callbacks.append(callback) def _match_callbacks(self, message): items = (message.address,) + message.contents matching_callbacks = [] callback_map = self.callbacks for item in items: if item not in callback_map: break callbacks, callback_map = callback_map[item] matching_callbacks.extend(callbacks) for callback in matching_callbacks: if callback.once: self.unregister(callback) return matching_callbacks def _remove_callback(self, callback: OscCallback): def delete(pattern, original_callback_map): key = pattern.pop(0) if key not in original_callback_map: return callbacks, callback_map = original_callback_map[key] if pattern: delete(pattern, callback_map) if callback in callbacks: callbacks.remove(callback) if not callbacks and not callback_map: original_callback_map.pop(key) patterns = [callback.pattern] if callback.failure_pattern: patterns.append(callback.failure_pattern) for pattern in patterns: delete(list(pattern), self.callbacks) def _reset_attempts(self, message): self.attempts = 0 def _setup(self, ip_address, port, healthcheck): self.ip_address = ip_address self.port = port self.healthcheck = healthcheck if self.healthcheck: self.healthcheck_osc_callback = self.register( pattern=self.healthcheck.response_pattern, procedure=self._reset_attempts, ) def _teardown(self): self.is_running = False if self.healthcheck: self.unregister(self.healthcheck_osc_callback) def _validate_callback( self, pattern, procedure, *, failure_pattern=None, once=False, ): if isinstance(pattern, (str, int, float)): pattern = [pattern] if isinstance(failure_pattern, (str, int, float)): failure_pattern = [failure_pattern] assert callable(procedure) return OscCallback( pattern=tuple(pattern), failure_pattern=failure_pattern, procedure=procedure, once=bool(once), ) def _validate_receive(self, datagram): udp_in_logger.debug(datagram) try: message = OscMessage.from_datagram(datagram) except Exception: raise osc_in_logger.debug(repr(message)) for callback in self._match_callbacks(message): 
callback.procedure(message) for capture in self.captures: capture.messages.append( CaptureEntry(timestamp=time.time(), label="R", message=message,) ) def _validate_send(self, message): if not self.is_running: raise OscProtocolOffline if not isinstance(message, (str, collections.Iterable, OscBundle, OscMessage)): raise ValueError(message) if isinstance(message, str): message = OscMessage(message) elif isinstance(message, collections.Iterable): message = OscMessage(*message) osc_out_logger.debug(repr(message)) for capture in self.captures: capture.messages.append( CaptureEntry(timestamp=time.time(), label="S", message=message) ) datagram = message.to_datagram() udp_out_logger.debug(datagram) return datagram ### PUBLIC METHODS ### def capture(self): return Capture(self) def connect(self, ip_address: str, port: int, *, healthcheck: HealthCheck = None): ... def disconnect(self): ... def register( self, pattern, procedure, *, failure_pattern=None, once=False ) -> OscCallback: ... def send(self, message): ... def unregister(self, callback: OscCallback): ... class AsyncOscProtocol(asyncio.DatagramProtocol, OscProtocol): ### INITIALIZER ### def __init__(self): asyncio.DatagramProtocol.__init__(self) OscProtocol.__init__(self) self.loop = None ### PRIVATE METHODS ### async def _run_healthcheck(self): while self.is_running: sleep_time = self.healthcheck.timeout * pow( self.healthcheck.backoff_factor, self.attempts ) self.attempts += 1 if self.attempts >= self.healthcheck.max_attempts: self.exit_future.set_result(True) self._teardown() self.transport.close() obj_ = self.healthcheck.callback() if asyncio.iscoroutine(obj_): self.loop.create_task(obj_) return self.send(OscMessage(*self.healthcheck.request_pattern)) await asyncio.sleep(sleep_time) ### PUBLIC METHODS ### async def connect( self, ip_address: str, port: int, *, healthcheck: HealthCheck = None ): if self.is_running: raise OscProtocolAlreadyConnected self._setup(ip_address, port, healthcheck) self.loop = asyncio.get_running_loop() self.exit_future = self.loop.create_future() _, protocol = await self.loop.create_datagram_endpoint( lambda: self, remote_addr=(ip_address, port), ) def connection_made(self, transport): loop = asyncio.get_running_loop() self.transport = transport self.is_running = True if self.healthcheck: self.healthcheck_task = loop.create_task(self._run_healthcheck()) def connection_lost(self, exc): pass def datagram_received(self, data, addr): self._validate_receive(data) async def disconnect(self): if not self.is_running: return self.exit_future.set_result(True) self._teardown() if self.loop.is_closed(): return if not self.transport.is_closing(): self.transport.close() if self.healthcheck is not None: await self.healthcheck_task def error_received(self, exc): osc_out_logger.warning(exc) def register( self, pattern, procedure, *, failure_pattern=None, once=False, ) -> OscCallback: callback = self._validate_callback( pattern, procedure, failure_pattern=failure_pattern, once=once ) self._add_callback(callback) return callback def send(self, message): datagram = self._validate_send(message) return self.transport.sendto(datagram) def unregister(self, callback: OscCallback): self._remove_callback(callback) class ThreadedOscServer(socketserver.UDPServer): osc_protocol: "ThreadedOscProtocol" def verify_request(self, request, client_address): self.osc_protocol._process_command_queue() return True def service_actions(self): self.osc_protocol._run_healthcheck() class ThreadedOscHandler(socketserver.BaseRequestHandler): def handle(self): 
data = self.request[0] self.server.osc_protocol._validate_receive(data) class ThreadedOscProtocol(OscProtocol): ### INITIALIZER ### def __init__(self): OscProtocol.__init__(self) self.command_queue = queue.Queue() self.lock = threading.RLock() self.server = None self.server_thread = None ### SPECIAL METHODS ### def __del__(self): self.disconnect() ### PRIVATE METHODS ### def _process_command_queue(self): while self.command_queue.qsize(): try: action, callback = self.command_queue.get() except queue.Empty: continue if action == "add": self._add_callback(callback) elif action == "remove": self._remove_callback(callback) def _run_healthcheck(self): if self.healthcheck is None or time.time() < self.healthcheck_deadline: return self.healthcheck_deadline += self.healthcheck.timeout * pow( self.healthcheck.backoff_factor, self.attempts ) self.attempts += 1 if self.attempts < self.healthcheck.max_attempts: self.send(OscMessage(*self.healthcheck.request_pattern)) return self._BaseServer__shutdown_request = True with self.lock: self._teardown() self.server = None self.server_thread = None self.healthcheck.callback() def _server_factory(self, ip_address, port): server = ThreadedOscServer( (self.ip_address, self.port), ThreadedOscHandler, bind_and_activate=False ) server.osc_protocol = self return server ### PUBLIC METHODS ### def connect(self, ip_address: str, port: int, *, healthcheck: HealthCheck = None): if self.is_running: raise OscProtocolAlreadyConnected self._setup(ip_address, port, healthcheck) self.healthcheck_deadline = time.time() self.server = self._server_factory(ip_address, port) self.server_thread = threading.Thread(target=self.server.serve_forever) self.server_thread.daemon = True self.server_thread.start() self.is_running = True def disconnect(self): with self.lock: if not self.is_running: return self._teardown() self.server.shutdown() self.server = None self.server_thread = None def register( self, pattern, procedure, *, failure_pattern=None, once=False, ) -> OscCallback: """ Register a callback. """ callback = self._validate_callback( pattern, procedure, failure_pattern=failure_pattern, once=once ) # Command queue prevents lock contention. self.command_queue.put(("add", callback)) return callback def send(self, message): datagram = self._validate_send(message) try: self.server.socket.sendto(datagram, (self.ip_address, self.port)) except OSError: # print(message) raise def unregister(self, callback: OscCallback): """ Unregister a callback. """ # Command queue prevents lock contention. self.command_queue.put(("remove", callback))
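# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical use of ThreadedOscProtocol with a HealthCheck. The address,
# port, and OSC patterns below are assumptions for illustration only; the healthcheck
# gives up after `max_attempts` unanswered /status requests and then invokes its
# callback (here, disconnect).
def _example_threaded_osc_protocol():
    protocol = ThreadedOscProtocol()
    protocol.register(
        pattern="/status.reply",
        procedure=lambda message: print("received", message),
    )
    protocol.connect(
        "127.0.0.1",
        57110,
        healthcheck=HealthCheck(
            request_pattern=["/status"],
            response_pattern=["/status.reply"],
            callback=protocol.disconnect,
        ),
    )
    protocol.send(OscMessage("/status"))
    return protocol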
doom_gym.py
import copy import os import random import re import time from os.path import join from threading import Thread import cv2 import gym import numpy as np from filelock import FileLock, Timeout from gym.utils import seeding from vizdoom.vizdoom import ScreenResolution, DoomGame, Mode, AutomapMode from sample_factory.algorithms.utils.spaces.discretized import Discretized from sample_factory.utils.utils import log, project_tmp_dir from sample_factory.envs.doom.fixRender import SimpleImageViewer def doom_lock_file(max_parallel): """ Doom instances tend to have problems starting when a lot of them are initialized in parallel. This is not a problem during normal execution once the envs are initialized. The "sweet spot" for the number of envs that can be initialized in parallel is about 5-10. Here we use file locking mechanism to ensure that only a limited amount of envs are being initialized at the same time. This tends to be more of a problem for multiplayer envs. This also has an advantage of working across completely independent process groups, e.g. different experiments. """ lock_filename = f'doom_{random.randrange(0, max_parallel):03d}.lockfile' tmp_dir = project_tmp_dir() lock_path = join(tmp_dir, lock_filename) return lock_path def key_to_action_default(key): """ MOVE_FORWARD MOVE_BACKWARD MOVE_RIGHT MOVE_LEFT SELECT_WEAPON1 SELECT_WEAPON2 SELECT_WEAPON3 SELECT_WEAPON4 SELECT_WEAPON5 SELECT_WEAPON6 SELECT_WEAPON7 ATTACK SPEED TURN_LEFT_RIGHT_DELTA """ from pynput.keyboard import Key # health gathering action_table = { Key.left: 0, Key.right: 1, Key.up: 2, Key.down: 3, } # action_table = { # Key.up: 0, # Key.down: 1, # Key.alt: 6, # Key.ctrl: 11, # Key.shift: 12, # Key.space: 13, # Key.right: 'turn_right', # Key.left: 'turn_left', # } return action_table.get(key, None) class VizdoomEnv(gym.Env): def __init__(self, action_space, config_file, coord_limits=None, max_histogram_length=200, show_automap=False, skip_frames=1, async_mode=False, record_to=None): self.initialized = False # essential game data self.game = None self.state = None self.curr_seed = 0 self.rng = None self.skip_frames = skip_frames self.async_mode = async_mode # optional - for topdown view rendering and visitation heatmaps self.show_automap = show_automap self.coord_limits = coord_limits # can be adjusted after the environment is created (but before any reset() call) via observation space wrapper self.screen_w, self.screen_h, self.channels = 640, 480, 3 self.screen_resolution = ScreenResolution.RES_640X480 self.calc_observation_space() self.black_screen = None # provided as a part of environment definition, since these depend on the scenario and # can be quite complex multi-discrete spaces self.action_space = action_space self.composite_action_space = hasattr(self.action_space, 'spaces') self.delta_actions_scaling_factor = 7.5 if os.path.isabs(config_file): self.config_path = config_file else: scenarios_dir = join(os.path.dirname(__file__), 'scenarios') self.config_path = join(scenarios_dir, config_file) if not os.path.isfile(self.config_path): log.warning( 'File %s not found in scenarios dir %s. 
Consider providing absolute path?', config_file, scenarios_dir, ) self.variable_indices = self._parse_variable_indices(self.config_path) # only created if we call render() method self.viewer = None # record full episodes using VizDoom recording functionality self.record_to = record_to self.is_multiplayer = False # overridden in derived classes # (optional) histogram to track positional coverage # do not pass coord_limits if you don't need this, to avoid extra calculation self.max_histogram_length = max_histogram_length self.current_histogram, self.previous_histogram = None, None if self.coord_limits: x = (self.coord_limits[2] - self.coord_limits[0]) y = (self.coord_limits[3] - self.coord_limits[1]) if x > y: len_x = self.max_histogram_length len_y = int((y / x) * self.max_histogram_length) else: len_x = int((x / y) * self.max_histogram_length) len_y = self.max_histogram_length self.current_histogram = np.zeros((len_x, len_y), dtype=np.int32) self.previous_histogram = np.zeros_like(self.current_histogram) # helpers for human play with pynput keyboard input self._terminate = False self._current_actions = [] self._actions_flattened = None self._prev_info = None self._last_episode_info = None self._num_episodes = 0 self.mode = 'algo' self.seed() def seed(self, seed=None): self.curr_seed = seeding.hash_seed(seed, max_bytes=4) self.rng, _ = seeding.np_random(seed=self.curr_seed) return [self.curr_seed, self.rng] def calc_observation_space(self): self.observation_space = gym.spaces.Box(0, 255, (self.screen_h, self.screen_w, self.channels), dtype=np.uint8) def _set_game_mode(self, mode): if mode == 'replay': self.game.set_mode(Mode.PLAYER) else: if self.async_mode: log.info('Starting in async mode! Use this only for testing, otherwise PLAYER mode is much faster') self.game.set_mode(Mode.ASYNC_PLAYER) else: self.game.set_mode(Mode.PLAYER) def _create_doom_game(self, mode): self.game = DoomGame() self.game.load_config(self.config_path) self.game.set_screen_resolution(self.screen_resolution) self.game.set_seed(self.rng.randint(0, 2**32 - 1)) if mode == 'algo': self.game.set_window_visible(False) elif mode == 'human' or mode == 'replay': self.game.add_game_args('+freelook 1') self.game.set_window_visible(True) else: raise Exception('Unsupported mode') self._set_game_mode(mode) def _game_init(self, with_locking=True, max_parallel=10): lock_file = lock = None if with_locking: lock_file = doom_lock_file(max_parallel) lock = FileLock(lock_file) init_attempt = 0 while True: init_attempt += 1 try: if with_locking: with lock.acquire(timeout=20): self.game.init() else: self.game.init() break except Timeout: if with_locking: log.debug( 'Another process currently holds the lock %s, attempt: %d', lock_file, init_attempt, ) except Exception as exc: log.warning('VizDoom game.init() threw an exception %r. 
Terminate process...', exc) from sample_factory.envs.env_utils import EnvCriticalError raise EnvCriticalError() def initialize(self): self._create_doom_game(self.mode) # (optional) top-down view provided by the game engine if self.show_automap: self.game.set_automap_buffer_enabled(True) self.game.set_automap_mode(AutomapMode.OBJECTS) self.game.set_automap_rotate(False) self.game.set_automap_render_textures(False) # self.game.add_game_args("+am_restorecolors") # self.game.add_game_args("+am_followplayer 1") background_color = 'ffffff' self.game.add_game_args('+viz_am_center 1') self.game.add_game_args('+am_backcolor ' + background_color) self.game.add_game_args('+am_tswallcolor dddddd') # self.game.add_game_args("+am_showthingsprites 0") self.game.add_game_args('+am_yourcolor ' + background_color) self.game.add_game_args('+am_cheat 0') self.game.add_game_args('+am_thingcolor 0000ff') # player color self.game.add_game_args('+am_thingcolor_item 00ff00') # self.game.add_game_args("+am_thingcolor_citem 00ff00") self._game_init() self.initialized = True def _ensure_initialized(self): if not self.initialized: self.initialize() @staticmethod def _parse_variable_indices(config): with open(config, 'r') as config_file: lines = config_file.readlines() lines = [l.strip() for l in lines] variable_indices = {} for line in lines: if line.startswith('#'): continue # comment variables_syntax = r'available_game_variables[\s]*=[\s]*\{(.*)\}' match = re.match(variables_syntax, line) if match is not None: variables_str = match.groups()[0] variables_str = variables_str.strip() variables = variables_str.split(' ') for i, variable in enumerate(variables): variable_indices[variable] = i break return variable_indices def _black_screen(self): if self.black_screen is None: self.black_screen = np.zeros(self.observation_space.shape, dtype=np.uint8) return self.black_screen def _game_variables_dict(self, state): game_variables = state.game_variables variables = {} for variable, idx in self.variable_indices.items(): variables[variable] = game_variables[idx] return variables def demo_path(self, episode_idx): demo_name = f'e{episode_idx:03d}.lmp' demo_path = join(self.record_to, demo_name) demo_path = os.path.normpath(demo_path) return demo_path def reset(self): self._ensure_initialized() if self.record_to is not None and not self.is_multiplayer: # does not work in multiplayer (uses different mechanism) if not os.path.exists(self.record_to): os.makedirs(self.record_to) demo_path = self.demo_path(self._num_episodes) log.warning('Recording episode demo to %s', demo_path) self.game.new_episode(demo_path) else: if self._num_episodes > 0: # no demo recording (default) self.game.new_episode() self.state = self.game.get_state() img = None try: img = self.state.screen_buffer except AttributeError: # sometimes Doom does not return screen buffer at all??? Rare bug pass if img is None: log.error('Game returned None screen buffer! 
This is not supposed to happen!') img = self._black_screen() # Swap current and previous histogram if self.current_histogram is not None and self.previous_histogram is not None: swap = self.current_histogram self.current_histogram = self.previous_histogram self.previous_histogram = swap self.current_histogram.fill(0) self._actions_flattened = None self._last_episode_info = copy.deepcopy(self._prev_info) self._prev_info = None self._num_episodes += 1 return np.transpose(img, (1, 2, 0)) def _convert_actions(self, actions): """Convert actions from gym action space to the action space expected by Doom game.""" if self.composite_action_space: # composite action space with multiple subspaces spaces = self.action_space.spaces else: # simple action space, e.g. Discrete. We still treat it like composite of length 1 spaces = (self.action_space, ) actions = (actions, ) actions_flattened = [] for i, action in enumerate(actions): if isinstance(spaces[i], Discretized): # discretized continuous action # check discretized first because it's a subclass of gym.spaces.Discrete # the order of if clauses here matters! DON'T CHANGE THE ORDER OF IFS! continuous_action = spaces[i].to_continuous(action) actions_flattened.append(continuous_action) elif isinstance(spaces[i], gym.spaces.Discrete): # standard discrete action num_non_idle_actions = spaces[i].n - 1 action_one_hot = np.zeros(num_non_idle_actions, dtype=np.uint8) if action > 0: action_one_hot[action - 1] = 1 # 0th action in each subspace is a no-op actions_flattened.extend(action_one_hot) elif isinstance(spaces[i], gym.spaces.Box): # continuous action actions_flattened.extend(list(action * self.delta_actions_scaling_factor)) else: raise NotImplementedError(f'Action subspace type {type(spaces[i])} is not supported!') return actions_flattened def _vizdoom_variables_bug_workaround(self, info, done): """Some variables don't get reset to zero on game.new_episode(). This fixes it (also check overflow?).""" if done and 'DAMAGECOUNT' in info: log.info('DAMAGECOUNT value on done: %r', info.get('DAMAGECOUNT')) if self._last_episode_info is not None: bugged_vars = ['DEATHCOUNT', 'HITCOUNT', 'DAMAGECOUNT'] for v in bugged_vars: if v in info: info[v] -= self._last_episode_info.get(v, 0) def _process_game_step(self, state, done, info): if not done: observation = np.transpose(state.screen_buffer, (1, 2, 0)) game_variables = self._game_variables_dict(state) info.update(self.get_info(game_variables)) self._update_histogram(info) self._prev_info = copy.deepcopy(info) else: observation = self._black_screen() # when done=True Doom does not allow us to call get_info, so we provide info from the last frame info.update(self._prev_info) self._vizdoom_variables_bug_workaround(info, done) return observation, done, info def step(self, actions): """ Action is either a single value (discrete, one-hot), or a tuple with an action for each of the discrete action subspaces. """ if self._actions_flattened is not None: # provided externally, e.g. 
via human play actions_flattened = self._actions_flattened self._actions_flattened = None else: actions_flattened = self._convert_actions(actions) default_info = {'num_frames': self.skip_frames} reward = self.game.make_action(actions_flattened, self.skip_frames) state = self.game.get_state() done = self.game.is_episode_finished() observation, done, info = self._process_game_step(state, done, default_info) return observation, reward, done, info def render(self, mode='human'): try: img = self.game.get_state().screen_buffer img = np.transpose(img, [1, 2, 0]) if mode == 'rgb_array': return img h, w = img.shape[:2] render_w = 1280 if w < render_w: render_h = int(render_w * h / w) img = cv2.resize(img, (render_w, render_h)) if self.viewer is None: #from gym.envs.classic_control import rendering #self.viewer = rendering.SimpleImageViewer(maxwidth=render_w) self.viewer = SimpleImageViewer(maxwidth=render_w) self.viewer.imshow(img) return img except AttributeError: return None def close(self): try: if self.game is not None: self.game.close() except RuntimeError as exc: log.warning('Runtime error in VizDoom game close(): %r', exc) if self.viewer is not None: self.viewer.close() def get_info(self, variables=None): if variables is None: variables = self._game_variables_dict(self.game.get_state()) info_dict = {'pos': self.get_positions(variables)} info_dict.update(variables) return info_dict def get_info_all(self, variables=None): if variables is None: variables = self._game_variables_dict(self.game.get_state()) info = self.get_info(variables) if self.previous_histogram is not None: info['previous_histogram'] = self.previous_histogram return info def get_positions(self, variables): return self._get_positions(variables) @staticmethod def _get_positions(variables): have_coord_data = True required_vars = ['POSITION_X', 'POSITION_Y', 'ANGLE'] for required_var in required_vars: if required_var not in variables: have_coord_data = False break x = y = a = np.nan if have_coord_data: x = variables['POSITION_X'] y = variables['POSITION_Y'] a = variables['ANGLE'] return {'agent_x': x, 'agent_y': y, 'agent_a': a} def get_automap_buffer(self): if self.game.is_episode_finished(): return None state = self.game.get_state() map_ = state.automap_buffer map_ = np.swapaxes(map_, 0, 2) map_ = np.swapaxes(map_, 0, 1) return map_ def _update_histogram(self, info, eps=1e-8): if self.current_histogram is None: return agent_x, agent_y = info['pos']['agent_x'], info['pos']['agent_y'] # Get agent coordinates normalized to [0, 1] dx = (agent_x - self.coord_limits[0]) / (self.coord_limits[2] - self.coord_limits[0]) dy = (agent_y - self.coord_limits[1]) / (self.coord_limits[3] - self.coord_limits[1]) # Rescale coordinates to histogram dimensions # Subtract eps to exclude upper bound of dx, dy dx = int((dx - eps) * self.current_histogram.shape[0]) dy = int((dy - eps) * self.current_histogram.shape[1]) self.current_histogram[dx, dy] += 1 def _key_to_action(self, key): if hasattr(self.action_space, 'key_to_action'): return self.action_space.key_to_action(key) else: return key_to_action_default(key) def _keyboard_on_press(self, key): from pynput.keyboard import Key if key == Key.esc: self._terminate = True return False action = self._key_to_action(key) if action is not None: if action not in self._current_actions: self._current_actions.append(action) def _keyboard_on_release(self, key): action = self._key_to_action(key) if action is not None: if action in self._current_actions: self._current_actions.remove(action) # noinspection 
PyProtectedMember @staticmethod def play_human_mode(env, skip_frames=1, num_episodes=3, num_actions=None): from pynput.keyboard import Listener doom = env.unwrapped doom.skip_frames = 1 # handled by this script separately # noinspection PyProtectedMember def start_listener(): with Listener(on_press=doom._keyboard_on_press, on_release=doom._keyboard_on_release) as listener: listener.join() listener_thread = Thread(target=start_listener) listener_thread.start() for episode in range(num_episodes): doom.mode = 'human' env.reset() last_render_time = time.time() time_between_frames = 1.0 / 35.0 total_rew = 0.0 while not doom.game.is_episode_finished() and not doom._terminate: num_actions = 14 if num_actions is None else num_actions turn_delta_action_idx = num_actions - 1 actions = [0] * num_actions for action in doom._current_actions: if isinstance(action, int): actions[action] = 1 # 1 for buttons currently pressed, 0 otherwise else: if action == 'turn_left': actions[turn_delta_action_idx] = -doom.delta_actions_scaling_factor elif action == 'turn_right': actions[turn_delta_action_idx] = doom.delta_actions_scaling_factor for frame in range(skip_frames): doom._actions_flattened = actions _, rew, _, _ = env.step(actions) new_total_rew = total_rew + rew if new_total_rew != total_rew: log.info('Reward: %.3f, total: %.3f', rew, new_total_rew) total_rew = new_total_rew state = doom.game.get_state() verbose = True if state is not None and verbose: info = doom.get_info() print( 'Health:', info['HEALTH'], # 'Weapon:', info['SELECTED_WEAPON'], # 'ready:', info['ATTACK_READY'], # 'ammo:', info['SELECTED_WEAPON_AMMO'], # 'pc:', info['PLAYER_COUNT'], # 'dmg:', info['DAMAGECOUNT'], ) time_since_last_render = time.time() - last_render_time time_wait = time_between_frames - time_since_last_render if doom.show_automap and state.automap_buffer is not None: map_ = state.automap_buffer map_ = np.swapaxes(map_, 0, 2) map_ = np.swapaxes(map_, 0, 1) cv2.imshow('ViZDoom Automap Buffer', map_) if time_wait > 0: cv2.waitKey(int(time_wait) * 1000) else: if time_wait > 0: time.sleep(time_wait) last_render_time = time.time() if doom.show_automap: cv2.destroyAllWindows() log.debug('Press ESC to exit...') listener_thread.join() # noinspection PyProtectedMember @staticmethod def replay(env, rec_path): doom = env.unwrapped doom.mode = 'replay' doom._ensure_initialized() doom.game.replay_episode(rec_path) episode_reward = 0 start = time.time() while not doom.game.is_episode_finished(): doom.game.advance_action() r = doom.game.get_last_reward() episode_reward += r log.info('Episode reward: %.3f, time so far: %.1f s', episode_reward, time.time() - start) log.info('Finishing replay') doom.close()
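# --- Added usage sketch (not part of the original file) ----------------------
# A hedged example of driving VizdoomEnv directly. The 'basic.cfg' scenario name
# and the Discrete(4) action space are illustrative assumptions; in sample-factory
# the matching config/action-space pairs come from the env registry, which is not
# shown here.
def _example_vizdoom_env_usage():
    action_space = gym.spaces.Discrete(4)  # action 0 is treated as a no-op
    env = VizdoomEnv(action_space, 'basic.cfg', skip_frames=4)
    obs = env.reset()  # lazily creates and initializes the DoomGame instance
    for _ in range(10):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            obs = env.reset()
    env.close()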
pranjan77contigfilter4Server.py
#!/usr/bin/env python from wsgiref.simple_server import make_server import sys import json import traceback import datetime from multiprocessing import Process from getopt import getopt, GetoptError from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\ JSONRPCError, ServerError, InvalidRequestError from os import environ from ConfigParser import ConfigParser from biokbase import log import biokbase.nexus import requests as _requests import urlparse as _urlparse import random as _random import os import requests.packages.urllib3 DEPLOY = 'KB_DEPLOYMENT_CONFIG' SERVICE = 'KB_SERVICE_NAME' # Note that the error fields do not match the 2.0 JSONRPC spec def get_config_file(): return environ.get(DEPLOY, None) def get_service_name(): return environ.get(SERVICE, None) def get_config(): if not get_config_file(): return None retconfig = {} config = ConfigParser() config.read(get_config_file()) for nameval in config.items(get_service_name() or 'pranjan77contigfilter4'): retconfig[nameval[0]] = nameval[1] return retconfig config = get_config() from pranjan77contigfilter4.pranjan77contigfilter4Impl import pranjan77contigfilter4 impl_pranjan77contigfilter4 = pranjan77contigfilter4(config) class JSONObjectEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, set): return list(obj) if isinstance(obj, frozenset): return list(obj) if hasattr(obj, 'toJSONable'): return obj.toJSONable() return json.JSONEncoder.default(self, obj) sync_methods = {} async_run_methods = {} async_check_methods = {} async_run_methods['pranjan77contigfilter4.filter_contigs_async'] = ['pranjan77contigfilter4', 'filter_contigs'] async_check_methods['pranjan77contigfilter4.filter_contigs_check'] = ['pranjan77contigfilter4', 'filter_contigs'] sync_methods['pranjan77contigfilter4.filter_contigs'] = True class AsyncJobServiceClient(object): def __init__(self, timeout=30 * 60, token=None, ignore_authrc=True, trust_all_ssl_certificates=False): url = environ.get('KB_JOB_SERVICE_URL', None) if url is None and config is not None: url = config.get('job-service-url') if url is None: raise ValueError('Neither \'job-service-url\' parameter is defined in '+ 'configuration nor \'KB_JOB_SERVICE_URL\' variable is defined in system') scheme, _, _, _, _, _ = _urlparse.urlparse(url) if scheme not in ['http', 'https']: raise ValueError(url + " isn't a valid http url") self.url = url self.timeout = int(timeout) self._headers = dict() self.trust_all_ssl_certificates = trust_all_ssl_certificates if token is None: raise ValueError('Authentication is required for async methods') self._headers['AUTHORIZATION'] = token if self.timeout < 1: raise ValueError('Timeout value must be at least 1 second') def _call(self, method, params, json_rpc_call_context = None): arg_hash = {'method': method, 'params': params, 'version': '1.1', 'id': str(_random.random())[2:] } if json_rpc_call_context: arg_hash['context'] = json_rpc_call_context body = json.dumps(arg_hash, cls=JSONObjectEncoder) ret = _requests.post(self.url, data=body, headers=self._headers, timeout=self.timeout, verify=not self.trust_all_ssl_certificates) if ret.status_code == _requests.codes.server_error: if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json': err = json.loads(ret.text) if 'error' in err: raise ServerError(**err['error']) else: raise ServerError('Unknown', 0, ret.text) else: raise ServerError('Unknown', 0, ret.text) if ret.status_code != _requests.codes.OK: ret.raise_for_status() resp = json.loads(ret.text) if 'result' not in 
resp: raise ServerError('Unknown', 0, 'An unknown server error occurred') return resp['result'] def run_job(self, run_job_params, json_rpc_call_context = None): return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0] def check_job(self, job_id, json_rpc_call_context = None): return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0] class JSONRPCServiceCustom(JSONRPCService): def call(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in a JSON string or None if there is none. Arguments: jsondata -- remote method call in jsonrpc format """ result = self.call_py(ctx, jsondata) if result is not None: return json.dumps(result, cls=JSONObjectEncoder) return None def _call_method(self, ctx, request): """Calls given method with given params and returns it value.""" method = self.method_data[request['method']]['method'] params = request['params'] result = None try: if isinstance(params, list): # Does it have enough arguments? if len(params) < self._man_args(method) - 1: raise InvalidParamsError('not enough arguments') # Does it have too many arguments? if(not self._vargs(method) and len(params) > self._max_args(method) - 1): raise InvalidParamsError('too many arguments') result = method(ctx, *params) elif isinstance(params, dict): # Do not accept keyword arguments if the jsonrpc version is # not >=1.1. if request['jsonrpc'] < 11: raise KeywordError result = method(ctx, **params) else: # No params result = method(ctx) except JSONRPCError: raise except Exception as e: # log.exception('method %s threw an exception' % request['method']) # Exception was raised inside the method. newerr = ServerError() newerr.trace = traceback.format_exc() newerr.data = e.__str__() raise newerr return result def call_py(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in python object format or None if there is none. This method is same as call() except the return value is a python object instead of JSON string. This method is mainly only useful for debugging purposes. """ rdata = jsondata # we already deserialize the json string earlier in the server code, no # need to do it again # try: # rdata = json.loads(jsondata) # except ValueError: # raise ParseError # set some default values for error handling request = self._get_default_vals() if isinstance(rdata, dict) and rdata: # It's a single request. self._fill_request(request, rdata) respond = self._handle_request(ctx, request) # Don't respond to notifications if respond is None: return None return respond elif isinstance(rdata, list) and rdata: # It's a batch. requests = [] responds = [] for rdata_ in rdata: # set some default values for error handling request_ = self._get_default_vals() self._fill_request(request_, rdata_) requests.append(request_) for request_ in requests: respond = self._handle_request(ctx, request_) # Don't respond to notifications if respond is not None: responds.append(respond) if responds: return responds # Nothing to respond. return None else: # empty dict, list or wrong type raise InvalidRequestError def _handle_request(self, ctx, request): """Handles given request and returns its response.""" if self.method_data[request['method']].has_key('types'): # @IgnorePep8 self._validate_params_types(request['method'], request['params']) result = self._call_method(ctx, request) # Do not respond to notifications. 
if request['id'] is None: return None respond = {} self._fill_ver(request['jsonrpc'], respond) respond['result'] = result respond['id'] = request['id'] return respond class MethodContext(dict): def __init__(self, logger): self['client_ip'] = None self['user_id'] = None self['authenticated'] = None self['token'] = None self['module'] = None self['method'] = None self['call_id'] = None self['rpc_context'] = None self['provenance'] = None self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3']) self._logger = logger def log_err(self, message): self._log(log.ERR, message) def log_info(self, message): self._log(log.INFO, message) def log_debug(self, message, level=1): if level in self._debug_levels: pass else: level = int(level) if level < 1 or level > 3: raise ValueError("Illegal log level: " + str(level)) level = level + 6 self._log(level, message) def set_log_level(self, level): self._logger.set_log_level(level) def get_log_level(self): return self._logger.get_log_level() def clear_log_level(self): self._logger.clear_user_log_level() def _log(self, level, message): self._logger.log_message(level, message, self['client_ip'], self['user_id'], self['module'], self['method'], self['call_id']) def getIPAddress(environ): xFF = environ.get('HTTP_X_FORWARDED_FOR') realIP = environ.get('HTTP_X_REAL_IP') trustXHeaders = config is None or \ config.get('dont_trust_x_ip_headers') != 'true' if (trustXHeaders): if (xFF): return xFF.split(',')[0].strip() if (realIP): return realIP.strip() return environ.get('REMOTE_ADDR') class Application(object): # Wrap the wsgi handler in a class definition so that we can # do some initialization and avoid regenerating stuff over # and over def logcallback(self): self.serverlog.set_log_file(self.userlog.get_log_file()) def log(self, level, context, message): self.serverlog.log_message(level, message, context['client_ip'], context['user_id'], context['module'], context['method'], context['call_id']) def __init__(self): submod = get_service_name() or 'pranjan77contigfilter4' self.userlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, changecallback=self.logcallback, config=get_config_file()) self.serverlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, logfile=self.userlog.get_log_file()) self.serverlog.set_log_level(6) self.rpc_service = JSONRPCServiceCustom() self.method_authentication = dict() self.rpc_service.add(impl_pranjan77contigfilter4.filter_contigs, name='pranjan77contigfilter4.filter_contigs', types=[dict]) self.method_authentication['pranjan77contigfilter4.filter_contigs'] = 'required' self.auth_client = biokbase.nexus.Client( config={'server': 'nexus.api.globusonline.org', 'verify_ssl': True, 'client': None, 'client_secret': None}) def __call__(self, environ, start_response): # Context object, equivalent to the perl impl CallContext ctx = MethodContext(self.userlog) ctx['client_ip'] = getIPAddress(environ) status = '500 Internal Server Error' try: body_size = int(environ.get('CONTENT_LENGTH', 0)) except (ValueError): body_size = 0 if environ['REQUEST_METHOD'] == 'OPTIONS': # we basically do nothing and just return headers status = '200 OK' rpc_result = "" else: request_body = environ['wsgi.input'].read(body_size) try: req = json.loads(request_body) except ValueError as ve: err = {'error': {'code': -32700, 'name': "Parse error", 'message': str(ve), } } rpc_result = self.process_error(err, ctx, {'version': '1.1'}) else: ctx['module'], ctx['method'] = 
req['method'].split('.') ctx['call_id'] = req['id'] ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]} prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params']} ctx['provenance'] = [prov_action] try: token = environ.get('HTTP_AUTHORIZATION') # parse out the method being requested and check if it # has an authentication requirement method_name = req['method'] if method_name in async_run_methods: method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1] if method_name in async_check_methods: method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1] auth_req = self.method_authentication.get(method_name, "none") if auth_req != "none": if token is None and auth_req == 'required': err = ServerError() err.data = "Authentication required for " + \ "pranjan77contigfilter4 but no authentication header was passed" raise err elif token is None and auth_req == 'optional': pass else: try: user, _, _ = \ self.auth_client.validate_token(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token except Exception, e: if auth_req == 'required': err = ServerError() err.data = \ "Token validation failed: %s" % e raise err if (environ.get('HTTP_X_FORWARDED_FOR')): self.log(log.INFO, ctx, 'X-Forwarded-For: ' + environ.get('HTTP_X_FORWARDED_FOR')) method_name = req['method'] if method_name in async_run_methods or method_name in async_check_methods: if method_name in async_run_methods: orig_method_pair = async_run_methods[method_name] else: orig_method_pair = async_check_methods[method_name] orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1] if 'required' != self.method_authentication.get(orig_method_name, 'none'): err = ServerError() err.data = 'Async method ' + orig_method_name + ' should require ' + \ 'authentication, but it has authentication level: ' + \ self.method_authentication.get(orig_method_name, 'none') raise err job_service_client = AsyncJobServiceClient(token = ctx['token']) if method_name in async_run_methods: run_job_params = { 'method': orig_method_name, 'params': req['params']} if 'rpc_context' in ctx: run_job_params['rpc_context'] = ctx['rpc_context'] job_id = job_service_client.run_job(run_job_params) respond = {'version': '1.1', 'result': [job_id], 'id': req['id']} rpc_result = json.dumps(respond, cls=JSONObjectEncoder) status = '200 OK' else: job_id = req['params'][0] job_state = job_service_client.check_job(job_id) finished = job_state['finished'] if finished != 0 and 'error' in job_state and job_state['error'] is not None: err = {'error': job_state['error']} rpc_result = self.process_error(err, ctx, req, None) else: respond = {'version': '1.1', 'result': [job_state], 'id': req['id']} rpc_result = json.dumps(respond, cls=JSONObjectEncoder) status = '200 OK' elif method_name in sync_methods or (method_name + '_async') not in async_run_methods: self.log(log.INFO, ctx, 'start method') rpc_result = self.rpc_service.call(ctx, req) self.log(log.INFO, ctx, 'end method') status = '200 OK' else: err = ServerError() err.data = 'Method ' + method_name + ' cannot be run synchronously' raise err except JSONRPCError as jre: err = {'error': {'code': jre.code, 'name': jre.message, 'message': jre.data } } trace = jre.trace if hasattr(jre, 'trace') else None rpc_result = self.process_error(err, ctx, req, trace) except Exception, e: err = {'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error 
' + 'occurred', } } rpc_result = self.process_error(err, ctx, req, traceback.format_exc()) # print 'The request method was %s\n' % environ['REQUEST_METHOD'] # print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8 # print 'The request body was: %s' % request_body # print 'The result from the method call is:\n%s\n' % \ # pprint.pformat(rpc_result) if rpc_result: response_body = rpc_result else: response_body = '' response_headers = [ ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', environ.get( 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')), ('content-type', 'application/json'), ('content-length', str(len(response_body)))] start_response(status, response_headers) return [response_body] def process_error(self, error, context, request, trace=None): if trace: self.log(log.ERR, context, trace.split('\n')[0:-1]) if 'id' in request: error['id'] = request['id'] if 'version' in request: error['version'] = request['version'] if 'error' not in error['error'] or error['error']['error'] is None: error['error']['error'] = trace elif 'jsonrpc' in request: error['jsonrpc'] = request['jsonrpc'] error['error']['data'] = trace else: error['version'] = '1.0' error['error']['error'] = trace return json.dumps(error) def now_in_utc(self): # Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone dtnow = datetime.datetime.now() dtutcnow = datetime.datetime.utcnow() delta = dtnow - dtutcnow hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60) return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm) application = Application() # This is the uwsgi application dictionary. On startup uwsgi will look # for this dict and pull its configuration from here. # This simply lists where to "mount" the application in the URL path # # This uwsgi module "magically" appears when running the app within # uwsgi and is not available otherwise, so wrap an exception handler # around it # # To run this server in uwsgi with 4 workers listening on port 9999 use: # uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_ # To run a using the single threaded python BaseHTTP service # listening on port 9999 by default execute this file # try: import uwsgi # Before we do anything with the application, see if the # configs specify patching all std routines to be asynch # *ONLY* use this if you are going to wrap the service in # a wsgi container that has enabled gevent, such as # uwsgi with the --gevent option if config is not None and config.get('gevent_monkeypatch_all', False): print "Monkeypatching std libraries for async" from gevent import monkey monkey.patch_all() uwsgi.applications = { '': application } except ImportError: # Not available outside of wsgi, ignore pass _proc = None def start_server(host='localhost', port=0, newprocess=False): ''' By default, will start the server on localhost on a system assigned port in the main thread. Excecution of the main thread will stay in the server main loop until interrupted. To run the server in a separate process, and thus allow the stop_server method to be called, set newprocess = True. 
This will also allow returning of the port number.''' global _proc if _proc: raise RuntimeError('server is already running') httpd = make_server(host, port, application) port = httpd.server_address[1] print "Listening on port %s" % port if newprocess: _proc = Process(target=httpd.serve_forever) _proc.daemon = True _proc.start() else: httpd.serve_forever() return port def stop_server(): global _proc _proc.terminate() _proc = None def process_async_cli(input_file_path, output_file_path, token): exit_code = 0 with open(input_file_path) as data_file: req = json.load(data_file) if 'version' not in req: req['version'] = '1.1' if 'id' not in req: req['id'] = str(_random.random())[2:] ctx = MethodContext(application.userlog) if token: user, _, _ = application.auth_client.validate_token(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token if 'context' in req: ctx['rpc_context'] = req['context'] ctx['CLI'] = 1 ctx['module'], ctx['method'] = req['method'].split('.') prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params']} ctx['provenance'] = [prov_action] resp = None try: resp = application.rpc_service.call_py(ctx, req) except JSONRPCError as jre: trace = jre.trace if hasattr(jre, 'trace') else None resp = {'id': req['id'], 'version': req['version'], 'error': {'code': jre.code, 'name': jre.message, 'message': jre.data, 'error': trace} } except Exception, e: trace = traceback.format_exc() resp = {'id': req['id'], 'version': req['version'], 'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error occurred', 'error': trace} } if 'error' in resp: exit_code = 500 with open(output_file_path, "w") as f: f.write(json.dumps(resp, cls=JSONObjectEncoder)) return exit_code if __name__ == "__main__": requests.packages.urllib3.disable_warnings() if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]): token = None if len(sys.argv) == 4: if os.path.isfile(sys.argv[3]): with open(sys.argv[3]) as token_file: token = token_file.read() else: token = sys.argv[3] sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token)) try: opts, args = getopt(sys.argv[1:], "", ["port=", "host="]) except GetoptError as err: # print help information and exit: print str(err) # will print something like "option -a not recognized" sys.exit(2) port = 9999 host = 'localhost' for o, a in opts: if o == '--port': port = int(a) elif o == '--host': host = a print "Host set to %s" % host else: assert False, "unhandled option" start_server(host=host, port=port) # print "Listening on port %s" % port # httpd = make_server( host, port, application) # # httpd.serve_forever()
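# --- Added usage sketch (not part of the original file) ----------------------
# A hedged example of how a client would call this JSON-RPC 1.1 service once
# start_server() is running. The port, token and the filter_contigs parameter
# names (workspace, contigset_id, min_length) are illustrative assumptions; the
# real parameter schema lives in the Impl/spec files, which are not shown here.
def _example_jsonrpc_client_call(port=9999, token='<auth token>'):
    payload = {
        'version': '1.1',
        'id': '12345',
        'method': 'pranjan77contigfilter4.filter_contigs',
        'params': [{'workspace': 'my_workspace',
                    'contigset_id': 'my_contigset',
                    'min_length': 1000}],
    }
    resp = _requests.post('http://localhost:%s' % port,
                          data=json.dumps(payload, cls=JSONObjectEncoder),
                          headers={'AUTHORIZATION': token})
    return resp.json()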
cycle_control_old.py
# -*- coding: utf-8 -*- #----------Metadata-------------------------------- #This script is responsible for the auto cycle modus. # #----------Imports-------------------------------- import serial import time import Tools import flap_control as flap import threading import temperature_limits as TempLimits import sys,os import sensor_temperature_control as TempSensor #------globale Attribute----------------------------- stopZyklus=False nachlegen=False temperatur=100 #wird nur solange verwendet wie noch kein sensor angeschlossen ist phase2_wiederholen=True phase1ausfuehren=True #variable ändert sich, wenn bestimmte Phasen überspringt werden sollen Path_currentTemp = "/home/pi/Desktop/FireControl/Daten/currentTempGas.txt" #----------Methods------------------------------------ def starteZyklus(): try: global phase1ausfuehren #================================= Vorbedingungen prüfen ==================================================== print("Checking Preconditions...") if TempSensor.getTempRauchgas()>TempLimits.getValueOfTempschwelle3(): Tools.sendcommand("page Zustand") #Seite Zustand anzeigen recieved = Tools.recievecommand() if recieved=="s12b0": #Aufheizen wurde gedrückt -> gehe zu Phase 1 zyklus_thread = threading.Thread(target=__zyklus).start() Tools.sendcommand("page Home") return if recieved=="s12b1": # Abheizen wurde gedrückt phase1ausfuehren=False zyklus_thread = threading.Thread(target=__zyklus).start() Tools.sendcommand("page Home") return if recieved=="s12b2": #User drückt zurück Button return else: zyklus_thread = threading.Thread(target=__zyklus).start() except Exception as e: print("Exception in Cyclce_control.py in check preconditions: "+ str(e)) Tools.sendcommand('Error.t1.txt="'+str(e)+'"') Tools.sendcommand("bt4.val=0") #set Button back to not activated Tools.sendcommand("tsw bt1,1") Tools.sendcommand("tsw bt2,1") Tools.sendcommand("tsw bt3,1") Tools.sendcommand("tsw bt0,1") def __zyklus(): currentZyklusPhase=0 global stopZyklus global nachlegen global phase2_wiederholen global phase1ausfuehren print("Auto-Zyklus wird gestartet") Tools.sendcommand("tsw bt1,0") Tools.sendcommand("tsw bt2,0") Tools.sendcommand("tsw bt3,0") Tools.sendcommand("tsw bt0,0") try: #if phase1ausfuehren: #================================= Phase 1 ==================================================== phase2_wiederholen=True while phase2_wiederholen: phase2_wiederholen=False #Phase 1: Phase beginnt #------------------------- print("Phase 1 startet") flap.phase1() currentZyklusPhase =1 Tools.sendcommand("Home.bt1.val=1") Tools.sendcommand("Home.bt2.val=0") Tools.sendcommand("Home.bt3.val=0") Tools.sendcommand("Home.bt0.val=0") #Phase 1: wartet auf ende #------------------------- #print("Phase 1 wartet auf ende") if phase1ausfuehren: try: while TempSensor.getTempRauchgas()<TempLimits.getValueOfTempschwelle1(): time.sleep(1) if stopZyklus: print("User bricht Zyklus ab") stopZyklus=False raise Exception #Werfe Exception wenn Nutzer Zyklus abbricht nachlegen=False #variable wird zurückgesetzt, weil evtl. nachlegen gedrückt wurde #=========8======================== Phase 2 ==================================================== print("consider delay of templimit") time.sleep(60*TempLimits.getValueOfTempschwelle1delay()) print("templimit delay over. 
proceed with next phase...") except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] print(exc_type, fname, exc_tb.tb_lineno) print(sys.exc_info()) print("Expection in Phase 1") phase1ausfuehren=True #Phase 2: Phase beginnt #------------------------- print("Phase 2 startet") flap.phase2() currentZyklusPhase = 2 Tools.sendcommand("Home.bt1.val=0") Tools.sendcommand("Home.bt2.val=1") Tools.sendcommand("Home.bt3.val=0") Tools.sendcommand("Home.bt0.val=0") #Phase 2: wartet auf ende #------------------------- #print("Phase 2 wartet auf ende") while TempSensor.getTempRauchgas()>TempLimits.getValueOfTempschwelle2(): # Warte bis temp unter 500 Grad sinkt time.sleep(2) if stopZyklus: print("User bricht Zyklus ab") stopZyklus=False raise Exception #Werfe Exception wenn Nutzer Zyklus abbricht nachlegen=False #variable wird zurückgesetzt, weil evtl. nachlegen gedrückt wurde #================================= Phase 3 ==================================================== print("consider delay of templimit") time.sleep(60*TempLimits.getValueOfTempschwelle2delay()) print("templimit delay over. proceed with next phase...") #Phase 3: Phase 3 beginnt #------------------------- print("Phase 3 startet") flap.phase3() currentZyklusPhase = 3 Tools.sendcommand("Home.bt1.val=0") Tools.sendcommand("Home.bt2.val=0") Tools.sendcommand("Home.bt3.val=1") Tools.sendcommand("Home.bt0.val=0") #Phase 3: Phase wartet auf ende #------------------------- print("Phase 3 wartet auf ende") while TempSensor.getTempRauchgas()>TempLimits.getValueOfTempschwelle3() and phase2_wiederholen==False: # Warte bis temp unter 500 Grad sinkt if stopZyklus: print("User bricht Zyklus ab") stopZyklus=False raise Exception #Werfe Exception wenn Nutzer Zyklus abbricht if nachlegen: # Wenn Holz nachgelegt wird #if float(Tools.ReadFile(Path_currentTemp))>TempLimits.getValueOfTempschwelle2(): # Wenn Rauchgas wieder Temperatur von Phase 2 erreicht phase2_wiederholen=True # Phase 2 wird wiederholt nachlegen=False #variable wird zurückgesetzt, weil nachlegen gedürckt wurde time.sleep(2) #================================= Phase 4 ==================================================== #Phase 4: Phase beginnt #------------------------- print("Phase 4 startet") flap.phase4() currentZyklusPhase = 4 Tools.sendcommand("Home.bt3.val=0") Tools.sendcommand("Home.bt0.val=1") Tools.sendcommand("Home.bt2.val=0") Tools.sendcommand("Home.bt1.val=0") Tools.sendcommand("Home.bt4.val=0") Tools.sendcommand("tsw bt1,1") Tools.sendcommand("tsw bt2,1") Tools.sendcommand("tsw bt3,1") print("Zyklus beendet") except Exception as e: print("Fehler in Zyklussteuerung:") exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] print(exc_type, fname, exc_tb.tb_lineno) print(sys.exc_info()) print(str(e)) print("Zyklus wurde gestoppt!") stopZyklus=False nachlegen=False #Alle Knöpfe ausschalten Tools.sendcommand("page Home") Tools.sendcommand("Home.bt0.val=0") Tools.sendcommand("Home.bt1.val=0") Tools.sendcommand("Home.bt2.val=0") Tools.sendcommand("Home.bt3.val=0") Tools.sendcommand("Home.bt4.val=0") ## flap.phase4() -> not used anymore, clap should keep position and shouldnt close completly if currentZyklusPhase == 1: Tools.sendcommand("Home.bt1.val=1") if currentZyklusPhase == 2: Tools.sendcommand("Home.bt2.val=1") if currentZyklusPhase == 3: Tools.sendcommand("Home.bt3.val=1") if currentZyklusPhase == 4: Tools.sendcommand("Home.bt0.val=1") def stoppeZyklus(): global 
stopZyklus stopZyklus = True def nachlegen_triggern(): global nachlegen print("Nachlegen wurde getriggert") nachlegen=True Tools.sendcommand("Home.bt4.val=1") #Zyklus Button geht aus nachdem er einmal gedrückt worden ist, da er aber noch an sein soll wir er hier nochmal angeschaltet Tools.sendcommand("page Home") def _executePhase1(): global stopZyklus global nachlegen global phase2_wiederholen global phase1ausfuehren try: while TempSensor.getTempRauchgas()<TempLimits.getValueOfTempschwelle1(): time.sleep(1) if stopZyklus: print("User bricht Zyklus ab") stopZyklus=False raise Exception #Werfe Exception wenn Nutzer Zyklus abbricht nachlegen=False #variable wird zurückgesetzt, weil evtl. nachlegen gedrückt wurde #=========8======================== Phase 2 ==================================================== print("consider delay of templimit") time.sleep(60*TempLimits.getValueOfTempschwelle1delay()) print("templimit delay over. proceed with next phase...") except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] print(exc_type, fname, exc_tb.tb_lineno) print(sys.exc_info()) print("Expection in Phase 1") def _executePhase2(): global stopZyklus global nachlegen global phase2_wiederholen global phase1ausfuehren print("Start Phase 2") flap.phase2() currentZyklusPhase = 2 Tools.sendcommand("Home.bt1.val=0") Tools.sendcommand("Home.bt2.val=1") Tools.sendcommand("Home.bt3.val=0") Tools.sendcommand("Home.bt0.val=0") print("Phase 2 wait for end") while TempSensor.getTempRauchgas()>TempLimits.getValueOfTempschwelle2(): # Warte bis temp unter 500 Grad sinkt time.sleep(2) if stopZyklus: print("User bricht Zyklus ab") stopZyklus=False raise Exception #Werfe Exception wenn Nutzer Zyklus abbricht nachlegen=False #variable wird zurückgesetzt, weil evtl. nachlegen gedrückt wurde print("consider delay of templimit") time.sleep(60*TempLimits.getValueOfTempschwelle2delay()) print("Phase 3 delay time over. 
Phase 3 will end") def _executePhase3(): global stopZyklus global nachlegen global phase2_wiederholen global phase1ausfuehren print("Start Phase 3") flap.phase3() currentZyklusPhase = 3 Tools.sendcommand("Home.bt1.val=0") Tools.sendcommand("Home.bt2.val=0") Tools.sendcommand("Home.bt3.val=1") Tools.sendcommand("Home.bt0.val=0") #Phase 3: Phase wartet auf ende #------------------------- print("Phase 3 waiting for end") while TempSensor.getTempRauchgas()>TempLimits.getValueOfTempschwelle3() and phase2_wiederholen==False: # Warte bis temp unter 500 Grad sinkt if stopZyklus: print("User bricht Zyklus ab") stopZyklus=False raise Exception #Werfe Exception wenn Nutzer Zyklus abbricht if nachlegen: # Wenn Holz nachgelegt wird #if float(Tools.ReadFile(Path_currentTemp))>TempLimits.getValueOfTempschwelle2(): # Wenn Rauchgas wieder Temperatur von Phase 2 erreicht phase2_wiederholen=True # Phase 2 wird wiederholt nachlegen=False #variable wird zurückgesetzt, weil nachlegen gedürckt wurde time.sleep(2) def _executePhase4(): print("Start Phase 4") flap.phase4() currentZyklusPhase = 4 Tools.sendcommand("Home.bt3.val=0") Tools.sendcommand("Home.bt0.val=1") Tools.sendcommand("Home.bt2.val=0") Tools.sendcommand("Home.bt1.val=0") Tools.sendcommand("Home.bt4.val=0") Tools.sendcommand("tsw bt1,1") Tools.sendcommand("tsw bt2,1") Tools.sendcommand("tsw bt3,1") print("Zyklus beendet") def _checkingPreconditions(): print("Checking Preconditions...") if TempSensor.getTempRauchgas()>TempLimits.getValueOfTempschwelle3(): Tools.sendcommand("page Zustand") #Seite Zustand anzeigen recieved = Tools.recievecommand() if recieved=="s12b0": #Aufheizen wurde gedrückt -> gehe zu Phase 1 zyklus_thread = threading.Thread(target=__zyklus).start() Tools.sendcommand("page Home") return if recieved=="s12b1": # Abheizen wurde gedrückt phase1ausfuehren=False zyklus_thread = threading.Thread(target=__zyklus).start() Tools.sendcommand("page Home") return if recieved=="s12b2": #User drückt zurück Button return else: print("Start Temperatur is smaller then templimit 3. Start Cyclus Phase 1") zyklus_thread = threading.Thread(target=__zyklus).start()
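# --- Added usage sketch (not part of the original file) ----------------------
# A hedged illustration of the wait pattern shared by the phases above: poll the
# flue-gas temperature until it crosses a limit, honour the user stop flag, then
# apply the configured delay (in minutes) before the next phase. This helper is a
# simplified sketch and is not wired into the phase functions.
def _example_wait_for_threshold(limit, delay_minutes, rising=True, poll_seconds=2):
    # rising=True  -> wait until the temperature climbs above `limit` (Phase 1)
    # rising=False -> wait until the temperature falls below `limit` (Phases 2/3)
    while True:
        temp = TempSensor.getTempRauchgas()
        if (rising and temp >= limit) or (not rising and temp <= limit):
            break
        if stopZyklus:
            raise Exception("Cycle aborted by user")
        time.sleep(poll_seconds)
    time.sleep(60 * delay_minutes)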
DrowinessDetection.py
import imutils
import face_recognition
import cv2
from scipy.spatial import distance as dist
import playsound
from threading import Thread
import numpy as np
import os

MIN_AER = 0.30             # minimum eye aspect ratio (EAR) below which the eye counts as closed
EYE_AR_CONSEC_FRAMES = 10  # consecutive "closed" frames before the alarm triggers
COUNTER = 0
ALARM_ON = False


def playAlarm(soundfile):
    playsound.playsound(soundfile)


def shutDownComputer():
    # Not defined in the original source -- a hedged placeholder so the call below
    # resolves. Replace the command with one appropriate for your OS
    # (e.g. 'shutdown /s /t 1' on Windows).
    os.system('shutdown now')


def eye_aspect_ratio(eye):
    # Eye aspect ratio: ratio of the two vertical landmark distances to the
    # horizontal distance; it drops sharply when the eye closes.
    V1 = dist.euclidean(eye[1], eye[5])
    V2 = dist.euclidean(eye[2], eye[4])
    H = dist.euclidean(eye[0], eye[3])
    ear = (V1 + V2) / (2.0 * H)
    return ear


def main():
    global COUNTER, ALARM_ON
    video_capture = cv2.VideoCapture(0)
    video_capture.set(3, 320)  # frame width
    video_capture.set(4, 240)  # frame height
    while True:
        ret, frame = video_capture.read()
        face_landmarks_list = face_recognition.face_landmarks(frame)
        for face_landmark in face_landmarks_list:
            leftEye = face_landmark["left_eye"]
            rightEye = face_landmark["right_eye"]
            lpts = np.array(leftEye)
            rpts = np.array(rightEye)
            cv2.polylines(frame, [lpts], True, (255, 255, 0), 1)
            cv2.polylines(frame, [rpts], True, (255, 255, 0), 1)
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2
            if ear < MIN_AER:
                COUNTER += 1
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    if not ALARM_ON:
                        ALARM_ON = True
                        t = Thread(target=playAlarm, args=('alarm.wav',))
                        t.daemon = True
                        t.start()
                    cv2.putText(frame, "ALERT! You are feeling asleep!", (10, 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)
                    if COUNTER >= 30:
                        shutDownComputer()
            else:
                COUNTER = 0
                ALARM_ON = False
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (250, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)
        cv2.imshow("Sleep detection system", frame)
        if cv2.waitKey(1) == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
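# --- Added usage sketch (not part of the original file) ----------------------
# A hedged, self-contained check of eye_aspect_ratio() on hand-made landmark
# points: an "open" eye with tall vertical distances scores well above MIN_AER,
# while a nearly closed eye drops below it. The coordinates are invented purely
# for illustration.
def _example_eye_aspect_ratio():
    open_eye = [(0, 0), (1, 3), (2, 3), (3, 0), (2, -3), (1, -3)]
    closed_eye = [(0, 0), (1, 0.3), (2, 0.3), (3, 0), (2, -0.3), (1, -0.3)]
    print("open EAR:  ", eye_aspect_ratio(open_eye))    # 2.0 -> above MIN_AER
    print("closed EAR:", eye_aspect_ratio(closed_eye))  # 0.2 -> below MIN_AER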
netwolf.py
import threading
import time

from udp import UDP
from cluster_manager import ClusterManager
from transfer_manager import TransferManager

ENCODING = 'utf-8'


class NetWolf:
    def __init__(self, udp_listener_port, files_directory, cluster_list_address,
                 discovery_period, max_clients, free_ride_delay, request_timeout):
        self.udp_listener = UDP(udp_listener_port)
        listener_info = self.udp_listener.get_host_udp_info()
        self.cluster_manager = ClusterManager(listener_info, cluster_list_address, discovery_period)
        self.transfer_manager = TransferManager(files_directory, max_clients, listener_info)
        self.running = None
        self.requested = []
        self.prior_communications = []
        self.free_ride_delay = free_ride_delay
        self.request_timeout = request_timeout

    def start(self):
        self.running = True
        threading.Thread(target=self.run_receiver).start()
        self.cluster_manager.start()

    def stop(self):
        print("Terminating...")
        self.running = False
        self.cluster_manager.stop()
        self.transfer_manager.stop()
        time.sleep(1)
        self.udp_listener.close()
        print("Terminated!")

    def run_receiver(self):
        while self.running:
            data, sender = self.udp_listener.receive()
            if self.running and data:
                threading.Thread(target=self.handle_data, args=(data, sender,)).start()

    def handle_data(self, data, sender):
        message = data.decode(ENCODING)
        split_message = message.split('\n')
        if split_message[0] == 'cluster':
            self.cluster_manager.add_to_cluster(split_message[1])
        elif split_message[0] == 'request':
            print("Request received from", split_message[1], "for", split_message[2])
            if split_message[1] not in self.prior_communications:
                print("Delaying request of free rider for", self.free_ride_delay, "seconds")
                time.sleep(self.free_ride_delay)
            split_line = split_message[1].split()
            receiver = (split_line[0], int(split_line[1]))
            self.transfer_manager.send_file(receiver, split_message[2])
        elif split_message[0] == 'server':
            if split_message[2] in self.requested:
                self.requested.remove(split_message[2])
                print("TCP server listening at", split_message[1], "to send", split_message[2])
                split_line = split_message[1].split()
                server_address = (split_line[0], int(split_line[1]))
                self.prior_communications.append(split_message[3])
                self.transfer_manager.receive_file(server_address, split_message[2])
            else:
                print(split_message[2], "is not requested anymore!")
        else:
            print("The message is not supported!")

    def request_file(self, file_name):
        print("Requesting for", file_name)
        self.requested.append(file_name)
        self.cluster_manager.send_request(file_name)
        threading.Timer(self.request_timeout, self.check_request, (file_name,)).start()

    def check_request(self, file_name):
        if file_name in self.requested:
            print("File was not found in the cluster!")
            self.requested.remove(file_name)

    def print_list(self):
        self.cluster_manager.print_cluster()
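# --- Added usage sketch (not part of the original file) ----------------------
# A hedged example of wiring up a NetWolf node; every value below (port,
# directory, cluster list path and the timing parameters, all in seconds) is an
# illustrative placeholder, not a recommended configuration.
def _example_netwolf_usage():
    node = NetWolf(
        udp_listener_port=8080,
        files_directory='shared_files',
        cluster_list_address='cluster_list.txt',
        discovery_period=2,
        max_clients=4,
        free_ride_delay=5,
        request_timeout=10,
    )
    node.start()
    node.request_file('example.txt')  # asks the cluster; gives up after request_timeout
    node.print_list()
    node.stop()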