source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
lock.py | #! /usr/bin/python -u
# LICENSE UPL 1.0
#
# Copyright (c) 2020 Oracle and/or its affiliates. All rights reserved.
#
# Since: Mar, 2020
# Author: mohammed.qureshi@oracle.com
# Description: Provides file locking support
#
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
#
import os
import time
import sys
import signal
import argparse
import fcntl
import tempfile
import threading, subprocess
from multiprocessing.connection import Listener, Client
# Multiprocess communication auth key
AUTHKEY = 'vkidSQkgAHc='
DIR_LOCK_FILE = os.sep + '.dirlock'
def acquire_lock(lock_file, sock_file, block, heartbeat):
    """
    Acquire a lock on the passed file, block if needed.

    On success the process forks: the parent returns 0 immediately while
    the child keeps the flock, refreshes the lock file's mtime every 5
    seconds (the "heartbeat"), and serves probe/release requests on a
    Unix socket until release() closes the lock handle.

    :param lock_file: path of the file to flock
    :param sock_file: Unix socket path the lock-holder child listens on
    :param block: if True, wait for the lock; if False, fail fast with 1
    :param heartbeat: seconds without an mtime refresh before the lock is
        considered stale (handles stale NFS locks)
    :return: 0 once the lock is held (parent), 1 if non-blocking and busy
    """
    # get dir lock first to check lock file existence
    with open(os.path.dirname(lock_file) + DIR_LOCK_FILE, 'w') as dir_lh:
        fcntl.flock(dir_lh, fcntl.LOCK_EX)
        if not os.path.exists(lock_file):
            print('[%s]: Creating %s' % (time.strftime('%Y:%m:%d %H:%M:%S'), os.path.basename(lock_file)))
            open(lock_file, 'w').close()
    lock_handle = open(lock_file)
    print('[%s]: Acquiring lock %s with heartbeat %s secs' %
          (time.strftime('%Y:%m:%d %H:%M:%S'), os.path.basename(lock_file), heartbeat))
    while True:
        try:
            # non-blocking attempt so the stale-lock check below can run
            fcntl.flock(lock_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            print('[%s]: Lock acquired' % (time.strftime('%Y:%m:%d %H:%M:%S')))
            with open(os.path.dirname(lock_file) + DIR_LOCK_FILE, 'w') as dir_lh:
                fcntl.flock(dir_lh, fcntl.LOCK_EX)
                print('[%s]: Starting heartbeat' % (time.strftime('%Y:%m:%d %H:%M:%S')))
                # touch the lock file so the heartbeat starts fresh
                os.utime(lock_file, None)
            break
        except IOError as e:
            if not block:
                print(e)
                return 1
            time.sleep(0.1)
            # to handle stale NFS locks: if the holder stopped touching
            # the file for longer than the heartbeat, recreate the file
            pulse = int(time.time() - os.path.getmtime(lock_file))
            if heartbeat < pulse:
                # something is wrong
                print('[%s]: Lost heartbeat by %s secs' % (time.strftime('%Y:%m:%d %H:%M:%S'), pulse))
                lock_handle.close()
                # get dir lock
                with open(os.path.dirname(lock_file) + DIR_LOCK_FILE, 'w') as dir_lh:
                    fcntl.flock(dir_lh, fcntl.LOCK_EX)
                    # pulse check again after acquiring dir lock
                    if heartbeat < int(time.time() - os.path.getmtime(lock_file)):
                        print('[%s]: Recreating %s' % (time.strftime('%Y:%m:%d %H:%M:%S'), os.path.basename(lock_file)))
                        os.remove(lock_file)
                        open(lock_file, 'w').close()
                lock_handle = open(lock_file)
                print('[%s]: Reacquiring lock %s' %
                      (time.strftime('%Y:%m:%d %H:%M:%S'), os.path.basename(lock_file)))
    if os.fork():
        # parent: the child below holds the lock; report success
        return 0
    else:
        # Spawn a child process to hold on to the lock
        if os.path.exists(sock_file):
            os.remove(sock_file)
        print('[%s]: Lock held %s' % (time.strftime('%Y:%m:%d %H:%M:%S'), os.path.basename(lock_file)))
        listener = Listener(address=sock_file, authkey=AUTHKEY)
        def listen():
            # serve requests: False = probe only, True = release
            while True:
                conn = listener.accept()
                if conn.recv():
                    break
            release()
        def release(sig=None, frame=None):
            """
            Release if the process is stopped/terminated
            :param sig:
            :param frame:
            :return:
            """
            # Hold on to the lock for other container
            # processes to terminate first. Allow 30 secs timeout
            if sig:
                time.sleep(30)
            lock_handle.close()
            listener.close()
            print('[%s]: Lock released %s' % (time.strftime('%Y:%m:%d %H:%M:%S'), os.path.basename(lock_file)))
        signal.signal(signal.SIGTERM, release)
        signal.signal(signal.SIGINT, release)
        threading.Thread(target=listen).start()
        # heartbeat: refresh mtime until release() closes the handle
        while not lock_handle.closed:
            os.utime(lock_file, None)
            time.sleep(5)
def check_lock(sock_file):
    """
    Check whether the lock is currently held by probing the holder's socket.

    :param sock_file: Unix socket path exposed by the lock-holder process
    :return: 0 if a holder is reachable, 1 if no socket file exists
    """
    if not os.path.exists(sock_file):
        return 1
    print('[%s]: Connecting to the lock process %s' % (time.strftime('%Y:%m:%d %H:%M:%S'), sock_file))
    conn = Client(address=sock_file, authkey=AUTHKEY)
    # False = probe only; the holder keeps the lock
    conn.send(False)
    conn.close()
    print('[%s]: Lock held %s' % (time.strftime('%Y:%m:%d %H:%M:%S'), os.path.basename(sock_file)))
    return 0
def release_lock(sock_file):
    """
    Release the lock by telling the holder process to terminate.

    :param sock_file: Unix socket path exposed by the lock-holder process
    :return: 1 (both when no holder exists and after a release request)
    """
    if not os.path.exists(sock_file):
        return 1
    print('[%s]: Releasing lock %s' % (time.strftime('%Y:%m:%d %H:%M:%S'), os.path.basename(sock_file)))
    conn = Client(address=sock_file, authkey=AUTHKEY)
    # True = release request; the holder thread breaks its accept loop
    conn.send(True)
    conn.close()
    # NOTE(review): returns 1 even after a successful release, and callers
    # use this as the process exit code — confirm this is intentional.
    return 1
def main():
    """
    Parse command-line options and dispatch to the requested lock action.

    :return: never returns normally; exits with the action's status code
    """
    parser = argparse.ArgumentParser(prog=sys.argv[0])
    # the three mutually-intended actions share the same flag shape
    for action in ('acquire', 'check', 'release'):
        parser.add_argument('--' + action, action='store_true', dest=action)
    parser.add_argument('--file', dest='lock_file')
    parser.add_argument('--block', action='store_true', dest='block')
    # heartbeat in secs
    parser.add_argument('--heartbeat', type=int, dest='heartbeat', default=30)
    args = parser.parse_args()
    if not args.lock_file:
        parser.print_help()
        sys.exit()
    # Derive sock_file name from lock_file
    sock_file = os.path.join(tempfile.gettempdir(), os.path.basename(args.lock_file))
    if args.acquire:
        sys.exit(acquire_lock(args.lock_file, sock_file, args.block, args.heartbeat))
    if args.check:
        sys.exit(check_lock(sock_file))
    if args.release:
        sys.exit(release_lock(sock_file))
# Entry point
if __name__ == '__main__':
main()
|
oase_accept.py | # Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[概要]
運用基盤連携処理
"""
import os
import sys
import django
import json
import pytz
import datetime
import subprocess
import traceback
import ast
import pika
import time
import threading
import copy
from time import sleep
# --------------------------------
# 環境変数取得
# --------------------------------
try:
oase_root_dir = os.environ['OASE_ROOT_DIR']
run_interval = os.environ['RUN_INTERVAL']
python_module = os.environ['PYTHON_MODULE']
log_level = os.environ['LOG_LEVEL']
except Exception as e:
oase_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../')
run_interval = "10"
python_module = "/usr/bin/python3"
log_level = "NORMAL"
# --------------------------------
# パス追加
# --------------------------------
sys.path.append(oase_root_dir)
# --------------------------------
# django読み込み
# --------------------------------
os.environ['DJANGO_SETTINGS_MODULE'] = 'confs.frameworkconfs.settings'
django.setup()
from django.shortcuts import render, redirect
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from libs.backyardlibs.backyard_common import disconnect
from libs.commonlibs.oase_logger import OaseLogger
logger = OaseLogger.get_instance() # ロガー初期化
from web_app.models.models import User, EventsRequest, RuleType
from web_app.serializers.events_request import EventsRequestSerializer
from libs.commonlibs import define as defs
from libs.commonlibs.rabbitmq import RabbitMQ
from libs.webcommonlibs.events_request import EventsRequestCommon
from libs.webcommonlibs.common import TimeConversion
# MAX件数
MAX_COUNT = 100
THREAD_LOCK = threading.Lock()
data_obj_list = []
################################################################
def check_key_error(trace_id, json_str):
    """
    Validate that the event request contains every mandatory key.

    Must be called inside a try block: logs the first missing key and
    raises Exception when the request is malformed.
    """
    err_code = EventsRequestCommon.check_events_request_key(json_str)
    if err_code == EventsRequestCommon.REQUEST_OK:
        return
    # map each error code to the name of the offending key for the log
    key_by_code = {
        EventsRequestCommon.REQUEST_ERR_RULETYPE_KEY: EventsRequestCommon.KEY_RULETYPE,
        EventsRequestCommon.REQUEST_ERR_REQTYPE_KEY: EventsRequestCommon.KEY_REQTYPE,
        EventsRequestCommon.REQUEST_ERR_DATETIME_KEY: EventsRequestCommon.KEY_EVENTTIME,
        EventsRequestCommon.REQUEST_ERR_EVINFO_KEY: EventsRequestCommon.KEY_EVENTINFO,
    }
    err_keyname = key_by_code.get(err_code, '')
    logger.user_log('LOSM22001', err_keyname, trace_id)
    raise Exception()
################################################
def check_evinfo_error(trace_id, json_str, ruletypeid, evinfo_length):
    """
    Validate the event-info part of an event request.

    Must be called inside a try block: logs the specific problem and
    raises Exception whenever the length check does not return REQUEST_OK.
    """
    # event-info check
    err_code = EventsRequestCommon.check_events_request_len(
        json_str, evinfo_length)
    if err_code == EventsRequestCommon.REQUEST_OK:
        return
    if err_code == EventsRequestCommon.REQUEST_ERR_EVINFO_TYPE:
        # event info is not the expected container type; report 0 elements
        logger.user_log('LOSM22002', trace_id,
                        ruletypeid, 0, evinfo_length)
    elif err_code == EventsRequestCommon.REQUEST_ERR_EVINFO_LENGTH:
        # element count does not match the rule type's label count
        logger.user_log('LOSM22002', trace_id, ruletypeid,
                        len(json_str[EventsRequestCommon.KEY_EVENTINFO]),
                        evinfo_length)
    raise Exception()
################################################
def make_evinfo_str(json_str):
    """
    Format the event-info values of a request for DB storage.

    Scalars become double-quoted items; lists become bracketed fragments
    of double-quoted items. The result is one comma-separated string.
    """
    parts = []
    for value in json_str[EventsRequestCommon.KEY_EVENTINFO]:
        if isinstance(value, list):
            parts.append('[%s]' % ','.join('"%s"' % item for item in value))
        else:
            parts.append('"%s"' % (value))
    return ','.join(parts)
################################################
def data_list(body, user, rule_type_id_list, label_count_list):
    """
    Validate one accepted message and queue it for bulk DB registration.

    Appends a validated EventsRequest object to the module-level
    data_obj_list on success.

    :param body: raw message body (UTF-8 encoded JSON bytes)
    :param user: User record used for the audit columns
    :param rule_type_id_list: cache {rule_type_name: rule_type_id}
    :param label_count_list: cache {rule_type_name: label_count}
    :return: True when the request was queued, False on any error
    """
    global data_obj_list
    now = datetime.datetime.now(pytz.timezone('UTC'))
    evinfo_length = 0
    ruletypeid = 0
    msg = ''
    event_dt = '----/--/-- --:--:--'
    # drop any stale DB connection before touching the ORM
    disconnect()
    try:
        # format check: body must be valid UTF-8 JSON
        try:
            json_str = json.loads(body.decode('UTF-8'))
        except json.JSONDecodeError:
            logger.user_log('LOSM22000')
            raise Exception()
        trace_id = json_str[EventsRequestCommon.KEY_TRACEID]
        logger.system_log('LOSI22000', trace_id)
        # mandatory-key check (raises on failure)
        check_key_error(trace_id, json_str)
        # fetch rule information from the request
        reqtypeid = json_str[EventsRequestCommon.KEY_REQTYPE]
        ruletablename = json_str[EventsRequestCommon.KEY_RULETYPE]
        # always refresh the cached rule-type entry for this table name
        # (0 placeholders are overwritten by the DB values below)
        if True:
            rule_type_id_list.update({ruletablename: 0})
            label_count_list.update({ruletablename: 0})
            rset = RuleType.objects.filter(rule_type_name=ruletablename).values(
                'rule_type_id', 'rule_type_name', 'label_count')
            for rs in rset:
                rule_type_id_list.update(
                    {rs['rule_type_name']: rs['rule_type_id']})
                label_count_list.update(
                    {rs['rule_type_name']: rs['label_count']})
        if ruletablename in rule_type_id_list:
            ruletypeid = rule_type_id_list[ruletablename]
            evinfo_length = label_count_list[ruletablename]
        # event-info check (raises on failure)
        check_evinfo_error(trace_id, json_str, ruletypeid, evinfo_length)
        # shape values for DB registration
        time_zone = settings.TIME_ZONE
        evinfo_str = make_evinfo_str(json_str)
        evinfo_str = '{"EVENT_INFO":[%s]}' % (evinfo_str)
        event_dt = json_str[EventsRequestCommon.KEY_EVENTTIME]
        event_dt = TimeConversion.get_time_conversion_utc(
            event_dt, time_zone)
        json_data = {
            'trace_id': trace_id,
            'request_type_id': reqtypeid,
            'rule_type_id': ruletypeid,
            'request_reception_time': now,
            'request_user': 'OASE Web User',
            'request_server': 'OASE Web',
            'event_to_time': event_dt,
            'event_info': evinfo_str,
            'status': defs.UNPROCESS,
            'status_update_id': '',
            'retry_cnt': 0,
            'last_update_timestamp': now,
            'last_update_user': user.user_name,
        }
        # serializer validation check
        oters = EventsRequestSerializer(data=json_data)
        result_valid = oters.is_valid()
        # validation error
        if result_valid == False:
            msg = '%s' % oters.errors
            logger.user_log('LOSM22003', trace_id, msg)
            return False
        # valid: queue the record for the next bulk insert
        else:
            data_object = EventsRequest(
                trace_id=trace_id,
                request_type_id=reqtypeid,
                rule_type_id=ruletypeid,
                request_reception_time=now,
                request_user='OASE Web User',
                request_server='OASE Web',
                event_to_time=event_dt,
                event_info=evinfo_str,
                status=defs.UNPROCESS,
                status_update_id='',
                retry_cnt=0,
                last_update_timestamp=now,
                last_update_user=user.user_name
            )
            data_obj_list.append(data_object)
            return True
    except Exception as e:
        logger.system_log('LOSM22004', traceback.format_exc())
        return False
################################################
def bulk_create():
    """
    Flush data_obj_list to the EventsRequest table in one bulk insert.

    Intended to run on a worker thread/timer; clears thread_flg so the
    consumer loop may schedule the next flush.
    """
    global data_obj_list
    global thread_flg
    try:
        thread_flg = False
        with THREAD_LOCK:
            data_obj_len = len(data_obj_list)
            if data_obj_len <= 0:
                return
            # copy pending records and reset the shared list while locked
            tmp_data = copy.deepcopy(data_obj_list)
            data_obj_list = []
            # single bulk DB insert
            EventsRequest.objects.bulk_create(tmp_data)
            # drop the working copy
            tmp_data = []
    except Exception as e:
        logger.system_log('LOSM22005', traceback.format_exc())
################################################
def load_ruletype():
    """
    Load the rule-type master table into two lookup dictionaries.

    :return: tuple ({rule_type_name: rule_type_id},
                    {rule_type_name: label_count})
    """
    rule_type_id_list = {}
    label_count_list = {}
    rows = RuleType.objects.all().values(
        'rule_type_id', 'rule_type_name', 'label_count')
    for row in rows:
        name = row['rule_type_name']
        rule_type_id_list[name] = row['rule_type_id']
        label_count_list[name] = row['label_count']
    return rule_type_id_list, label_count_list
################################################
if __name__ == '__main__':
    # Consume event requests from RabbitMQ and batch-insert them into the
    # EventsRequest table via bulk_create().
    loop_count = 0
    thread_flg = False
    # load rule-type master data
    rule_type_id_list, label_count_list = load_ruletype()
    # startup configuration
    user = User.objects.get(user_id=1)
    accept_settings = RabbitMQ.settings()
    # connect to RabbitMQ
    channel, connection = RabbitMQ.connect(accept_settings)
    # make sure the queue exists before consuming
    channel.queue_declare(queue=accept_settings['queuename'], durable=True)
    # consume loop
    for method_frame, properties, body in channel.consume(accept_settings['queuename']):
        if method_frame:
            # build the DB-registration list from this message
            _ = data_list(body, user, rule_type_id_list, label_count_list)
            # ack the consumed message
            channel.basic_ack(method_frame.delivery_tag)
        # number of records waiting for the next flush
        loop_count = len(data_obj_list)
        # at the commit threshold, flush on a worker thread immediately
        if loop_count >= MAX_COUNT:
            thread_flg = True
            # BUG FIX: pass the callable itself. The original wrote
            # Thread(target=bulk_create()), which ran bulk_create()
            # synchronously on this thread and handed its None return
            # value to Thread, so the spawned thread did nothing.
            thrd = threading.Thread(target=bulk_create)
            thrd.start()
        elif not thread_flg:
            # below the threshold, schedule one short-delay flush
            thread_flg = True
            thrd = threading.Timer(0.1, bulk_create)
            thrd.start()
    # defensive close (consume() normally loops forever)
    channel.close()
    connection.close()
|
node_provider.py | import random
import copy
import threading
from collections import defaultdict
import logging
import boto3
import botocore
from botocore.config import Config
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.aws.config import bootstrap_aws
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME, TAG_RAY_NODE_NAME, \
TAG_RAY_LAUNCH_CONFIG, TAG_RAY_NODE_KIND, TAG_RAY_USER_NODE_TYPE
from ray.ray_constants import BOTO_MAX_RETRIES, BOTO_CREATE_MAX_RETRIES
from ray.autoscaler.log_timer import LogTimer
from ray.autoscaler.aws.utils import boto_exception_handler
from ray.autoscaler.cli_logger import cli_logger
import colorful as cf
logger = logging.getLogger(__name__)
def to_aws_format(tags):
    """Convert the Ray node name tag to the AWS-specific 'Name' tag.

    Mutates and returns the same dict.
    """
    if TAG_RAY_NODE_NAME in tags:
        tags["Name"] = tags.pop(TAG_RAY_NODE_NAME)
    return tags
def from_aws_format(tags):
    """Convert the AWS-specific 'Name' tag to the Ray node name tag.

    Mutates and returns the same dict.
    """
    if "Name" in tags:
        tags[TAG_RAY_NODE_NAME] = tags.pop("Name")
    return tags
def make_ec2_client(region, max_retries, aws_credentials=None):
    """Make client, retrying requests up to `max_retries`.

    :param region: AWS region name for the EC2 resource
    :param max_retries: botocore max_attempts for this client
    :param aws_credentials: optional dict of extra boto3 keyword args
    """
    retry_config = Config(retries={"max_attempts": max_retries})
    credential_kwargs = aws_credentials or {}
    return boto3.resource(
        "ec2", region_name=region, config=retry_config, **credential_kwargs)
class AWSNodeProvider(NodeProvider):
def __init__(self, provider_config, cluster_name):
    """Create EC2 clients, tag caches, and start the tag-update thread.

    :param provider_config: the 'provider' section of the cluster config
    :param cluster_name: name used to scope tags/filters to this cluster
    """
    NodeProvider.__init__(self, provider_config, cluster_name)
    # When True, "terminated" on-demand nodes are stopped instead so they
    # can later be restarted and reused by create_node().
    self.cache_stopped_nodes = provider_config.get("cache_stopped_nodes",
                                                   True)
    aws_credentials = provider_config.get("aws_credentials")
    self.ec2 = make_ec2_client(
        region=provider_config["region"],
        max_retries=BOTO_MAX_RETRIES,
        aws_credentials=aws_credentials)
    # Separate zero-retry client for calls retried manually (see
    # _create_node's own retry loop).
    self.ec2_fail_fast = make_ec2_client(
        region=provider_config["region"],
        max_retries=0,
        aws_credentials=aws_credentials)
    # Try availability zones round-robin, starting from random offset
    self.subnet_idx = random.randint(0, 100)
    self.tag_cache = {}  # Tags that we believe to actually be on EC2.
    self.tag_cache_pending = {}  # Tags that we will soon upload.
    self.tag_cache_lock = threading.Lock()
    self.tag_cache_update_event = threading.Event()
    self.tag_cache_kill_event = threading.Event()
    # Background thread that batches pending tag writes to EC2.
    self.tag_update_thread = threading.Thread(
        target=self._node_tag_update_loop)
    self.tag_update_thread.start()
    # Cache of node objects from the last nodes() call. This avoids
    # excessive DescribeInstances requests.
    self.cached_nodes = {}
def _node_tag_update_loop(self):
    """Update the AWS tags for a cluster periodically.

    The purpose of this loop is to avoid excessive EC2 calls when a large
    number of nodes are being launched simultaneously.
    """
    while True:
        self.tag_cache_update_event.wait()
        self.tag_cache_update_event.clear()
        batch_updates = defaultdict(list)
        with self.tag_cache_lock:
            for node_id, tags in self.tag_cache_pending.items():
                for x in tags.items():
                    # group node ids by (key, value) pair so a single
                    # create_tags call can cover many nodes at once
                    batch_updates[x].append(node_id)
                self.tag_cache[node_id].update(tags)
            self.tag_cache_pending = {}
        for (k, v), node_ids in batch_updates.items():
            m = "Set tag {}={} on {}".format(k, v, node_ids)
            with LogTimer("AWSNodeProvider: {}".format(m)):
                if k == TAG_RAY_NODE_NAME:
                    k = "Name"
                self.ec2.meta.client.create_tags(
                    Resources=node_ids,
                    Tags=[{
                        "Key": k,
                        "Value": v
                    }],
                )
        # throttle the loop; exit promptly once cleanup() fires the kill
        self.tag_cache_kill_event.wait(timeout=5)
        if self.tag_cache_kill_event.is_set():
            return
def non_terminated_nodes(self, tag_filters):
    """Return ids of pending/running cluster instances matching tag_filters.

    Side effect: refreshes self.cached_nodes and seeds self.tag_cache
    for any newly-seen node.
    """
    # Note that these filters are acceptable because they are set on
    # node initialization, and so can never be sitting in the cache.
    tag_filters = to_aws_format(tag_filters)
    filters = [
        {
            "Name": "instance-state-name",
            "Values": ["pending", "running"],
        },
        {
            "Name": "tag:{}".format(TAG_RAY_CLUSTER_NAME),
            "Values": [self.cluster_name],
        },
    ]
    for k, v in tag_filters.items():
        filters.append({
            "Name": "tag:{}".format(k),
            "Values": [v],
        })
    with boto_exception_handler(
            "Failed to fetch running instances from AWS."):
        nodes = list(self.ec2.instances.filter(Filters=filters))
    # Populate the tag cache with initial information if necessary
    for node in nodes:
        if node.id in self.tag_cache:
            continue
        self.tag_cache[node.id] = from_aws_format(
            {x["Key"]: x["Value"]
             for x in node.tags})
    self.cached_nodes = {node.id: node for node in nodes}
    return [node.id for node in nodes]
def is_running(self, node_id):
    """Return True when the node's EC2 state is exactly 'running'."""
    return self._get_cached_node(node_id).state["Name"] == "running"
def is_terminated(self, node_id):
    """Return True when the node is neither running nor pending."""
    current_state = self._get_cached_node(node_id).state["Name"]
    return current_state not in ["running", "pending"]
def node_tags(self, node_id):
    """Return the node's tags; pending (not-yet-uploaded) tags override
    the values already believed to be on EC2."""
    with self.tag_cache_lock:
        merged = dict(self.tag_cache[node_id])
        merged.update(self.tag_cache_pending.get(node_id, {}))
        return merged
def external_ip(self, node_id):
    """Return the node's public IP, re-querying AWS if the cached
    instance has none recorded yet."""
    cached = self._get_cached_node(node_id)
    if cached.public_ip_address is not None:
        return cached.public_ip_address
    # cache may predate IP assignment; refresh and read again
    return self._get_node(node_id).public_ip_address
def internal_ip(self, node_id):
    """Return the node's private IP, re-querying AWS if the cached
    instance has none recorded yet."""
    cached = self._get_cached_node(node_id)
    if cached.private_ip_address is not None:
        return cached.private_ip_address
    # cache may predate IP assignment; refresh and read again
    return self._get_node(node_id).private_ip_address
def set_node_tags(self, node_id, tags):
    """Queue tags for upload by the background tag-update thread."""
    with self.tag_cache_lock:
        if node_id in self.tag_cache_pending:
            self.tag_cache_pending[node_id].update(tags)
        else:
            self.tag_cache_pending[node_id] = tags
        # wake the updater so the pending batch gets written out
        self.tag_cache_update_event.set()
def create_node(self, node_config, tags, count):
    """Create `count` nodes, reusing stopped instances when allowed.

    When cache_stopped_nodes is enabled, previously stopped instances
    whose tags match this launch config are restarted first; any
    remaining count is created fresh via _create_node().
    """
    tags = copy.deepcopy(tags)
    # Try to reuse previously stopped nodes with compatible configs
    if self.cache_stopped_nodes:
        # TODO(ekl) this is breaking the abstraction boundary a little by
        # peeking into the tag set.
        filters = [
            {
                "Name": "instance-state-name",
                "Values": ["stopped", "stopping"],
            },
            {
                "Name": "tag:{}".format(TAG_RAY_CLUSTER_NAME),
                "Values": [self.cluster_name],
            },
            {
                "Name": "tag:{}".format(TAG_RAY_NODE_KIND),
                "Values": [tags[TAG_RAY_NODE_KIND]],
            },
            {
                "Name": "tag:{}".format(TAG_RAY_LAUNCH_CONFIG),
                "Values": [tags[TAG_RAY_LAUNCH_CONFIG]],
            },
        ]
        # This tag may not always be present.
        if TAG_RAY_USER_NODE_TYPE in tags:
            filters.append({
                "Name": "tag:{}".format(TAG_RAY_USER_NODE_TYPE),
                "Values": [tags[TAG_RAY_USER_NODE_TYPE]],
            })
        reuse_nodes = list(
            self.ec2.instances.filter(Filters=filters))[:count]
        reuse_node_ids = [n.id for n in reuse_nodes]
        if reuse_nodes:
            cli_logger.print(
                # todo: handle plural vs singular?
                "Reusing nodes {}. "
                "To disable reuse, set `cache_stopped_nodes: False` "
                "under `provider` in the cluster configuration.",
                cli_logger.render_list(reuse_node_ids))
            cli_logger.old_info(
                logger, "AWSNodeProvider: reusing instances {}. "
                "To disable reuse, set "
                "'cache_stopped_nodes: False' in the provider "
                "config.", reuse_node_ids)
            # todo: timed?
            with cli_logger.group("Stopping instances to reuse"):
                for node in reuse_nodes:
                    self.tag_cache[node.id] = from_aws_format(
                        {x["Key"]: x["Value"]
                         for x in node.tags})
                    # a node still stopping cannot be started; wait it out
                    if node.state["Name"] == "stopping":
                        cli_logger.print("Waiting for instance {} to stop",
                                         node.id)
                        cli_logger.old_info(
                            logger,
                            "AWSNodeProvider: waiting for instance "
                            "{} to fully stop...", node.id)
                        node.wait_until_stopped()
            self.ec2.meta.client.start_instances(
                InstanceIds=reuse_node_ids)
            for node_id in reuse_node_ids:
                self.set_node_tags(node_id, tags)
            count -= len(reuse_node_ids)
    if count:
        self._create_node(node_config, tags, count)
def _create_node(self, node_config, tags, count):
    """Launch `count` fresh EC2 instances with the given config and tags.

    Retries create_instances up to BOTO_CREATE_MAX_RETRIES times,
    rotating round-robin through the configured subnets on each attempt.
    """
    tags = to_aws_format(tags)
    conf = node_config.copy()
    # Delete unsupported keys from the node config
    try:
        del conf["Resources"]
    except KeyError:
        pass
    # Base tag spec: every instance is tagged with the cluster name.
    tag_pairs = [{
        "Key": TAG_RAY_CLUSTER_NAME,
        "Value": self.cluster_name,
    }]
    for k, v in tags.items():
        tag_pairs.append({
            "Key": k,
            "Value": v,
        })
    tag_specs = [{
        "ResourceType": "instance",
        "Tags": tag_pairs,
    }]
    user_tag_specs = conf.get("TagSpecifications", [])
    # Allow users to add tags and override values of existing
    # tags with their own. This only applies to the resource type
    # "instance". All other resource types are appended to the list of
    # tag specs.
    for user_tag_spec in user_tag_specs:
        if user_tag_spec["ResourceType"] == "instance":
            for user_tag in user_tag_spec["Tags"]:
                exists = False
                for tag in tag_specs[0]["Tags"]:
                    if user_tag["Key"] == tag["Key"]:
                        exists = True
                        tag["Value"] = user_tag["Value"]
                        break
                if not exists:
                    tag_specs[0]["Tags"] += [user_tag]
        else:
            tag_specs += [user_tag_spec]
    # SubnetIds is not a real config key: we must resolve to a
    # single SubnetId before invoking the AWS API.
    subnet_ids = conf.pop("SubnetIds")
    for attempt in range(1, BOTO_CREATE_MAX_RETRIES + 1):
        try:
            subnet_id = subnet_ids[self.subnet_idx % len(subnet_ids)]
            cli_logger.old_info(
                logger, "NodeProvider: calling create_instances "
                "with {} (count={}).", subnet_id, count)
            self.subnet_idx += 1
            conf.update({
                "MinCount": 1,
                "MaxCount": count,
                "SubnetId": subnet_id,
                "TagSpecifications": tag_specs
            })
            # fail-fast client: retries are handled by this loop instead
            created = self.ec2_fail_fast.create_instances(**conf)
            # todo: timed?
            # todo: handle plurality?
            with cli_logger.group(
                    "Launched {} nodes",
                    count,
                    _tags=dict(subnet_id=subnet_id)):
                for instance in created:
                    # NOTE(maximsmol): This is needed for mocking
                    # boto3 for tests. This is likely a bug in moto
                    # but AWS docs don't seem to say.
                    # You can patch moto/ec2/responses/instances.py
                    # to fix this (add <stateReason> to EC2_RUN_INSTANCES)
                    # The correct value is technically
                    # {"code": "0", "Message": "pending"}
                    state_reason = instance.state_reason or {
                        "Message": "pending"
                    }
                    cli_logger.print(
                        "Launched instance {}",
                        instance.instance_id,
                        _tags=dict(
                            state=instance.state["Name"],
                            info=state_reason["Message"]))
                    cli_logger.old_info(
                        logger, "NodeProvider: Created instance "
                        "[id={}, name={}, info={}]", instance.instance_id,
                        instance.state["Name"], state_reason["Message"])
            break
        except botocore.exceptions.ClientError as exc:
            if attempt == BOTO_CREATE_MAX_RETRIES:
                # todo: err msg
                cli_logger.abort(
                    "Failed to launch instances. Max attempts exceeded.")
                cli_logger.old_error(
                    logger,
                    "create_instances: Max attempts ({}) exceeded.",
                    BOTO_CREATE_MAX_RETRIES)
                raise exc
            else:
                cli_logger.print(
                    "create_instances: Attempt failed with {}, retrying.",
                    exc)
                cli_logger.old_error(logger, exc)
def terminate_node(self, node_id):
    """Stop the node when reuse caching is on (non-spot), else terminate.

    Spot instances cannot be stopped, so they are always terminated.
    Also evicts the node from both tag caches.
    """
    node = self._get_cached_node(node_id)
    if self.cache_stopped_nodes:
        if node.spot_instance_request_id:
            cli_logger.print(
                "Terminating instance {} " +
                cf.gray("(cannot stop spot instances, only terminate)"),
                node_id)  # todo: show node name?
            cli_logger.old_info(
                logger,
                "AWSNodeProvider: terminating node {} (spot nodes cannot "
                "be stopped, only terminated)", node_id)
            node.terminate()
        else:
            cli_logger.print("Stopping instance {} " + cf.gray(
                "(to terminate instead, "
                "set `cache_stopped_nodes: False` "
                "under `provider` in the cluster configuration)"),
                node_id)  # todo: show node name?
            cli_logger.old_info(
                logger,
                "AWSNodeProvider: stopping node {}. To terminate nodes "
                "on stop, set 'cache_stopped_nodes: False' in the "
                "provider config.".format(node_id))
            node.stop()
    else:
        node.terminate()
    self.tag_cache.pop(node_id, None)
    self.tag_cache_pending.pop(node_id, None)
def terminate_nodes(self, node_ids):
    """Batch stop/terminate nodes; spot instances are always terminated.

    With cache_stopped_nodes on, on-demand nodes are stopped (for later
    reuse) and spot nodes terminated; otherwise everything is terminated.
    Also evicts each node from both tag caches.
    """
    if not node_ids:
        return
    if self.cache_stopped_nodes:
        # split the batch: spot instances cannot be stopped
        spot_ids = []
        on_demand_ids = []
        for node_id in node_ids:
            if self._get_cached_node(node_id).spot_instance_request_id:
                spot_ids += [node_id]
            else:
                on_demand_ids += [node_id]
        if on_demand_ids:
            # todo: show node names?
            cli_logger.print(
                "Stopping instances {} " + cf.gray(
                    "(to terminate instead, "
                    "set `cache_stopped_nodes: False` "
                    "under `provider` in the cluster configuration)"),
                cli_logger.render_list(on_demand_ids))
            cli_logger.old_info(
                logger,
                "AWSNodeProvider: stopping nodes {}. To terminate nodes "
                "on stop, set 'cache_stopped_nodes: False' in the "
                "provider config.", on_demand_ids)
            self.ec2.meta.client.stop_instances(InstanceIds=on_demand_ids)
        if spot_ids:
            cli_logger.print(
                "Terminating instances {} " +
                cf.gray("(cannot stop spot instances, only terminate)"),
                cli_logger.render_list(spot_ids))
            cli_logger.old_info(
                logger,
                "AWSNodeProvider: terminating nodes {} (spot nodes cannot "
                "be stopped, only terminated)", spot_ids)
            self.ec2.meta.client.terminate_instances(InstanceIds=spot_ids)
    else:
        self.ec2.meta.client.terminate_instances(InstanceIds=node_ids)
    for node_id in node_ids:
        self.tag_cache.pop(node_id, None)
        self.tag_cache_pending.pop(node_id, None)
def _get_node(self, node_id):
    """Refresh and get info for this node, updating the cache."""
    self.non_terminated_nodes({})  # Side effect: updates cache
    try:
        return self.cached_nodes[node_id]
    except KeyError:
        pass
    # Node not in {pending, running} -- retry with a point query. This
    # usually means the node was recently preempted or terminated.
    matches = list(self.ec2.instances.filter(InstanceIds=[node_id]))
    assert len(matches) == 1, "Invalid instance id {}".format(node_id)
    return matches[0]
def _get_cached_node(self, node_id):
    """Return node info from cache if possible, otherwise fetches it."""
    try:
        return self.cached_nodes[node_id]
    except KeyError:
        return self._get_node(node_id)
def cleanup(self):
    """Wake the tag-update thread one last time and signal it to exit."""
    self.tag_cache_update_event.set()
    self.tag_cache_kill_event.set()
@staticmethod
def bootstrap_config(cluster_config):
    """Fill in AWS-specific defaults/resources for a cluster config."""
    return bootstrap_aws(cluster_config)
|
AstroLauncher.py | import argparse
import asyncio
import atexit
import ctypes
import dataclasses
import json
import os
import secrets
import shutil
import signal
import socket
import subprocess
import sys
import time
import zipfile
from distutils import dir_util
from threading import Thread
import psutil
from packaging import version
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
import cogs.AstroAPI as AstroAPI
import cogs.AstroWebServer as AstroWebServer
import cogs.ValidateSettings as ValidateSettings
from cogs.AstroDaemon import AstroDaemon
from cogs.AstroDedicatedServer import AstroDedicatedServer
from cogs.AstroLogging import AstroLogging
from cogs.MultiConfig import MultiConfig
from cogs.utils import AstroRequests
from cogs.utils import ALVERSION
"""
Build:
pyinstaller AstroLauncher.py -F --add-data "assets;./assets" --icon=assets/astrolauncherlogo.ico
or
python BuildEXE.py
"""
class AstroLauncher():
""" Starts a new instance of the Server Launcher"""
@dataclasses.dataclass
class LauncherConfig():
    """Typed view of the Launcher.ini settings.

    Every field is coerced to its annotated type by __post_init__,
    so string values read from the INI become typed attributes.
    """
    # --- software update behaviour ---
    AutoUpdateLauncherSoftware: bool = True
    AutoUpdateServerSoftware: bool = True
    UpdateOnServerRestart: bool = True
    # --- console window visibility ---
    HideServerConsoleWindow: bool = False
    HideLauncherConsoleWindow: bool = False
    # --- polling frequencies (seconds) ---
    ServerStatusFrequency: float = 2
    PlayfabAPIFrequency: float = 2
    # heartbeat failures tolerated before the server is restarted
    HeartBeatFailRestartServer: int = 8
    # --- backup retention ---
    DisableBackupRetention: bool = False
    BackupRetentionPeriodHours: float = 72
    BackupRetentionFolderLocation: str = r"Astro\Saved\Backup\LauncherBackups"
    # --- scheduled restarts ---
    EnableAutoRestart: bool = False
    AutoRestartEveryHours: float = 24
    AutoRestartSyncTimestamp: str = "00:00"
    # --- misc ---
    DisableNetworkCheck: bool = False
    OverwritePublicIP: bool = False
    ShowServerFPSInConsole: bool = True
    AdminAutoConfigureFirewall: bool = True
    LogRetentionDays: int = 7
    # --- Discord webhook ---
    DiscordWebHookURL: str = ""
    DiscordWebHookLevel: str = "cmd"
    # random token guarding the read-only data URL
    RODataURL: str = secrets.token_hex(16)
    # --- embedded web server ---
    DisableWebServer: bool = False
    WebServerPort: int = 5000
    WebServerPasswordHash: str = ""
    WebServerBaseURL: str = "/"
    EnableWebServerSSL: bool = False
    SSLPort: int = 443
    SSLCertFile: str = ""
    SSLKeyFile: str = ""
    CPUAffinity: str = ""
    def __post_init__(self):
        """Coerce each field to its annotated type; exit on invalid INI.

        NOTE(review): bool(str) is True for any non-empty string — this
        assumes INI values are already parsed to their Python types
        before reaching here; confirm against MultiConfig.
        """
        # pylint: disable=no-member
        hasError = False
        for field, data in self.__dataclass_fields__.items():
            try:
                self.__dict__[field] = data.type(self.__dict__[field])
            except ValueError:
                hasError = True
                AstroLogging.logPrint(
                    f"INI error: {field} must be of type {data.type.__name__}", "critical")
        if hasError:
            AstroLogging.logPrint(
                "Fix your launcher config file!", "critical")
            sys.exit()
class SaveHandler(FileSystemEventHandler):
    """Watchdog handler that logs game-save activity in the save folder."""
    def __init__(self, launcher):
        # back-reference to the launcher for paths/config
        self.launcher = launcher
        self.astroPath = self.launcher.astroPath
        self.moveToPath = self.launcher.launcherConfig.BackupRetentionFolderLocation
        super().__init__()
    def on_created(self, event):
        """Log the newest save file shortly after a new file appears."""
        try:
            # give the server a moment to finish writing the file
            time.sleep(0.5)
            dirName = os.path.dirname(event.src_path)
            fileNames = [os.path.join(dirName, f) for f in os.listdir(
                dirName) if os.path.isfile(os.path.join(dirName, f))]
            # most recently modified file is the save that just landed
            fileName = sorted(
                fileNames, key=os.path.getmtime, reverse=True)[0]
            AstroLogging.logPrint(
                f"Server saved. {os.path.basename(fileName)}", dwet="s")
            AstroLogging.logPrint(f"{event.src_path}")
        except:
            # best-effort logging only; never break the observer thread
            pass
    def on_deleted(self, event):
        """Debug-log deletions so vanished saves can be traced."""
        AstroLogging.logPrint(
            f"Detected save file deletion. {event.src_path}", msgType="debug")
class BackupHandler(FileSystemEventHandler):
    """Watchdog handler that mirrors new backups into the retention folder
    and prunes retained copies older than the configured period."""
    def __init__(self, launcher):
        # back-reference to the launcher for paths/config
        self.launcher = launcher
        self.astroPath = self.launcher.astroPath
        self.moveToPath = self.launcher.launcherConfig.BackupRetentionFolderLocation
        self.retentionPeriodHours = self.launcher.launcherConfig.BackupRetentionPeriodHours
        # events seen since the last copy pass; the first one in a burst
        # starts the single worker thread (see on_deleted)
        self.pendingFiles = []
        super().__init__()
    def handle_files(self):
        """Prune expired retained backups, then copy current backups over."""
        # give the server a moment to finish its backup rotation
        time.sleep(2)
        path = os.path.join(self.astroPath, self.moveToPath)
        try:
            if not os.path.exists(path):
                os.makedirs(path)
        except Exception as e:
            AstroLogging.logPrint(e, "error")
        now = time.time()
        try:
            # delete retained copies older than the retention period
            for f in os.listdir(path):
                fpath = os.path.join(path, f)
                if os.stat(fpath).st_mtime < (now - (self.retentionPeriodHours * 60 * 60)):
                    os.remove(fpath)
        except Exception as e:
            AstroLogging.logPrint(e, "error")
        AstroLogging.logPrint(
            "Copying backup(s) to retention folder.", dwet="b")
        try:
            # copy every file from the directory of the first pending event
            dirName = os.path.dirname(self.pendingFiles[0])
            fileNames = [os.path.join(dirName, f) for f in os.listdir(
                dirName) if os.path.isfile(os.path.join(dirName, f))]
            for cFile in fileNames:
                shutil.copy2(cFile, path)
        except FileNotFoundError as e:
            AstroLogging.logPrint(e, "error")
        except Exception as e:
            AstroLogging.logPrint(e, "error")
        # restart the observer cycle via the launcher
        self.launcher.backupObserver.stop()
        self.launcher.backup_retention()
    def on_deleted(self, event):
        """Queue the path; spawn one worker for the first event of a burst."""
        try:
            self.pendingFiles.append(event.src_path)
            if len(self.pendingFiles) == 1:
                t = Thread(target=self.handle_files, args=())
                t.daemon = True
                t.start()
        except:
            # never break the observer thread
            pass
def __init__(self, astroPath, launcherINI="Launcher.ini", disable_auto_update=None):
    """Set up logging and config, locate (or install) the server, run the
    pre-flight checks, and finally launch the dedicated server.

    :param astroPath: folder containing AstroServer.exe, or None to autodetect.
    :param launcherINI: path of the launcher configuration INI file.
    :param disable_auto_update: when not None, overrides AutoUpdateLauncherSoftware.
    """
    try:
        AstroLogging.setup_logging()
        self.launcherINI = launcherINI
        self.launcherConfig = self.LauncherConfig()
        self.launcherPath = os.getcwd()
        self.refresh_launcher_config()
        # check if path specified
        if astroPath is not None:
            if os.path.exists(os.path.join(astroPath, "AstroServer.exe")):
                self.astroPath = astroPath
            else:
                AstroLogging.logPrint(
                    "Specified path does not contain the server executable! (AstroServer.exe)", "critical")
                time.sleep(5)
                return
        # check if executable in current directory
        elif os.path.exists(os.path.join(os.getcwd(), "AstroServer.exe")):
            self.astroPath = os.getcwd()
        else:
            AstroLogging.logPrint(
                "Unable to find server executable anywhere! (AstroServer.exe)", "warning")
            # finally, try to install the server
            try:
                if astroPath is None:
                    self.astroPath = os.getcwd()
                self.check_for_server_update()
            except Exception as e:
                AstroLogging.logPrint(e, "critical")
                return
        AstroLogging.discordWebhookURL = self.launcherConfig.DiscordWebHookURL
        dwhl = self.launcherConfig.DiscordWebHookLevel.lower()
        # fall back to "cmd" for any unrecognized webhook level
        dwhl = dwhl if dwhl in ("all", "cmd", "chat") else "cmd"
        AstroLogging.discordWebhookLevel = dwhl
        self.start_WebHookLoop()
        AstroLogging.setup_loggingPath(
            astroPath=self.astroPath, logRetention=int(self.launcherConfig.LogRetentionDays))
        if disable_auto_update is not None:
            self.launcherConfig.AutoUpdateLauncherSoftware = not disable_auto_update
        self.version = ALVERSION
        # banner only fits in terminals at least 77 columns wide
        colsize = os.get_terminal_size().columns
        if colsize >= 77:
            vText = "Version " + self.version[1:]
            # pylint: disable=anomalous-backslash-in-string
            print(" __________________________________________________________________________\n" +
                  "| _ _ _ _ |\n" +
                  "| /_\\ ___| |_ _ _ ___ | | __ _ _ _ _ _ __ | |_ ___ _ _ |\n" +
                  "| / _ \\ (_-<| _|| '_|/ _ \\ | |__ / _` || || || ' \\ / _|| ' \\ / -_)| '_| |\n" +
                  "| /_/ \\_\\/__/ \\__||_| \\___/ |____|\\__,_| \\_,_||_||_|\\__||_||_|\\___||_| |\n" +
                  "| |\n" +
                  "|"+vText.center(74)+"|\n" +
                  "|__________________________________________________________________________|")
        AstroLogging.logPrint(
            f"AstroLauncher - Unofficial Dedicated Server Launcher {self.version}")
        AstroLogging.logPrint(
            "If you encounter any bugs please open a new issue at:")
        AstroLogging.logPrint(
            "https://github.com/ricky-davis/AstroLauncher/issues")
        AstroLogging.logPrint(
            "To safely stop the launcher and server press CTRL+C")
        self.latestURL = "https://github.com/ricky-davis/AstroLauncher/releases/latest"
        # detect whether we run as a frozen executable or as a plain script
        bName = os.path.basename(sys.executable)
        if sys.argv[0] == os.path.splitext(bName)[0]:
            self.isExecutable = True
        else:
            self.isExecutable = os.path.samefile(
                sys.executable, sys.argv[0])
        self.cur_server_version = "0.0"
        self.headers = AstroAPI.base_headers
        self.DaemonProcess = None
        self.saveObserver = None
        self.backupObserver = None
        self.hasUpdate = False
        self.is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
        self.affinity = self.launcherConfig.CPUAffinity
        try:
            # pin this process to the configured CPU set ("0,1,2" style)
            if self.affinity != "":
                affinityList = [int(x.strip())
                                for x in self.affinity.split(',')]
                p = psutil.Process()
                p.cpu_affinity(affinityList)
        except ValueError as e:
            AstroLogging.logPrint(f"CPU Affinity Error: {e}", "critical")
            AstroLogging.logPrint(
                "Please correct this in your launcher config", "critical")
            return
        self.check_for_server_update()
        self.DedicatedServer = AstroDedicatedServer(
            self.astroPath, self)
        self.check_for_launcher_update()
        AstroLogging.logPrint("Starting a new session")
        self.validate_playfab_certs()
        self.check_ports_free()
        if self.launcherConfig.AdminAutoConfigureFirewall:
            self.configure_firewall()
        if not self.launcherConfig.DisableNetworkCheck:
            AstroLogging.logPrint("Checking the network configuration..")
            self.check_network_config()
        self.save_reporting()
        if not self.launcherConfig.DisableBackupRetention:
            self.backup_retention()
            AstroLogging.logPrint("Backup retention started")
        # web server (optional) plus the websocket info push loop
        self.webServer = None
        if not self.launcherConfig.DisableWebServer:
            # start http server
            self.webServer = self.start_WebServer()
            self.start_InfoLoop()
        if self.launcherConfig.HideLauncherConsoleWindow:
            # hide the console window (Windows only)
            AstroLogging.logPrint(
                "HideLauncherConsoleWindow enabled, Hiding window in 5 seconds...")
            time.sleep(5)
            # pylint: disable=redefined-outer-name
            kernel32 = ctypes.WinDLL('kernel32')
            user32 = ctypes.WinDLL('user32')
            hWnd = kernel32.GetConsoleWindow()
            user32.ShowWindow(hWnd, 0)
        self.start_server(firstLaunch=True)
    except Exception as err:
        ermsg2 = ('INIT Error on line {}'.format(
            sys.exc_info()[-1].tb_lineno), type(err).__name__, err)
        AstroLogging.logPrint(f"{ermsg2}", "critical", True)
def save_reporting(self):
    """Start (or restart) the watchdog observer that reports save-file events.

    If a previous observer died, it is discarded and recreated via a single
    recursive call; otherwise a new observer is scheduled on the SaveGames
    folder (created if missing).
    """
    if self.saveObserver:
        if not self.saveObserver.is_alive():
            self.saveObserver = None
            self.save_reporting()
    else:
        self.saveObserver = Observer()
        saveGamePath = r"Astro\Saved\SaveGames"
        watchPath = os.path.join(
            self.astroPath, saveGamePath)
        try:
            if not os.path.exists(watchPath):
                os.makedirs(watchPath)
        except Exception as e:
            AstroLogging.logPrint(e)
        self.saveObserver.schedule(
            self.SaveHandler(self), watchPath)
        self.saveObserver.start()
def backup_retention(self):
    """Start (or restart) the watchdog observer that mirrors backups into the retention folder.

    Same restart pattern as save_reporting(): a dead observer is replaced by
    one recursive call; otherwise a BackupHandler is scheduled on the
    Backup\\SaveGames folder (created if missing).
    """
    if self.backupObserver:
        if not self.backupObserver.is_alive():
            self.backupObserver = None
            self.backup_retention()
    else:
        self.backupObserver = Observer()
        backupSaveGamePath = r"Astro\Saved\Backup\SaveGames"
        watchPath = os.path.join(
            self.astroPath, backupSaveGamePath)
        try:
            if not os.path.exists(watchPath):
                os.makedirs(watchPath)
        except Exception as e:
            AstroLogging.logPrint(e)
        # daemon so a stuck observer cannot keep the launcher alive on exit
        self.backupObserver.daemon = True
        self.backupObserver.schedule(
            self.BackupHandler(self), watchPath)
        self.backupObserver.start()
def refresh_launcher_config(self, lcfg=None):
    """Reload launcher settings from the INI, drop unknown keys, and write
    the normalized config back so the INI stays in sync.

    :param lcfg: optional LauncherConfig used as the defaults baseline.
    """
    field_names = set(
        f.name for f in dataclasses.fields(self.LauncherConfig))
    # discard INI keys that are not declared on the LauncherConfig dataclass
    cleaned_config = {k: v for k,
                      v in self.get_launcher_config(lcfg).items() if k in field_names}
    self.launcherConfig = dataclasses.replace(
        self.launcherConfig, **cleaned_config)
    config = MultiConfig()
    config.read_dict({"AstroLauncher": cleaned_config})
    # persist the cleaned settings
    with open(self.launcherINI, 'w') as configfile:
        config.write(configfile)
def overwrite_launcher_config(self, ovrDict):
    """Force-write the given key/value overrides into the [AstroLauncher] section of the INI."""
    MultiConfig().overwrite_with(self.launcherINI, {"AstroLauncher": ovrDict})
def get_launcher_config(self, lfcg=None):
    """Return the [AstroLauncher] settings dict, seeding missing INI values
    with defaults taken from `lfcg` (a fresh LauncherConfig when omitted)."""
    if not lfcg:
        lfcg = self.LauncherConfig()
    defaults = {"AstroLauncher": dataclasses.asdict(lfcg)}
    merged = MultiConfig().baseline(self.launcherINI, defaults)
    return merged.getdict()['AstroLauncher']
def validate_playfab_certs(self):
    """Warm up/validate the Playfab TLS certificates by issuing one request
    through PowerShell's Invoke-WebRequest (populates the Windows cert store).

    Output is discarded; failures are logged as warnings and never fatal.
    """
    try:
        AstroLogging.logPrint("Attempting to validate Playfab Certs")
        playfabRequestCommand = ["powershell", '-executionpolicy', 'bypass', '-command',
                                 'Invoke-WebRequest -uri https://5ea1.playfabapi.com/ -UseBasicParsing']
        # subprocess.DEVNULL replaces the manual open(os.devnull) dance
        proc = subprocess.Popen(
            playfabRequestCommand, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        proc.communicate()
    except Exception as err:
        ermsg3 = ('VerifyPlayfabCert Error on line {}'.format(
            sys.exc_info()[-1].tb_lineno), type(err).__name__, err)
        AstroLogging.logPrint(f"{ermsg3}", "warning", True)
def update_server(self, latest_version):
    """Download and install server build `latest_version` via steamcmd.

    Progress is checkpointed in the local file "update.p"
    ("download" -> "transfer" -> "complete") so an interrupted update can be
    detected on the next launch.
    """
    updateLocation = os.path.join(
        self.astroPath, 'steamcmd', 'steamapps', 'common', 'ASTRONEER Dedicated Server')
    steamcmdFolder = os.path.join(self.astroPath, "steamcmd")
    steamcmdExe = os.path.join(steamcmdFolder, "steamcmd.exe")
    steamcmdZip = os.path.join(self.astroPath, "steamcmd.zip")
    try:
        # bootstrap steamcmd itself if it is not installed yet
        if not os.path.exists(steamcmdFolder):
            if not os.path.exists(steamcmdExe):
                if not os.path.exists(steamcmdZip):
                    url = "https://steamcdn-a.akamaihd.net/client/installer/steamcmd.zip"
                    r = (AstroRequests.get(url)).read()
                    with open(steamcmdZip, 'wb') as f:
                        f.write(r)
                with zipfile.ZipFile(steamcmdZip, 'r') as zip_ref:
                    zip_ref.extractall(steamcmdFolder)
        # a previously staged download only counts if its version matches
        update_downloaded = False
        if os.path.exists(updateLocation):
            upd_version = "0.0"
            try:
                with open(os.path.join(updateLocation, "build.version"), "r") as f:
                    # strip the trailing date suffix from the version line
                    upd_version = (f.readline())[:-10]
                if upd_version == latest_version:
                    update_downloaded = True
            except:
                # unreadable/partial staging folder: discard it
                try:
                    shutil.rmtree(updateLocation)
                except:
                    pass
        if not update_downloaded:
            open("update.p", "wb").write(b"download")
            if os.path.exists(steamcmdExe):
                try:
                    os.remove(steamcmdZip)
                except:
                    pass
            AstroLogging.logPrint(
                f"AUTOMATICALLY UPDATING SERVER TO {latest_version}...")
            try:
                updateCMD = [steamcmdExe, '+login anonymous',
                             '+app_update 728470', 'validate', '+quit']
                update = subprocess.Popen(
                    updateCMD, creationflags=subprocess.DETACHED_PROCESS)
                while update.poll() is None:
                    time.sleep(0.1)
            except Exception as e:
                # kill steamcmd and its children before propagating
                for child in psutil.Process(update.pid).children():
                    try:
                        child.kill()
                    except:
                        pass
                try:
                    update.kill()
                except:
                    pass
                raise Exception("") from e
            upd_version = "0.0"
            try:
                with open(os.path.join(updateLocation, "build.version"), "r") as f:
                    upd_version = (f.readline())[:-10]
            except:
                pass
            if upd_version == latest_version:
                update_downloaded = True
        if update_downloaded:
            # copy the staged build over the live install
            open("update.p", "wb").write(b"transfer")
            dir_util.copy_tree(updateLocation, self.astroPath)
            open("update.p", "wb").write(b"complete")
        cur_version = "0.0"
        with open(os.path.join(self.astroPath, "build.version"), "r") as f:
            cur_version = (f.readline())[:-10]
        if cur_version == latest_version:
            AstroLogging.logPrint(
                f"UPDATE TO {latest_version} SUCCESSFUL.")
            # clean up installer zip, checkpoint file and staging folder
            steamcmdZip = os.path.join(self.astroPath, "steamcmd.zip")
            if os.path.exists(steamcmdZip):
                os.remove(steamcmdZip)
            try:
                os.remove("update.p")
            except:
                pass
            try:
                shutil.rmtree(updateLocation)
            except:
                pass
    except:  # Exception as e:
        AstroLogging.logPrint(
            f"UPDATE TO {latest_version} FAILED.", "warning")
def check_for_server_update(self, serverStart=False, check_only=False):
try:
# print('here1')
if not self.launcherConfig.UpdateOnServerRestart and serverStart:
return
else:
# print('here2')
needs_update = False
update_status = None
if os.path.exists("update.p"):
with open("update.p", "r") as f:
update_status = f.read()
if update_status != "completed":
needs_update = True
# print('here3')
cur_version = "0.0"
try:
with open(os.path.join(self.astroPath, "build.version"), "r") as f:
cur_version = (f.readline())[:-10]
except:
pass
# print(cur_version)
# print('here4')
if cur_version == "0.0":
needs_update = True
url = "https://servercheck.spycibot.com/stats"
data = json.load(AstroRequests.get(url))
# print(data)
# print('here6')
latest_version = data['LatestVersion']
if version.parse(latest_version) > version.parse(cur_version):
needs_update = True
if not os.path.exists(os.path.join(self.astroPath, "AstroServer.exe")):
needs_update = True
if needs_update:
AstroLogging.logPrint(
f"SERVER UPDATE AVAILABLE: {cur_version} -> {latest_version}", "warning")
# print('here7')
if self.launcherConfig.AutoUpdateServerSoftware and not check_only:
self.update_server(latest_version)
# print('here8')
return True, latest_version
cur_version = "0.0"
with open(os.path.join(self.astroPath, "build.version"), "r") as f:
cur_version = (f.readline())[:-10]
self.cur_server_version = cur_version
# print('here9')
except Exception as e:
print(e)
AstroLogging.logPrint(
"Failed to check if update is available", "warning")
return False, "0.0"
def check_for_launcher_update(self, serverStart=False):
    """Check GitHub for a newer launcher release; auto-update when running as
    a frozen executable with AutoUpdateLauncherSoftware enabled.

    Sets self.hasUpdate to the latest tag when one is available.
    """
    try:
        url = "https://api.github.com/repos/ricky-davis/AstroLauncher/releases/latest"
        data = json.load((AstroRequests.get(url)))
        latestVersion = data['tag_name']
        if version.parse(latestVersion) > version.parse(self.version):
            self.hasUpdate = latestVersion
            AstroLogging.logPrint(
                f"UPDATE: There is a newer version of the launcher out! {latestVersion}")
            AstroLogging.logPrint(f"Download it at {self.latestURL}")
            aupdate = self.launcherConfig.AutoUpdateLauncherSoftware
            # during a scheduled restart, only update when explicitly allowed
            if not self.launcherConfig.UpdateOnServerRestart and serverStart:
                return
            if self.isExecutable and aupdate:
                self.autoupdate_launcher(data)
    except:
        AstroLogging.logPrint(
            "Could not determine if new update exists.", msgType="debug")
def autoupdate_launcher(self, data):
    """Self-update the frozen launcher executable using the GitHub release `data`.

    A detached PowerShell process is spawned per release asset; it waits for
    this process (os.getpid()) to exit, downloads the new exe next to the
    current one, swaps it in with Move-Item, and relaunches it. The server is
    then killed so this process actually exits.
    """
    x = data
    downloadFolder = os.path.dirname(sys.executable)
    for fileObj in x['assets']:
        downloadURL = fileObj['browser_download_url']
        fileName = (os.path.splitext(fileObj['name'])[0])
        downloadPath = os.path.join(downloadFolder, fileName)
        downloadCMD = ["powershell", '-executionpolicy', 'bypass', '-command',
                       'Write-Host "Downloading latest AstroLauncher.exe..";', 'wait-process', str(
                           os.getpid()), ';',
                       '[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12;',
                       "$ProgressPreference = 'SilentlyContinue';",
                       'Invoke-WebRequest', f"'{downloadURL}'", "-OutFile", f"'{downloadPath + '_new.exe'}'", ';',
                       "Move-Item", "-path", f"'{downloadPath + '_new.exe'}'", "-destination", f"'{downloadPath + '.exe'}'", "-Force;",
                       'Write-Host "Download complete!";',
                       'Start-Process', f"'{downloadPath + '.exe'}'"]
        # detached so the downloader survives this process exiting
        subprocess.Popen(downloadCMD, shell=True, creationflags=subprocess.DETACHED_PROCESS,
                         stdin=None, stdout=None, stderr=None, close_fds=True)
    time.sleep(2)
    self.DedicatedServer.kill_server("Auto-Update")
# pylint: disable=unused-argument
def signal_handler(self, sig, frame):
    """SIGINT handler: save the game, then shut the dedicated server down."""
    shutdownReason = "Launcher shutting down via signal"
    self.DedicatedServer.kill_server(reason=shutdownReason, save=True)
def start_server(self, firstLaunch=False):
    """
    Starts the Dedicated Server process and waits for it to be registered
    with the Playfab backend before handing off to the server loop.
    """
    if firstLaunch:
        # guarantee the server is saved and killed on any launcher exit
        atexit.register(self.DedicatedServer.kill_server,
                        reason="Launcher shutting down via exit",
                        save=True)
        signal.signal(signal.SIGINT, self.signal_handler)
    else:
        # on restart: re-check updates and build a fresh server object
        self.check_for_server_update(serverStart=True)
        self.check_for_launcher_update(serverStart=True)
        self.DedicatedServer = AstroDedicatedServer(
            self.astroPath, self)
    self.DedicatedServer.status = "starting"
    self.DedicatedServer.busy = False
    # retry until Playfab hands out an XAuth token
    gxAuth = None
    while gxAuth is None:
        try:
            gxAuth = AstroAPI.generate_XAUTH(
                self.DedicatedServer.settings.ServerGuid)
        except:
            AstroLogging.logPrint(
                "Unable to generate XAuth token... Are you connected to the internet?", msgType="warning")
            time.sleep(5)
    self.headers['X-Authorization'] = gxAuth
    oldLobbyIDs = self.DedicatedServer.deregister_all_server()
    AstroLogging.logPrint("Starting Server process...")
    if self.launcherConfig.EnableAutoRestart:
        AstroLogging.logPrint(
            f"Next restart is at {self.DedicatedServer.nextRestartTime}")
    startTime = time.time()
    try:
        self.DedicatedServer.start()
    except:
        AstroLogging.logPrint(
            "Unable to launch AstroServer.exe", "critical")
        return False
    # poll until the process is alive; pcounter budgets roughly 10 seconds
    reachableProcess = None
    pcounter = 40
    while not reachableProcess:
        try:
            reachableProcess = not bool(
                self.DedicatedServer.process.poll())
            pcounter -= 1
            time.sleep(0.25)
        except:
            pcounter -= 2
            time.sleep(0.5)
        if pcounter <= 0:
            AstroLogging.logPrint(
                "Unable to start Server Process after 10 seconds!", "critical")
            return False
    AstroLogging.logPrint(
        f"Server started ( {self.cur_server_version} )! Getting ready....", ovrDWHL=True)
    try:
        self.DaemonProcess = AstroDaemon.launch(
            executable=self.isExecutable, consolePID=self.DedicatedServer.process.pid)
    except:
        AstroLogging.logPrint(
            "Unable to start watcher daemon", "warning")
        return False
    # Wait for server to finish registering...
    serverData = None
    oPFF = self.launcherConfig.PlayfabAPIFrequency
    while not self.DedicatedServer.registered:
        AstroLogging.logPrint("Waiting for server to register...", "debug")
        try:
            serverData = (AstroAPI.get_server(
                self.DedicatedServer.ipPortCombo, self.headers))
            serverData = serverData['data']['Games']
            lobbyIDs = [x['LobbyID'] for x in serverData]
            # a NEW lobby id (not one we just deregistered) means we're live
            if len(set(lobbyIDs) - set(oldLobbyIDs)) == 0:
                time.sleep(self.launcherConfig.PlayfabAPIFrequency)
            else:
                now = time.time()
                if now - startTime > 15:
                    serverData = serverData[0]
                    self.DedicatedServer.registered = True
                    oldLobbyIDs = None
                    self.DedicatedServer.LobbyID = serverData['LobbyID']
            if self.DedicatedServer.process.poll() is not None:
                AstroLogging.logPrint(
                    "Server was forcefully closed before registration. Exiting....")
                return False
        except KeyboardInterrupt:
            self.DedicatedServer.kill_server(
                "Launcher shutting down via KeyboardInterrupt")
        except:
            # likely a Playfab rate limit: back off up to 30s between polls
            AstroLogging.logPrint(
                "Failed to check server. Probably hit rate limit. Backing off and trying again...")
            if self.launcherConfig.PlayfabAPIFrequency < 30:
                self.launcherConfig.PlayfabAPIFrequency += 1
            time.sleep(self.launcherConfig.PlayfabAPIFrequency)
    # restore the configured polling frequency after any backoff
    self.launcherConfig.PlayfabAPIFrequency = oPFF
    self.DedicatedServer.serverData = serverData
    doneTime = time.time()
    elapsed = doneTime - startTime
    AstroLogging.logPrint(
        f"Server ready! Took {round(elapsed,2)} seconds to register.", ovrDWHL=True)
    self.DedicatedServer.status = "ready"
    self.DedicatedServer.server_loop()
def check_ports_free(self):
    """Kill the launcher if the server UDP port, console TCP port, or web
    TCP port is already in use by another process."""
    serverPort = False
    sp = int(self.DedicatedServer.settings.Port)
    consolePort = False
    cp = int(self.DedicatedServer.settings.ConsolePort)
    webPort = False
    wp = int(self.launcherConfig.WebServerPort)

    def is_port_in_use(port, tcp=True):
        # scan current inet connections for a matching local port/protocol
        lc = psutil.net_connections('inet')
        lc = [x for x in lc if x.type == (
            socket.SOCK_STREAM if tcp else socket.SOCK_DGRAM) and x.laddr[1] == port]
        return len(lc) > 0
    serverPort = bool(is_port_in_use(sp, False))  # game traffic is UDP
    consolePort = bool(is_port_in_use(cp))
    if not self.launcherConfig.DisableWebServer:
        webPort = bool(is_port_in_use(wp))
    if serverPort:
        AstroLogging.logPrint(
            f"A process is already using your Server Port ( {sp} UDP )", "critical")
    if consolePort:
        AstroLogging.logPrint(
            f"A process is already using your Console Port ( {cp} TCP )", "critical")
    if webPort:
        AstroLogging.logPrint(
            f"A process is already using your Web Port ( {wp} TCP )", "critical")
    if serverPort or consolePort or webPort:
        self.kill_launcher()
def configure_firewall(self):
    """Ensure Windows Firewall inbound-allow rules exist for the server exe,
    the launcher exe (when frozen), and the web server port.

    Only acts when the firewall is enabled. Without admin rights it can only
    warn that rules are missing; with admin rights each missing rule is
    recreated via netsh (delete-then-add keeps one up-to-date rule per name).
    """
    if not self.launcherConfig.AdminAutoConfigureFirewall:
        return
    ALRule = None
    ALWRule = None
    ASRule = None
    launcherEXEPath = None
    isFirewallEnabled = None
    # non-empty findstr output means at least one profile has State ON
    with os.popen(
            'netsh advfirewall show currentprofile | findstr /L "State" | findstr /L "ON"') as fwCheck:
        isFirewallEnabled = fwCheck.read()
    if isFirewallEnabled:
        serverExePath = os.path.join(
            self.astroPath, 'astro\\binaries\\win64\\astroserver-win64-shipping.exe')
        # look up existing rules; empty output means the rule is missing
        ASRule = os.popen(
            f'netsh advfirewall firewall show rule name=astroserver-win64-shipping.exe verbose | findstr /L "{serverExePath}"').read()
        if self.isExecutable:
            launcherEXEPath = os.path.join(os.getcwd(), sys.argv[0])
            ALRule = os.popen(
                f'netsh advfirewall firewall show rule name=astrolauncher.exe verbose | findstr /L "{launcherEXEPath}"').read()
        if not self.launcherConfig.DisableWebServer:
            ALWRule = os.popen(
                f'netsh advfirewall firewall show rule name=AstroLauncherWeb | findstr /L "{self.launcherConfig.WebServerPort}"').read()
        if not self.is_admin:
            if (not ASRule)\
                    or (self.isExecutable and not ALRule)\
                    or (not self.launcherConfig.DisableWebServer and self.isExecutable and not ALWRule):
                AstroLogging.logPrint(
                    "Could not find firewall settings! Please relaunch as Administrator.", "warning")
        else:
            newRules = False
            if not ASRule:
                newRules = True
                subprocess.call(
                    f'netsh advfirewall firewall delete rule name=astroserver-win64-shipping.exe dir=in program="{serverExePath}"' +
                    f'& netsh advfirewall firewall add rule name=astroserver-win64-shipping.exe dir=in action=allow program="{serverExePath}"',
                    shell=True,
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL
                )
            if self.isExecutable:
                if not ALRule:
                    newRules = True
                    subprocess.call(
                        f'netsh advfirewall firewall delete rule name=astrolauncher.exe dir=in program="{launcherEXEPath}"' +
                        f'& netsh advfirewall firewall add rule name=astrolauncher.exe dir=in action=allow program="{launcherEXEPath}"',
                        shell=True,
                        stdout=subprocess.DEVNULL,
                        stderr=subprocess.DEVNULL
                    )
            if not self.launcherConfig.DisableWebServer and not ALWRule:
                newRules = True
                subprocess.call(
                    f'netsh advfirewall firewall delete rule name=AstroLauncherWeb dir=in protocol=TCP localport={self.launcherConfig.WebServerPort}' +
                    f'& netsh advfirewall firewall add rule name=AstroLauncherWeb dir=in action=allow protocol=TCP localport={self.launcherConfig.WebServerPort}',
                    shell=True,
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL
                )
            if newRules:
                AstroLogging.logPrint(
                    "Setting custom firewall rules...")
def check_network_config(self):
    """Probe the game port locally and remotely and log guidance for each
    failure combination; also warn loudly if the console port is exposed."""
    localTest = ValidateSettings.test_network(
        self.DedicatedServer.settings.PublicIP, int(self.DedicatedServer.settings.Port), False)
    remoteTest = ValidateSettings.test_nonlocal(
        self.DedicatedServer.settings.PublicIP, int(self.DedicatedServer.settings.Port))
    testMatrix = [localTest, remoteTest]
    if testMatrix == [True, True]:
        AstroLogging.logPrint("Server network configuration good!")
    elif testMatrix == [False, True]:
        AstroLogging.logPrint(
            "Your server is not accessible from your local network.", "warning")
        AstroLogging.logPrint(
            "This usually indicates an issue with NAT Loopback", "warning")
        AstroLogging.logPrint(
            "See if your router supports it, or setup your server with playit.gg", "warning")
        AstroLogging.logPrint(
            "Guide to setting up playit.gg (11:28): https://youtu.be/SdLNFowq8WI?t=688", "warning")
    elif testMatrix == [True, False]:
        AstroLogging.logPrint(
            "Your server can be seen locally, but not remotely.", "warning")
        AstroLogging.logPrint(
            "This usually means you have a Loopback adapter that needs to be disabled", "warning")
        AstroLogging.logPrint(
            "and that you may need to Port Forward/open your firewall.", "warning")
    elif testMatrix == [False, False]:
        AstroLogging.logPrint(
            "The server is completely unreachable!", "warning")
        AstroLogging.logPrint(
            f"Please port forward {self.DedicatedServer.settings.Port} UDP and ensure the firewall settings are correct.", "warning")
    # the console port should NOT be reachable from outside
    rconNetworkCorrect = not (ValidateSettings.test_network(
        self.DedicatedServer.settings.PublicIP, int(self.DedicatedServer.settings.ConsolePort), True))
    if rconNetworkCorrect:
        AstroLogging.logPrint("Remote Console network configuration good!")
    else:
        AstroLogging.logPrint(
            f"SECURITY ALERT: Your console port ({self.DedicatedServer.settings.ConsolePort}) is Port Forwarded!", "warning")
        AstroLogging.logPrint(
            "SECURITY ALERT: This allows anybody to control your server.", "warning")
        AstroLogging.logPrint(
            "SECURITY ALERT: Disable this ASAP to prevent issues.", "warning")
    time.sleep(5)
def start_WebServer(self):
    """Launch the launcher's web UI server on a daemon thread and return it."""
    ws = AstroWebServer.WebServer(self)

    def start_WebServerThread():
        # On Python 3.8+ Windows defaults to the Proactor event loop; force
        # the selector policy here (presumably required by the web server's
        # async stack — confirm against AstroWebServer).
        if sys.version_info.minor > 7:
            asyncio.set_event_loop_policy(
                asyncio.WindowsSelectorEventLoopPolicy())
        # each thread needs its own event loop
        asyncio.set_event_loop(asyncio.new_event_loop())
        ws.run()
    t = Thread(target=start_WebServerThread, args=())
    t.daemon = True
    t.start()
    return ws
def autoUpdate_websockets_Loop(self):
    """Push fresh data to all connected web-UI websocket clients once per second.

    Runs forever; intended to be hosted on a daemon thread (see start_InfoLoop).
    """
    while True:
        time.sleep(1)
        self.webServer.iterWebSocketConnections()
def start_InfoLoop(self):
    """Run autoUpdate_websockets_Loop on a daemon thread with its own event loop."""
    def start_InfoLoopThread(self):
        # same Windows selector-loop workaround as start_WebServer
        if sys.version_info.minor > 7:
            asyncio.set_event_loop_policy(
                asyncio.WindowsSelectorEventLoopPolicy())
        asyncio.set_event_loop(asyncio.new_event_loop())
        self.autoUpdate_websockets_Loop()
    t = Thread(target=start_InfoLoopThread, args=(self,))
    t.daemon = True
    t.start()
def kill_launcher(self):
    """Kill all child processes and then the launcher process itself.

    The initial 5 second sleep gives pending log output time to flush.
    """
    time.sleep(5)
    try:
        for child in psutil.Process(os.getpid()).children():
            child.kill()
    except:
        pass
    # Kill current process (signal 9: immediate, no cleanup handlers run)
    try:
        os.kill(os.getpid(), 9)
    except:
        pass
def start_WebHookLoop(self):
    """Run the Discord webhook sender loop on a daemon thread."""
    webhookThread = Thread(target=AstroLogging.sendDiscordReqLoop)
    webhookThread.daemon = True
    webhookThread.start()
if __name__ == "__main__":
    try:
        # set the console window title (Windows)
        os.system("title AstroLauncher - Unofficial Dedicated Server Launcher")
    except:
        pass
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument("-d", "--daemon", dest="daemon",
                            help="Set the launcher to run as a Daemon", action='store_true')
        parser.add_argument(
            "-c", "--consolepid", help="Set the consolePID for the Daemon", type=str.lower)
        parser.add_argument(
            "-l", "--launcherpid", help="Set the launcherPID for the Daemon", type=str.lower)
        parser.add_argument(
            "-p", "--path", help="Set the server folder path", type=str.lower)
        parser.add_argument("-U", "--noupdate", dest="noautoupdate", default=None,
                            help="Disable autoupdate if running as exe", action='store_true')
        parser.add_argument("-i", "--ini", dest="launcherINI", default="Launcher.ini",
                            help="Set the location of the Launcher INI")
        args = parser.parse_args()
        if args.daemon:
            # daemon mode: hide the console window and watch the given PIDs
            if args.consolepid and args.launcherpid:
                kernel32 = ctypes.WinDLL('kernel32')
                user32 = ctypes.WinDLL('user32')
                SW_HIDE = 0
                hWnd = kernel32.GetConsoleWindow()
                if hWnd:
                    user32.ShowWindow(hWnd, SW_HIDE)
                AstroDaemon().daemon(args.launcherpid, args.consolepid)
            else:
                print("Insufficient launch options!")
        else:
            # normal mode: run the launcher itself
            AstroLauncher(
                args.path, disable_auto_update=args.noautoupdate, launcherINI=args.launcherINI)
    except KeyboardInterrupt:
        pass
    except Exception as err:
        ermsg1 = ('FINAL Error on line {}'.format(
            sys.exc_info()[-1].tb_lineno), type(err).__name__, err)
        AstroLogging.logPrint(f"{ermsg1}", "critical", True)
|
HiwinRA605_socket_ros_20190604111620.py | #!/usr/bin/env python3
# license removed for brevity
#接收策略端命令 用Socket傳輸至控制端電腦
import socket
##多執行序
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import talker as talk
import enum
data = '0'  # initial value of the payload string sent over the socket
Arm_feedback = 1  # assume the arm is busy at startup
state_feedback = 0  # last response from the 'arm_state' ROS service
NAME = 'socket_server'  # ROS node name
client_response = 0  # number of pose requests served so far
##------------class pos-------
class pos():
    """Six-component pose container (position + Euler orientation).

    Note: the rest of this script also assigns pos.x, pos.y, ... directly on
    the class itself, using it as a shared namespace.
    """
    def __init__(self, x, y, z, pitch, roll, yaw):
        # store the six pose components verbatim
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw
##------------class socket_cmd---------
class socket_cmd():
    """Arm-command fields shared between the ROS callbacks and the socket loop.

    Note: the rest of this script assigns these fields directly on the class,
    using it as a shared namespace.
    """
    def __init__(self, grip, setvel, ra, delay, setboth, action):
        # copy every command field onto the instance
        self.grip, self.setvel, self.ra = grip, setvel, ra
        self.delay, self.setboth, self.action = delay, setboth, action
##-----------switch define------------##
class switch(object):
    """C-style switch helper (classic ActiveState recipe).

    Usage::

        for case in switch(value):
            if case(1):
                ...
                break
            if case(2, 3):   # multiple values per case; omit break to fall through
                ...
                break
    """
    def __init__(self, value):
        self.value = value
        self.fall = False  # set once a case matched, enabling fall-through

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # BUGFIX: the original did `raise StopIteration` here; under PEP 479
        # (Python 3.7+) that is converted to RuntimeError, crashing every
        # switch loop whose cases all miss. A plain return ends the generator.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
    """Forward the arm's busy/idle/shutdown state to the ROS 'arm_state' service.

    Blocks until the service is available; the service response object is
    stored in the module-global state_feedback and returned.
    """
    global state_feedback
    rospy.wait_for_service('arm_state')
    try:
        Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
        state_feedback = Arm_state_client(Arm_state)
        return state_feedback
    except rospy.ServiceException as e:
        print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server 端-------
def point_data(req):
    """ROS service callback: cache the pose sent by the strategy side.

    The values are stored as strings on the `pos` class (used as a shared
    namespace) and the incremented request count is returned as the response.
    """
    global client_response
    pos.x = '%s'%req.x
    pos.y = '%s'%req.y
    pos.z = '%s'%req.z
    pos.pitch = '%s'%req.pitch
    pos.roll = '%s'%req.roll
    pos.yaw = '%s'%req.yaw
    client_response = client_response + 1
    return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req):
    """ROS service callback: receive the arm-mode command from the strategy side.

    Fields are stored as ints on the `socket_cmd` class (shared namespace);
    always returns 1 to acknowledge.
    """
    socket_cmd.action = int('%s'%req.action)
    socket_cmd.grip = int('%s'%req.grip)
    socket_cmd.ra = int('%s'%req.ra)
    socket_cmd.setvel = int('%s'%req.vel)
    socket_cmd.setboth = int('%s'%req.both)
    return(1)
def socket_server():
    """Create the ROS server node and advertise the arm services, then spin."""
    rospy.init_node(NAME)
    a = rospy.Service('arm_mode',arm_mode, Arm_Mode)  # serves arm mode data
    s = rospy.Service('arm_pos',arm_data, point_data)  # serves arm pose data
    print ("Ready to connect")
    rospy.spin()  # block until node shutdown
##------------server 端 end-------
##----------socket 封包傳輸--------------##
##-----------socket client--------
def socket_client():
    """Connect to the arm-control PC over TCP and stream arm commands.

    Runs on a background thread. In a loop it translates the most recent ROS
    command (socket_cmd / pos class state) into a TCP packet, sends it, and
    feeds the arm's busy/idle/shutdown reply back to ROS via
    socket_client_arm_state().
    """
    global Arm_feedback,data
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(('192.168.0.1', 8080))  # iclab 5 & iclab hiwin
        # s.connect(('192.168.1.102', 8080))  # iclab computerx
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(s.recv(1024))
    # prompt (Chinese): press 1 to start streaming, press 3 to quit
    start_input=int(input('開始傳輸請按1,離開請按3 : '))
    if start_input==1:
        while 1:
            # ---- translate the current arm command into a TCP packet ----
            for case in switch(socket_cmd.action):
                if case(Taskcmd.Action_Type.PtoP):
                    # point-to-point move; inner switch picks the control mode
                    for case in switch(socket_cmd.setboth):
                        if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                            data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                            data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                            data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                            break
                    break
                if case(Taskcmd.Action_Type.Line):
                    # straight-line move; same control-mode sub-switch
                    for case in switch(socket_cmd.setboth):
                        if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                            data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                            data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                            data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                            break
                    break
                if case(Taskcmd.Action_Type.SetVel):
                    data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                    break
                if case(Taskcmd.Action_Type.Delay):
                    data = TCP.SetDelay(socket_cmd.grip,0)
                    break
                if case(Taskcmd.Action_Type.Mode):
                    data = TCP.SetMode(socket_cmd.grip,0)
                    break
            socket_cmd.action= 5  # reset to "no command" so we don't resend
            s.send(data.encode('utf-8'))  # send the command as UTF-8 bytes
            feedback_str = s.recv(1024)
            # the arm reports its state in the reply's third byte
            if str(feedback_str[2]) == '70':  # 'F' -> not busy
                feedback = 0
                socket_client_arm_state(feedback)
                print("isbusy false")
            if str(feedback_str[2]) == '84':  # 'T' -> busy
                feedback = 1
                socket_client_arm_state(feedback)
                print("isbusy true")
            if str(feedback_str[2]) == '54':  # '6' -> shutdown
                feedback = 6
                socket_client_arm_state(feedback)
                print("shutdown")
            # NOTE(review): Arm_feedback is never reassigned above, so this
            # shutdown check cannot fire from the replies handled here
            if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
                rospy.on_shutdown(myhook)
                break
    if start_input == 3:
        pass
    s.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## 多執行緒
def thread_test():
    """Background-thread entry point: run the TCP client loop."""
    socket_client()
## 多執行序 end
def myhook():
    """rospy shutdown hook: announce that the node is going down."""
    print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5  # start in "no command" mode
    # run the TCP client on a background thread while the ROS services spin here
    t = threading.Thread(target=thread_test)
    t.start()
    socket_server()  # blocks in rospy.spin()
    t.join()
# VS Code shortcuts:
# Ctrl+K Ctrl+C  add line comment
# Ctrl+K Ctrl+U  remove line comment
# Ctrl+] / Ctrl+[  indent / outdent line
parallel_python.py | # Parallel processing can be done in single machines or distributed in a cluster
# There are many packages for both.
# Single machines include multiprocessing & the default package concurrent.futures which work very similarly
# Clusters include parallel python and jug
# More here: https://wiki.python.org/moin/ParallelProcessing
# More on multiprocessing vs multithreading
# https://medium.com/towards-artificial-intelligence/the-why-when-and-how-of-using-python-multi-threading-and-multi-processing-afd1b8a8ecca
# CPU-bound tasks > multiprocessing
# calculations
# IO-bound tasks > multithreading
# network
# read-write files
# MULTIPROCESSING
# check no. of cores
#---------------------------------------
import multiprocessing as mp
print(mp.cpu_count())  # number of logical CPUs visible to the OS
#---------------------------------------
import multiprocessing as mp
def worker(files):
    """Pool worker skeleton: replace the body with the real per-item task.

    FIX: the original body was the bare words `some task`, which is a
    SyntaxError; keep the placeholder as a comment so the snippet runs.
    """
    # some task
    pass
def pooling(allfiles, name, newfolderpath, acctHeader, fieldnamefile):
list_to_iterate = [file1, file2, file3, file4]
process_spawn = min(mp.cpu_count(), len(allfiles))
pool = mp.Pool(process_spawn)
pool.map(worker, list_to_iterate, chunksize=1)
pool.close()
#---------------------------------------
# for worker function that require more than one variable, use 'partial' function to 'group' them together
import multiprocessing as mp
from functools import partial
def fdtw(match, pattern, features, distances):
    # Illustrative stub for the per-slice DTW worker.
    do_something
def pooling(win_slices, pattern, features, cores=1):
    # `cores` scales the worker count as a fraction of available CPUs.
    processes = round(min(mp.cpu_count(), len(win_slices))*cores)
    pool = mp.Pool(processes)
    # partial "freezes" the shared keyword arguments so map only varies `match`.
    # NOTE(review): `distances` is not defined in this scope -- it would need to
    # be a parameter or module global for this snippet to actually run.
    fdtw_partial = partial(fdtw, pattern=pattern, features=features, distances=distances)
    pool.map(fdtw_partial, win_slices, chunksize=1)
    pool.close()
# use multiprocessing pool
# https://www.youtube.com/watch?v=s1SkCYMnfbY
#---------------------------------------
# EXAMPLE 1
# unzip files from different folders
import tarfile
import gzip
import os
import multiprocessing as mp
from time import time
# worker, task to process, i.e., unzipping
def unziptar(folder):
    """Worker: extract every ``*.tar.gz`` archive found directly in *folder*.

    Archives are extracted next to themselves (into *folder*); the archive
    files are left in place.

    :param folder: directory whose top-level ``*.tar.gz`` files are extracted.
    """
    for file in os.listdir(folder):
        filepath = os.path.join(folder, file)
        if file.endswith("tar.gz"):
            print('extracting... {}'.format(filepath))
            # Context manager guarantees the archive handle is closed even if
            # extractall() raises (the original leaked the handle on error).
            with tarfile.open(filepath, 'r:gz') as tar:
                tar.extractall(os.path.dirname(filepath))
# spawn processes for each loop
def fanout_unziptar(path):
    """Extract all ``*.tar.gz`` archives below *path* using a process pool.

    Walks *path*, collects the set of directories that contain at least one
    ``tar.gz`` archive, and hands each directory to one ``unziptar`` worker.
    """
    # Collect the (deduplicated) directories containing tar.gz archives.
    my_files = set()
    for root, dirs, files in os.walk(path):
        if any(name.endswith("tar.gz") for name in files):
            my_files.add(root)
    # Unzipping is I/O-bound: too many workers just thrash disk and RAM,
    # so cap the pool at 4 processes.
    # The with-block guarantees the pool is torn down even if a worker
    # raises (the original only called close() and never joined).
    with mp.Pool(processes=4) as pool:
        # chunksize=1 so each directory is dispatched individually
        pool.map(unziptar, my_files, chunksize=1)
# need this "if" else windows will have recursive error
# a python entry point for a function
if __name__ == "__main__":
start = time()
path = r"/Users/jake/Desktop/test"
fanout_unziptar(path)
end = time()
print('script ended after {} mins'.format((end-start)/60))
# set special list or dict for results of multiprocess to pump within
#---------------------------------------
l = mp.Manager().list()
# e.g ----
urls = ["url1", "url2", "url3", "url4"]
def worker(url, l):
"""call individual recommenders & get predictions"""
data = {"resultSize": "something"}
prediction = requests.post(url, json=data).content
prediction = json.loads(prediction)
l.append(prediction)
def multiproc(urls):
l = mp.Manager().list()
worker_ = partial(worker, l=l)
pool = mp.Pool(4)
pool.map(worker_, urls, chunksize=1)
pool.close()
return l
d = mp.Manager().dict()
# convert dict proxy to dict
d = json.dumps(d.copy())
d = json.loads(d)
# set error logging
import logging
mpl = mp.log_to_stderr()
mpl.setLevel(logging.INFO)
def find_background(img, imgfolder, color="white", threshold="0.3"):
    """Worker that deletes images whose background matches *color* (illustrative stub)."""
    # NOTE(review): illustrative pseudocode -- the real path to remove is not
    # shown, and `threshold` defaults to the *string* "0.3" while the caller
    # below passes the float 0.3; confirm the intended type.
    os.remove("some images")
    return img
def find_background_pool(imgfolder, engine, keywords):
    """parallel processing"""
    # NOTE(review): `img_list_new` is undefined here and the `engine`/`keywords`
    # parameters are unused -- this snippet is illustrative, not runnable as-is.
    processes = round(min(mp.cpu_count(), len(img_list_new)))
    pool = mp.Pool(processes)
    # partial freezes the shared keyword arguments so map only varies the image
    find_background_ = partial(find_background, imgfolder=imgfolder, color="white", threshold=0.3)
    # returned images will be all stored within a list
    deleted_imgs = pool.map(find_background_, img_list_new)
    pool.close()
# execute different functions concurrently -----
# v1
from functools import partial
from multiprocessing import Pool
def a(param1, param2, param3):
    """Return the left-associative sum of the three arguments."""
    partial_total = param1 + param2
    return partial_total + param3
def b(param1, param2):
    """Return the sum of the two arguments."""
    result = param1 + param2
    return result
def smap(f):
    # Adapter for pool.map: invoke a zero-argument callable (e.g. a
    # functools.partial), letting one map() run *different* functions.
    return f()
func1 = partial(a, 1, 2, 3)
func2 = partial(b, 1, 2)
pool = Pool(processes=2)
res = pool.map(smap, [func1, func2])
pool.close()
pool.join()
print(res)
# v2
from multiprocessing import Process
import os
import datetime
def func_1(title):
    """Print a hello message plus the current second/microsecond.

    :param title: unused; kept so Process(target=func_1, args=(...,)) works.
    """
    now = datetime.datetime.now()
    # Python 3 print() calls -- the original used Python 2 print statements,
    # which are syntax errors under Python 3 (the rest of this file targets 3).
    print("hello, world")
    print("Current second: %d" % now.second)
    print("Current microsecond: %d" % now.microsecond)
def func_2(name):
    """Print a goodbye message plus the current second/microsecond.

    :param name: unused; kept so Process(target=func_2, args=(...,)) works.
    """
    now = datetime.datetime.now()
    # Python 3 print() calls -- the original used Python 2 print statements,
    # which are syntax errors under Python 3 (the rest of this file targets 3).
    print("Bye, world")
    print("Current second: %d" % now.second)
    print("Current microsecond: %d" % now.microsecond)
if __name__ == '__main__':
procs = []
procs.append(Process(target=func_2, args=('bob',)))
procs.append(Process(target=func_1, args=('sir',)))
map(lambda x: x.start(), procs)
map(lambda x: x.join(), procs)
# MULTI THREADING
# https://stackoverflow.com/questions/48994440/execute-a-function-after-flask-returns-response/51013358
from threading import Thread
def do_something(json_input):
something = json_input
return something
thread = Thread(target=do_something, kwargs={'json_input': json})
thread.start()
# https://www.digitalocean.com/community/tutorials/how-to-use-threadpoolexecutor-in-python-3
import requests
import concurrent.futures
def get_wiki_page_existence(wiki_page_url, timeout=10):
    """Fetch *wiki_page_url* and report whether the page exists.

    Returns "<url> - <status>" where status is "exists" (HTTP 200),
    "does not exist" (HTTP 404) or "unknown" for anything else.
    """
    response = requests.get(url=wiki_page_url, timeout=timeout)
    # map the two recognised status codes; anything else stays "unknown"
    status_names = {200: "exists", 404: "does not exist"}
    page_status = status_names.get(response.status_code, "unknown")
    return wiki_page_url + " - " + page_status
wiki_page_urls = [
"https://en.wikipedia.org/wiki/Ocean",
"https://en.wikipedia.org/wiki/Island",
"https://en.wikipedia.org/wiki/this_page_does_not_exist",
"https://en.wikipedia.org/wiki/Shark",
]
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for url in wiki_page_urls:
futures.append(executor.submit(get_wiki_page_existence, wiki_page_url=url))
for future in concurrent.futures.as_completed(futures):
print(future.result())
# note, to use multiprocess with concurrent futures we can just use this
# with concurrent.futures.ProcessPoolExecutor(NUM_CORES) as executor:
# RAY
#---------------------------------------
# uses the Apache Arrow backend; supposedly much faster than multiprocessing
# https://towardsdatascience.com/10x-faster-parallel-python-without-python-multiprocessing-e5017c93cce1
# execute an embarrassingly parallel task -----
import ray
ray.init()
@ray.remote
def f(x):
return x * x
futures = [f.remote(i) for i in range(4)]
print(ray.get(futures))
# execute different functions concurrently -----
import ray
ray.init(num_cpus=2)
# Define functions you want to execute in parallel using
# the ray.remote decorator.
@ray.remote
def func1():
print("Working1")
@ray.remote
def func2():
print("Working2")
# Execute func1 and func2 in parallel.
for i in range(20):
ray.get([func1.remote(), func2.remote()])
# Joblib
#---------------------------------------
# sequential
sentences = [preprocess(text) for text in pages]
# parallel
from joblib import Parallel, delayed
sentences = Parallel(n_jobs=5)(delayed(preprocess)(text) for text in pages) |
JointE+ATT.py | #coding:utf-8
import numpy as np
import tensorflow as tf
import os
import time
import datetime
import ctypes
import threading
import json
ll1 = ctypes.cdll.LoadLibrary
lib_cnn = ll1("./init_cnn.so")
ll2 = ctypes.cdll.LoadLibrary
lib_kg = ll2("./init_know.so")
class Config(object):
    """Hyper-parameters and corpus statistics for the joint CNN + KG model.

    Most sizes are read from the C helper libraries (init_cnn.so /
    init_know.so) that were loaded at module level.
    """
    def __init__(self):
        self.instanceTot = lib_cnn.getInstanceTot()    # total sentence instances
        self.sequence_size = lib_cnn.getLenLimit()     # max sentence length
        self.num_classes = lib_cnn.getRelationTotal()  # relation classes for the CNN
        self.num_words = lib_cnn.getWordTotal()
        self.num_positions = 2 * lib_cnn.getPositionLimit() + 1
        self.word_size = lib_cnn.getWordDimension()
        self.position_size = 5
        # each token embedding = word vector + two position vectors (head/tail)
        self.embedding_size = self.word_size + self.position_size * 2
        self.filter_size = 3
        self.num_filters = 230
        self.relation_size = self.word_size
        self.dropout_keep_prob = 0.5
        self.l2_lambda = 0.0001
        # index of the NA ("no relation") class
        self.NA = 51
        lib_cnn.setNA(self.NA)
        lib_cnn.setRate(3)
        # margin for the TransE-style ranking loss
        self.margin = 1.0
        self.nbatches = 100
        self.trainTimes = 15
        # filled in from lib_kg in __main__ after construction
        self.entityTotal = 0
        self.relationTotal = 0
class Model(object):
    """Joint CNN relation-extraction + TransE knowledge-graph model (TF 1.x).

    Builds two losses over shared word embeddings:
      * loss_cnn -- sentence-bag relation classification with a conv + max-pool
        encoder and attention over the instances in the bag;
      * loss_kg  -- margin-based TransE ranking loss over (h, r, t) triples.
    """
    def __init__(self, config):
        sequence_size = config.sequence_size
        num_classes = config.num_classes
        num_words = config.num_words
        num_positions = config.num_positions
        embedding_size = config.embedding_size
        word_size = config.word_size
        position_size = config.position_size
        relation_size = config.relation_size
        filter_size = config.filter_size
        num_filters = config.num_filters
        dropout_keep_prob = config.dropout_keep_prob
        margin = config.margin
        l2_lambda = config.l2_lambda
        # CNN inputs: one bag of sentence instances plus its single label.
        self.input_x = tf.placeholder(tf.int32, [None, sequence_size], name = "input_x")
        self.input_p_h = tf.placeholder(tf.int32, [None, sequence_size], name = "input_p_h")
        self.input_p_t = tf.placeholder(tf.int32, [None, sequence_size], name = "input_p_t")
        self.input_r = tf.placeholder(tf.int32, [1, 1], name = "input_r")
        self.input_r_n = tf.placeholder(tf.float32, [1, 1], name = "input_r_n")
        self.input_h = tf.placeholder(tf.int32, [1, 1], name = "input_h")
        self.input_t = tf.placeholder(tf.int32, [1, 1], name = "input_t")
        self.input_y = tf.placeholder(tf.float32, [1, num_classes], name = "input_y")
        # KG inputs: positive triples and their corrupted (negative) versions.
        self.pos_h = tf.placeholder(tf.int32, [None])
        self.pos_t = tf.placeholder(tf.int32, [None])
        self.pos_r = tf.placeholder(tf.int32, [None])
        self.neg_h = tf.placeholder(tf.int32, [None])
        self.neg_t = tf.placeholder(tf.int32, [None])
        self.neg_r = tf.placeholder(tf.int32, [None])
        l2_loss = tf.constant(0.0)
        with tf.name_scope("embedding-lookup"):
            # NOTE(review): `word_embeddings` is a module-level numpy array
            # initialized in __main__ before Model is constructed.
            self.word_embeddings = tf.Variable(word_embeddings, name="word_embeddings")
            self.relation_embeddings = tf.get_variable("relation_embeddings", [config.relationTotal, word_size])
            self.position_embeddings = tf.get_variable("position_embeddings", [num_positions, position_size])
            self.relation_attention = tf.get_variable("relation_attention", [num_classes, relation_size])
            self.NAattention = tf.get_variable("NAattention", [relation_size, 1])
            self.attention = tf.get_variable("attention", [num_filters, relation_size])
            self.r = tf.nn.embedding_lookup(self.attention, self.input_r)
            #know
            pos_h_e = tf.nn.embedding_lookup(self.word_embeddings, self.pos_h)
            pos_t_e = tf.nn.embedding_lookup(self.word_embeddings, self.pos_t)
            pos_r_e = tf.nn.embedding_lookup(self.relation_embeddings, self.pos_r)
            neg_h_e = tf.nn.embedding_lookup(self.word_embeddings, self.neg_h)
            neg_t_e = tf.nn.embedding_lookup(self.word_embeddings, self.neg_t)
            neg_r_e = tf.nn.embedding_lookup(self.relation_embeddings, self.neg_r)
            #cnn
            self.x_initial = tf.nn.embedding_lookup(self.word_embeddings, self.input_x)
            self.x_p_h = tf.nn.embedding_lookup(self.position_embeddings, self.input_p_h)
            self.x_p_t = tf.nn.embedding_lookup(self.position_embeddings, self.input_p_t)
            # legacy TF (<1.0) concat signature with the axis argument first
            self.x = tf.expand_dims(tf.concat(2, [self.x_initial, self.x_p_h, self.x_p_t]), -1)
            self.head = tf.nn.embedding_lookup(self.word_embeddings, self.input_h)
            self.tail = tf.nn.embedding_lookup(self.word_embeddings, self.input_t)
            l2_loss += tf.nn.l2_loss(self.attention)
        with tf.name_scope("conv-maxpool"):
            self.W = tf.get_variable("W", [filter_size, embedding_size, 1, num_filters])
            self.b = tf.get_variable("b", [num_filters])
            conv = tf.nn.conv2d(self.x, self.W, strides=[1, 1, 1, 1], padding="VALID", name="conv")
            h = tf.nn.tanh(tf.nn.bias_add(conv, self.b), name="tanh")
            # max-pool over the whole sentence -> one num_filters vector per instance
            self.y = tf.nn.max_pool(h, ksize=[1, sequence_size - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name="pool")
            l2_loss += tf.nn.l2_loss(self.W)
            l2_loss += tf.nn.l2_loss(self.b)
            self.y = tf.reshape(self.y, [-1, num_filters])
        with tf.name_scope('attention'):
            # score each instance against the current relation's attention vector
            self.r = tf.reshape(self.r, [relation_size, -1])
            self.e = tf.matmul(tf.matmul(self.y, self.attention), self.r)
            alpha = tf.reshape(self.e, [1, -1])
            self.alpha_reshape = tf.nn.softmax(alpha)
            # attention-weighted bag representation
            self.y_attention = tf.matmul(self.alpha_reshape, self.y)
        with tf.name_scope("dropout"):
            self.y_attention = tf.nn.l2_normalize(self.y_attention, 1)
            self.h_drop = tf.nn.dropout(self.y_attention, dropout_keep_prob)
            self.transfer_w = tf.get_variable("transfer_w", [num_filters, num_classes])
            self.scores = tf.matmul(self.h_drop, self.transfer_w)
            l2_loss += tf.nn.l2_loss(self.transfer_w)
        with tf.name_scope("loss"):
            # legacy positional signature: (logits, labels)
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
            self.loss_cnn = tf.reduce_mean(cross_entropy) + l2_lambda * l2_loss
            # TransE margin ranking loss with L1 distance
            pos = tf.reduce_sum(abs(pos_h_e + pos_r_e - pos_t_e), 1, keep_dims = True)
            neg = tf.reduce_sum(abs(neg_h_e + neg_r_e - neg_t_e), 1, keep_dims = True)
            self.loss_kg = tf.reduce_sum(tf.maximum(pos - neg + margin, 0))
        with tf.name_scope("accuracy"):
            self.predictions = tf.argmax(self.scores, 1, name="predictions")
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
bags_sum = 0.0
bags_hit_NA = 0.0
sum_NA = 0.0
sum_fNA = 0.0
bags_hit = 0.0
loss_sum = 0.0
if __name__ == "__main__":
lib_cnn.readWordVec()
lib_cnn.readFromFile()
lib_kg.init()
np.random.seed(0)
tf.set_random_seed(0)
config = Config()
word_embeddings = np.zeros(config.num_words * config.word_size, dtype = np.float32)
lib_cnn.getWordVec.argtypes = [ctypes.c_void_p]
lib_cnn.getWordVec(word_embeddings.__array_interface__['data'][0])
word_embeddings.resize((config.num_words,config.word_size))
config.batch_size = lib_kg.getTripleTotal() / config.nbatches
config.entityTotal = lib_kg.getEntityTotal()
config.relationTotal = lib_kg.getRelationTotal()
with tf.Graph().as_default():
conf = tf.ConfigProto()
sess = tf.Session(config=conf)
with sess.as_default():
initializer = tf.contrib.layers.xavier_initializer()
with tf.variable_scope("model", reuse=None, initializer = initializer):
m = Model(config = config)
global_step_cnn = tf.Variable(0, name="global_step_cnn", trainable=False)
optimizer_cnn = tf.train.GradientDescentOptimizer(0.01)
grads_and_vars_cnn = optimizer_cnn.compute_gradients(m.loss_cnn)
train_op_cnn = optimizer_cnn.apply_gradients(grads_and_vars_cnn, global_step = global_step_cnn)
global_step_kg = tf.Variable(0, name="global_step_kg", trainable=False)
optimizer_kg = tf.train.GradientDescentOptimizer(0.001)
grads_and_vars_kg = optimizer_kg.compute_gradients(m.loss_kg)
train_op_kg = optimizer_kg.apply_gradients(grads_and_vars_kg, global_step=global_step_kg)
sess.run(tf.initialize_all_variables())
def outEmbedding(str1):
word_embeddings, relation_embeddings, position_embeddings, relation_attention, attention, W, B, transfer_w, transfer_b, softmax_w, softmax_b = sess.run([m.word_embeddings, m.relation_embeddings, m.position_embeddings, m.relation_attention, m.attention, m.W, m.b, m.transfer_w, m.transfer_b, m.softmax_w, m.softmax_b])
log = open("log"+str1+".txt", "w")
log.write(json.dumps(word_embeddings.tolist())+"\n")
log.write(json.dumps(relation_embeddings.tolist())+"\n")
log.write(json.dumps(position_embeddings.tolist())+"\n")
log.write(json.dumps(relation_attention.tolist())+"\n")
log.write(json.dumps(attention.tolist())+"\n")
log.write(json.dumps(W.tolist())+"\n")
log.write(json.dumps(B.tolist())+"\n")
log.write(json.dumps(transfer_w.tolist())+"\n")
NAattention = sess.run(m.NAattention)
log.write(json.dumps(NAattention.tolist()) + "\n")
log.close()
x_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32)
p_t_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32)
p_h_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32)
r_batch = np.zeros((1, 1), dtype = np.int32)
y_batch = np.zeros((1, config.num_classes), dtype = np.int32)
r_n_batch = np.zeros((1, 1), dtype = np.float32)
h_batch = np.zeros((1, 1), dtype = np.int32)
t_batch = np.zeros((1, 1), dtype = np.int32)
x_batch_addr = x_batch.__array_interface__['data'][0]
p_t_batch_addr = p_t_batch.__array_interface__['data'][0]
p_h_batch_addr = p_h_batch.__array_interface__['data'][0]
y_batch_addr = y_batch.__array_interface__['data'][0]
r_batch_addr = r_batch.__array_interface__['data'][0]
r_n_batch_addr = r_n_batch.__array_interface__['data'][0]
h_batch_addr = h_batch.__array_interface__['data'][0]
t_batch_addr = t_batch.__array_interface__['data'][0]
lib_cnn.batch_iter.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
tipTotal = lib_cnn.getTipTotal()
loop = 0
def train_cnn(coord):
def train_step_cnn(x_batch, p_h_batch, p_t_batch, y_batch, r_batch, r_n_batch, h_batch, t_batch):
global bags_sum, bags_hit, loss_sum, bags_hit_NA, bags_hit, sum_fNA, sum_NA
feed_dict = {
m.input_x: x_batch,
m.input_p_h: p_h_batch,
m.input_p_t: p_t_batch,
m.input_r: r_batch,
m.input_r_n: r_n_batch,
m.input_y: y_batch,
m.input_h: h_batch,
m.input_t: t_batch
}
_, step, loss, accuracy = sess.run(
[train_op_cnn, global_step_cnn, m.loss_cnn, m.accuracy], feed_dict)
time_str = datetime.datetime.now().isoformat()
loss_sum += loss
bags_sum += 1
if (r_batch[0]!=config.NA):
sum_fNA += 1
if accuracy > 0.5:
bags_hit += 1.0
else:
sum_NA += 1
if accuracy > 0.5:
bags_hit_NA += 1.0
if bags_sum % 1000 == 0:
if (sum_NA == 0):
sum_NA+=1
if (sum_fNA == 0):
sum_fNA+=1
print("{}: step {}, loss {:g}, acc {:g} acc {:g} {} {}".format(time_str, step, loss_sum/bags_sum, bags_hit_NA/sum_NA, bags_hit/sum_fNA, sum_NA, sum_fNA))
global loop
while not coord.should_stop():
print 'Looping ', loop
outEmbedding(str(loop))
for i in range(tipTotal):
length = lib_cnn.batch_iter(x_batch_addr, p_h_batch_addr, p_t_batch_addr, y_batch_addr, r_batch_addr, r_n_batch_addr, h_batch_addr, t_batch_addr)
train_step_cnn(x_batch[0:length,], p_h_batch[0:length,], p_t_batch[0:length,], y_batch, r_batch, r_n_batch, h_batch, t_batch)
global bags_sum, bags_hit, loss_sum, bags_hit_NA, bags_hit, sum_fNA, sum_NA
bags_sum = 0
bags_hit = 0
bags_hit_NA = 0
loss_sum = 0
sum_fNA = 0
sum_NA = 0
loop += 1
if loop == config.trainTimes:
coord.request_stop()
ph = np.zeros(config.batch_size, dtype = np.int32)
pt = np.zeros(config.batch_size, dtype = np.int32)
pr = np.zeros(config.batch_size, dtype = np.int32)
nh = np.zeros(config.batch_size, dtype = np.int32)
nt = np.zeros(config.batch_size, dtype = np.int32)
nr = np.zeros(config.batch_size, dtype = np.int32)
ph_addr = ph.__array_interface__['data'][0]
pt_addr = pt.__array_interface__['data'][0]
pr_addr = pr.__array_interface__['data'][0]
nh_addr = nh.__array_interface__['data'][0]
nt_addr = nt.__array_interface__['data'][0]
nr_addr = nr.__array_interface__['data'][0]
lib_kg.getBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
times_kg = 0
def train_kg(coord):
def train_step_kg(pos_h_batch, pos_t_batch, pos_r_batch, neg_h_batch, neg_t_batch, neg_r_batch):
feed_dict = {
m.pos_h: pos_h_batch,
m.pos_t: pos_t_batch,
m.pos_r: pos_r_batch,
m.neg_h: neg_h_batch,
m.neg_t: neg_t_batch,
m.neg_r: neg_r_batch
}
_, step, loss = sess.run(
[train_op_kg, global_step_kg, m.loss_kg], feed_dict)
return loss
global times_kg
while not coord.should_stop():
times_kg += 1
res = 0.0
for batch in range(config.nbatches):
lib_kg.getBatch(ph_addr, pt_addr, pr_addr, nh_addr, nt_addr, nr_addr, config.batch_size)
res += train_step_kg(ph, pt, pr, nh, nt, nr)
coord = tf.train.Coordinator()
threads = []
threads.append(threading.Thread(target=train_kg, args=(coord,)))
threads.append(threading.Thread(target=train_cnn, args=(coord,)))
for t in threads: t.start()
coord.join(threads)
|
subprocess.py | # coding: utf-8
"""
Calling shell processes.
"""
import shlex
import threading
import traceback
from subprocess import Popen, PIPE
from .string import is_string
__author__ = "Matteo Giantomass"
__copyright__ = "Copyright 2014, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo@gmail.com"
__date__ = "10/26/14"
class Command:
    """
    Enables to run subprocess commands in a different thread with TIMEOUT
    option.
    Based on jcollado's solution:
    http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
    and
    https://gist.github.com/kirpit/1306188
    .. attribute:: retcode
        Return code of the subprocess
    .. attribute:: killed
        True if subprocess has been killed due to the timeout
    .. attribute:: output
        stdout of the subprocess
    .. attribute:: error
        stderr of the subprocess
    Example:
        com = Command("sleep 1").run(timeout=2)
        print(com.retcode, com.killed, com.output, com.error)
    """
    def __init__(self, command):
        """
        :param command: Command to execute. A string is tokenized with
            shlex.split; a list is passed to Popen as-is.
        """
        if is_string(command):
            command = shlex.split(command)
        self.command = command
        self.process = None
        self.retcode = None
        self.output, self.error = "", ""
        self.killed = False
    def __str__(self):
        return "command: %s, retcode: %s" % (self.command, self.retcode)
    def run(self, timeout=None, **kwargs):
        """
        Run a command in a separated thread and wait timeout seconds.
        kwargs are keyword arguments passed to Popen.
        Return: self
        """
        def target(**kw):
            # Runs in the worker thread; communicate() blocks until the
            # child exits (or until the main thread terminates it on timeout).
            try:
                # print('Thread started')
                self.process = Popen(self.command, **kw)
                self.output, self.error = self.process.communicate()
                self.retcode = self.process.returncode
                # print('Thread stopped')
            except Exception:
                # Record the traceback in `error` and use -1 as a sentinel
                # return code instead of letting the thread die silently.
                self.error = traceback.format_exc()
                self.retcode = -1
        # default stdout and stderr
        if "stdout" not in kwargs:
            kwargs["stdout"] = PIPE
        if "stderr" not in kwargs:
            kwargs["stderr"] = PIPE
        # thread
        thread = threading.Thread(target=target, kwargs=kwargs)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # Timed out: kill the child so communicate() returns, then wait
            # for the worker thread to finish updating our attributes.
            # print("Terminating process")
            self.process.terminate()
            self.killed = True
            thread.join()
        return self
|
test_drivers_local.py | import os
import shutil
import multiprocessing as mp
import time
import hashlib
import pytest
from cloudstorage.drivers.local import LocalDriver
from cloudstorage.exceptions import (
CredentialsError,
IsNotEmptyError,
NotFoundError,
SignatureExpiredError,
)
from tests import settings
from tests.helpers import random_container_name, uri_validator
if settings.LOCAL_KEY and not os.path.exists(settings.LOCAL_KEY):
os.makedirs(settings.LOCAL_KEY)
pytestmark = pytest.mark.skipif(
not os.path.isdir(settings.LOCAL_KEY), reason="Directory does not exist."
)
@pytest.fixture(scope="module")
def storage():
driver = LocalDriver(key=settings.LOCAL_KEY, secret=settings.LOCAL_SECRET)
yield driver
for container in driver: # cleanup
if container.name.startswith(settings.CONTAINER_PREFIX):
for blob in container:
blob.delete()
container.delete()
shutil.rmtree(settings.LOCAL_KEY)
def test_driver_validate_credentials():
if os.name == "nt":
pytest.skip("skipping Windows incompatible test")
driver = LocalDriver(key=settings.LOCAL_KEY)
assert driver.validate_credentials() is None
driver = LocalDriver(key="/")
with pytest.raises(CredentialsError) as excinfo:
driver.validate_credentials()
assert excinfo.value
assert excinfo.value.message
# noinspection PyShadowingNames
def test_driver_create_container(storage):
container_name = random_container_name()
container = storage.create_container(container_name)
assert container_name in storage
assert container.name == container_name
# noinspection PyShadowingNames
def test_driver_get_container(storage, container):
container_get = storage.get_container(container.name)
assert container_get.name in storage
assert container_get == container
# noinspection PyShadowingNames
def test_container_get_invalid(storage):
container_name = random_container_name()
# noinspection PyTypeChecker
with pytest.raises(NotFoundError):
storage.get_container(container_name)
# noinspection PyShadowingNames
def test_container_delete(storage):
container_name = random_container_name()
container = storage.create_container(container_name)
container.delete()
assert container.name not in storage
def test_container_delete_not_empty(container, text_blob):
assert text_blob in container
# noinspection PyTypeChecker
with pytest.raises(IsNotEmptyError):
container.delete()
def test_container_enable_cdn(container):
assert not container.enable_cdn(), "Local does not support enabling CDN."
def test_container_disable_cdn(container):
assert not container.disable_cdn(), "Local does not support disabling CDN."
def test_container_cdn_url(container):
container.enable_cdn()
cdn_url = container.cdn_url
assert uri_validator(cdn_url)
assert container.name in cdn_url
# noinspection PyShadowingNames
def test_container_generate_upload_url(storage, container):
form_post = container.generate_upload_url(
settings.BINARY_FORM_FILENAME, **settings.BINARY_OPTIONS
)
assert "url" in form_post and "fields" in form_post
assert "signature" in form_post["fields"]
signature = form_post["fields"]["signature"]
payload = storage.validate_signature(signature)
assert (
payload["content_disposition"] == settings.BINARY_OPTIONS["content_disposition"]
)
assert payload["cache_control"] == settings.BINARY_OPTIONS["cache_control"]
assert payload["blob_name"] == settings.BINARY_FORM_FILENAME
assert payload["container"] == container.name
assert payload["meta_data"] == settings.BINARY_OPTIONS["meta_data"]
# noinspection PyShadowingNames
def test_container_generate_upload_url_expiration(storage, container):
form_post = container.generate_upload_url(settings.TEXT_FORM_FILENAME, expires=-10)
signature = form_post["fields"]["signature"]
with pytest.raises(SignatureExpiredError):
storage.validate_signature(signature)
def test_container_get_blob(container, text_blob):
text_get_blob = container.get_blob(text_blob.name)
assert text_get_blob == text_blob
def test_container_get_blob_invalid(container):
blob_name = random_container_name()
# noinspection PyTypeChecker
with pytest.raises(NotFoundError):
container.get_blob(blob_name)
def test_blob_upload_path(container, text_filename):
blob = container.upload_blob(text_filename)
assert blob.name == settings.TEXT_FILENAME
assert blob.checksum == settings.TEXT_MD5_CHECKSUM
def test_blob_windows_xattr(container, text_filename):
if os.name != "nt":
pytest.skip("skipping Windows-only test")
container.upload_blob(text_filename, meta_data={"test": "testvalue"})
try:
container.get_blob(".{}.xattr".format(settings.TEXT_FILENAME))
pytest.fail("should not be possible to get internal xattr file")
except NotFoundError:
pass
def test_blob_windows_xattr_list(container, text_filename):
if os.name != "nt":
pytest.skip("skipping Windows-only test")
container.upload_blob(text_filename, meta_data={"test": "testvalue"})
for blobitem in container:
if blobitem.name.startswith(".") and blobitem.name.endswith(".xattr"):
pytest.fail("should not be possible to get internal xattr file")
def test_blob_upload_stream(container, binary_stream):
blob = container.upload_blob(
filename=binary_stream,
blob_name=settings.BINARY_STREAM_FILENAME,
**settings.BINARY_OPTIONS,
)
assert blob.name == settings.BINARY_STREAM_FILENAME
assert blob.checksum == settings.BINARY_MD5_CHECKSUM
def test_blob_upload_stream_interrupted(container, binary_bytes):
    """Killing an in-flight upload must leave only the partial ``.tmp`` blob."""
    BLOB_NAME = "data.bin"
    md5 = hashlib.md5()
    md5.update(binary_bytes.getbuffer())
    md5_checksum = md5.hexdigest()  # was "mk5_checksum" (typo)

    def _upload():
        container.upload_blob(filename=binary_bytes, blob_name=BLOB_NAME)

    p = mp.Process(target=_upload)
    p.start()
    time.sleep(0.01)
    # Process.kill() sends SIGKILL like the old os.kill(p.pid, 9), without
    # the magic signal number or touching the raw pid.
    p.kill()
    p.join()
    # the interrupted upload leaves a partial ".tmp" file with a wrong checksum...
    bad_blob = container.get_blob(BLOB_NAME + ".tmp")
    assert bad_blob.checksum != md5_checksum
    bad_blob.delete()
    # ...and the final blob name must not exist
    with pytest.raises(NotFoundError):
        container.get_blob(BLOB_NAME)
@pytest.mark.skipif(
settings.LOCAL_KEY.startswith("/tmp"),
reason="Extended attributes are not supported for tmpfs file system.",
)
def test_blob_upload_options(container, binary_stream):
blob = container.upload_blob(
binary_stream,
blob_name=settings.BINARY_STREAM_FILENAME,
**settings.BINARY_OPTIONS,
)
assert blob.name == settings.BINARY_STREAM_FILENAME
assert blob.checksum == settings.BINARY_MD5_CHECKSUM
assert blob.meta_data == settings.BINARY_OPTIONS["meta_data"]
assert blob.content_type == settings.BINARY_OPTIONS["content_type"]
assert blob.content_disposition == settings.BINARY_OPTIONS["content_disposition"]
assert blob.cache_control == settings.BINARY_OPTIONS["cache_control"]
def test_blob_delete(container, text_blob):
text_blob.delete()
assert text_blob not in container
def test_blob_cdn_url(binary_blob):
cdn_url = binary_blob.cdn_url
assert uri_validator(cdn_url)
assert binary_blob.container.name in cdn_url
assert binary_blob.name in cdn_url
# noinspection PyShadowingNames
def test_blob_generate_download_url(storage, binary_blob):
content_disposition = settings.BINARY_OPTIONS.get("content_disposition")
signature = binary_blob.generate_download_url(
content_disposition=content_disposition
)
payload = storage.validate_signature(signature)
assert payload["blob_name"] == binary_blob.name
assert payload["container"] == binary_blob.container.name
assert payload["content_disposition"] == content_disposition
# noinspection PyShadowingNames
def test_blob_generate_download_url_expiration(storage, binary_blob):
signature = binary_blob.generate_download_url(expires=-10)
with pytest.raises(SignatureExpiredError):
storage.validate_signature(signature)
|
mqtt_redis_gateway_py3.py | #
# MQTT To Redis Bridge
#
#
#
import json
import msgpack
import base64
import redis
from redis_support_py3.mqtt_to_redis_py3 import MQTT_TO_REDIS_BRIDGE_STORE
import paho.mqtt.client as mqtt
import ssl
class MQTT_Redis_Bridge(object):
    """Bridge that subscribes to an MQTT topic and stores each message in Redis.

    NOTE(review): the constructor never returns -- it ends by calling
    client.loop_forever(), so merely instantiating the class runs the bridge.
    """
    def __init__(self, redis_site_data):
        self.redis_site_data = redis_site_data
        # store helper; 100 is presumably a capacity/depth limit -- confirm
        # against MQTT_TO_REDIS_BRIDGE_STORE
        self.mqtt_bridge = MQTT_TO_REDIS_BRIDGE_STORE(redis_site_data, 100)
        self.client = mqtt.Client(client_id="", clean_session=True, userdata=None, transport="tcp")
        # TLS client certificate; CERT_NONE disables server-certificate verification
        self.client.tls_set(certfile="../mosquitto/certs/client.crt", keyfile="../mosquitto/certs/client.key", cert_reqs=ssl.CERT_NONE)
        # fetch the MQTT password for user "cloud" from the Redis password db
        redis_handle_pw = redis.StrictRedis(redis_site_data["host"],
                                            redis_site_data["port"],
                                            db=redis_site_data["redis_password_db"],
                                            decode_responses=True)
        self.client.username_pw_set("cloud", redis_handle_pw.hget("mosquitto_local", "cloud"))
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
        self.client.connect(redis_site_data["mqtt_server"], redis_site_data["mqtt_port"], 60)
        self.client.loop_forever()  # blocks; the callbacks below run from here
    def on_connect(self, client, userdata, flags, rc):
        # (re)subscribe on every connect so the subscription survives reconnects
        print("Connected with result code " + str(rc), self.redis_site_data["mqtt_topic"])
        self.client.subscribe(self.redis_site_data["mqtt_topic"])
    # The callback for when a PUBLISH message is received from the server.
    def on_message(self, client, userdata, msg):
        print(msg.topic + " " + str(msg.payload))
        self.mqtt_bridge.store_mqtt_data(msg.topic, msg.payload)
__test__ = False
if __name__ == "__main__":
file_handle = open("system_data_files/redis_server.json",'r')
data = file_handle.read()
file_handle.close()
redis_site_data = json.loads(data)
if __test__== False:
MQTT_Redis_Bridge(redis_site_data)
else:
#test code
import time
from threading import Thread
from redis_support_py3.mqtt_client_py3 import MQTT_CLIENT
from redis_support_py3.mqtt_to_redis_py3 import MQTT_TO_REDIS_BRIDGE_RETRIEVE
def test_driver(redis_site_data):
MQTT_Redis_Bridge(redis_site_data)
file_handle = open("system_data_files/redis_server.json",'r')
data = file_handle.read()
file_handle.close()
redis_site_data = json.loads(data)
server = Thread(target=test_driver,args=(redis_site_data,))
server.start()
mqtt_client = MQTT_CLIENT(redis_site_data,redis_site_data["mqtt_server"],redis_site_data["mqtt_port"],"pi","mosquitto_local")
print("client connected",mqtt_client.connect())
print("starting to publish")
print("message published",mqtt_client.publish("REMOTES/SLAVE:Node_1/TEMPERATURE:Case",msgpack.packb(72,use_bin_type = True )))
print("made it here")
mqtt_retreive = MQTT_TO_REDIS_BRIDGE_RETRIEVE(redis_site_data)
print("instantiated class")
time.sleep(1) # let message be published
query_list = []
mqtt_retreive.add_mqtt_match_relationship( query_list,"SLAVE" )
print("Match on SLAVE",mqtt_retreive.match_mqtt_list( query_list ))
query_list = []
mqtt_retreive.add_mqtt_match_relationship( query_list,"SLAVE",label= "Node_1" )
print("Match on SLAVE:Node_1",mqtt_retreive.match_mqtt_list( query_list ))
query_list = []
mqtt_retreive.add_mqtt_match_relationship( query_list,"TEMPERATURE",label= "Case" )
print("Match on TEMPERATURE:Case",mqtt_retreive.match_mqtt_list( query_list ))
query_list = []
mqtt_retreive.add_mqtt_match_terminal( query_list,"TEMPERATURE" )
print("Match on TEMPERATURE",mqtt_retreive.match_mqtt_list( query_list ))
query_list = []
mqtt_retreive.add_mqtt_match_terminal( query_list,"TEMPERATURE",label= "Case" )
nodes = mqtt_retreive.match_mqtt_list( query_list)
print("Match on TEMPERATURE:Case",nodes)
nodes = list(nodes)
print(mqtt_retreive.xrange_namespace_list( nodes, "+", "-" , count=100))
|
client_threading_demo.py | # -*- coding: utf-8 -*-
import threading
import logging
import time
from tutorial import TutorialService
from bfd.harpc import client
from bfd.harpc.common import config
# Benchmark parameters: 15 concurrent threads, 10 requests per thread.
threads = 15
req_num = 10
data = []
# Shared failure counter, incremented by the worker threads in process().
error = 0
# Build a ~10 KiB printable-ASCII payload to echo on every request.
for i in range(0,10240):
    data.append(chr(i%64 + 32))
test_msg= ''.join(data)
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                    filename='./logs/clientdemo.log',
                    filemode='w')
def process(proxy_client, process_num, _lock=threading.Lock()):
    """Issue `req_num` echo RPCs on the shared proxy, counting failures.

    :param proxy_client: thrift proxy shared by all benchmark threads
    :param process_num: index of this worker, used only for the end message
    :param _lock: intentionally a shared default so every thread serializes
        updates to the module-level `error` counter (`error = error + 1` is
        not atomic and could otherwise lose increments under contention).
    """
    global error
    for i in range(0, req_num):
        try:
            proxy_client.echo(test_msg)
        except Exception as e:
            with _lock:
                error = error + 1
            # parenthesized single-argument print works under Python 2 and 3
            print("request error %s" % e)
    print("process_num:%s end" % process_num)
if __name__ == '__main__':
    # read config file
    #conf = config.Config("./etc/demo_client.conf")
    # configure the client via zookeeper-based service discovery
    conf = config.Config()
    conf.set("client", "service", "python_test$EchoService")
    conf.set("client", "zk_connect_str", "172.18.1.22:2181")
    manager = client.Client(TutorialService.Client, conf)
    proxy_client = manager.create_proxy()
    jobs = []
    # Spawn the worker threads; they all share the single proxy_client.
    start = time.time()
    for i in range(0, threads):
        td = threading.Thread(target=process, args=(proxy_client, i))
        td.start()
        jobs.append(td)
    for job in jobs:
        job.join()
    end = time.time()
    req_time = end - start
    total = req_num * threads
    # Parenthesized single-argument prints are valid under Python 2 and 3,
    # consistent with the print() call already used inside process().
    print("total : %s" % total)
    print("total time: %s" % req_time)
    print("error num : %s" % error)
    print("tps : %s" % (total / req_time))
    manager.close()
|
app.py | from flask import render_template, Flask, request, jsonify
from flask_socketio import SocketIO
from queue import Queue
from termcolor import colored
import json
import subprocess
import sys
import threading
import time
import logging
import os
import signal
import speech_recognition as sr
# Map of process-name -> subprocess.Popen for modules launched by the app.
processes = {}
app = Flask(__name__)
# Silence Flask's and werkzeug's request logging.
app.logger.disabled = True
logger = logging.getLogger('werkzeug')
logger.disabled = True
settingsFile = "settings.json"
socket = SocketIO(app)
# Recorded microphone audio awaiting recognition (filled by the listen thread,
# drained by the worker thread).
audio_queue = Queue()
recognizer = sr.Recognizer()
# In-memory flags that are not persisted to settings.json.
settings_local = {
    "do_listen": True
}
def load(jf):
    """Read the JSON file at path `jf` and return the decoded object."""
    # The `with` block closes the handle; the explicit close() that used to
    # follow was redundant.
    with open(jf) as f:
        return json.load(f)
def settings():
    # Re-read settings.json on every call so edits take effect without a
    # restart (several callers rely on this for live-updated values).
    return load(settingsFile)
def write(fp, new):
    """Serialize `new` as pretty-printed JSON to path `fp`, replacing any
    existing content."""
    # Mode 'w' truncates on open, so the previous seek(0)/explicit close()
    # dance (with mode 'w+') was unnecessary; the on-disk result is the same.
    with open(fp, 'w') as f:
        json.dump(new, f, indent=4)
def thread(f, args=(), daemon=None):
    """Start `f(*args)` on a new thread and return the Thread object.

    Returning the thread (instead of None) lets callers join() or inspect it;
    existing callers that ignore the return value are unaffected.
    """
    t = threading.Thread(target=f, args=args, daemon=daemon)
    t.start()
    return t
def botprint(resp):
    # Voice the bot's response: echo to stdout, then hand it to the
    # configured text-to-speech shell command.
    # NOTE(review): `resp` is interpolated unescaped into a shell command;
    # a response containing quotes/backticks can break or inject into the
    # shell. Consider subprocess.run([...], shell=False) -- confirm inputs.
    print(f"{bot_name} : {resp}")
    os.system(f'{settings()["say_command"]} "{resp}"')
# Load stuff from settings (paths of the module manifests and their folders).
modules_json = settings()['modules']['filepath']
startup_modules_json = settings()['startup_modules']['filepath']
modules_folder = settings()['modules']['content_folder']
startup_modules_folder = settings()['startup_modules']['content_folder']
bot_name = settings()['name']
# Apply the speech-recognition tuning values from settings.json.
recognition = settings()['voice_recognition']
recognizer.energy_threshold = recognition['energy_threshold']
recognizer.pause_threshold = recognition['pause_threshold']
recognizer.dynamic_energy_threshold = recognition['dynamic_energy_threshold']
# Process the given text
def process(text, ignore=None):
    """Match `text` against every configured module's triggers and launch the
    matching module scripts.

    :param text: recognized speech (or other command text) to dispatch
    :param ignore: process_name to skip (e.g. the module that produced `text`)
    """
    modules = load(modules_json)
    def run(process_name, file, block, args=None, get_output=True):
        # Launch one module script, either blocking or in the background,
        # optionally speaking its captured stdout via botprint().
        out = None
        launches = [settings()['launch'], file]
        if args:
            launches.append(args)
        if not block:
            def dontBlock():
                # Background runner: capture output and voice it.
                # NOTE(review): this `out` is local to dontBlock; the outer
                # `out` stays None on this path -- confirm that is intended.
                try:
                    out = subprocess.check_output(launches).decode(sys.stdout.encoding)
                    botprint(out)
                except Exception as e:
                    botprint(str(e))
            if get_output:
                thread(dontBlock)
            else:
                # Fire-and-forget: keep the handle so /killprocess can stop it.
                processes[process_name] = subprocess.Popen(launches)
            return
        # Will block
        try:
            if get_output:
                out = subprocess.check_output(launches).decode(sys.stdout.encoding)
                botprint(out)
            else:
                subprocess.call(launches)
        except Exception as e:
            botprint(str(e))
        return out
    # Higher-index modules get first chance to match.
    for module in sorted(modules['data'], key=lambda k: k['index'], reverse=True):
        block = module['block']
        file = f"{modules_folder}/{module['filename']}"
        get_output = module['get_output']
        process_name = module['process_name']
        if module['process_name'] != ignore:
            for trigger in module['triggers']:
                if module['query']:
                    # Trigger contains a " {q}" placeholder: match the prefix
                    # before it, then pass the remainder of `text` as the query.
                    # NOTE(review): if " {q}" is absent, find() returns -1 and
                    # the slice drops the last character -- confirm manifests
                    # always include the placeholder for query modules.
                    if text.startswith(trigger['trigger'][:trigger['trigger'].find(" {q}")]):
                        q = text[trigger['trigger'].find("{q}"):]
                        if q:
                            run(process_name, file, block, args=q, get_output=get_output)
                elif trigger['startswith']:
                    # Prefix match.
                    if text.startswith(trigger['trigger']):
                        run(process_name, file, block, get_output=get_output)
                else:
                    # Exact match.
                    if trigger['trigger'] == text:
                        run(process_name, file, block, get_output=get_output)
# Routes
@app.route("/processes")
def getProcesses():
    """Return the names of all launched module processes as JSON."""
    running = list(processes)
    return jsonify({"data": running})
@app.route("/killprocess/<process>")
def killprocess(process):
    """Terminate the named child process: 200 on success, 404 if unknown."""
    try:
        target = processes[process]
    except KeyError:
        code = 404
    else:
        target.terminate()
        code = 200
    return str(code), code
@app.route("/energy_threshold")
def _energy_threshold():
    """Get/set the recognizer's energy threshold.

    Query parameters:
        new:            optional integer; when present, becomes the threshold.
        updateSettings: 'yes' to also persist the new value to settings.json.

    Returns JSON {"old": <previous value>, "new": <applied value or null>}.
    """
    new = request.args.get('new')
    updateSettings = request.args.get('updateSettings')
    old = recognizer.energy_threshold
    # Parse once instead of three times; use `is not None` for the None test.
    # A non-numeric value still raises ValueError (-> 500), as before.
    parsed = int(new) if new is not None else None
    if parsed is not None:
        recognizer.energy_threshold = parsed
        if updateSettings == 'yes':
            r = settings()
            r['voice_recognition']['energy_threshold'] = parsed
            write(settingsFile, r)
    data = {
        "old": old,
        "new": parsed
    }
    return jsonify(data)
def main():
    # Launch every configured startup module as a child process and record
    # its handle so /killprocess can stop it later.
    print(colored("Started startup_modules thread", "blue"))
    # Load startup modules
    for module in load(startup_modules_json)['data']:
        processes[module['process_name']] = subprocess.Popen([settings()['launch'], f"{startup_modules_folder}/{module['filename']}"])
        print(colored(f"Started module : {module['process_name']}", "green"))
def worker():
    # Consumer loop: pull recorded audio off the queue, transcribe it with
    # Google speech recognition, and dispatch the text through process().
    print(colored("Started recognizer thread", 'blue'))
    while True:
        audio = audio_queue.get()
        if audio is None: break  # sentinel value shuts the worker down
        try:
            print(colored("Recognizing...", "yellow"))
            value = recognizer.recognize_google(audio)
            print(colored(f"You said : {value}", "green"))
            process(value)
        except sr.UnknownValueError:
            # Speech was unintelligible; drop it silently.
            pass
        # NOTE(review): sr.RequestError (network/API failure) is not caught
        # and would kill this thread -- confirm that is acceptable.
        audio_queue.task_done()
def listen():
    # Producer loop: continuously record from the default microphone and
    # enqueue audio chunks for the recognizer (worker) thread.
    time.sleep(3)  # give the other threads/startup modules time to come up
    print(colored("Started listener thread", "blue"))
    # Wait for your command
    with sr.Microphone() as source:
        try:
            while True:
                audio_queue.put(recognizer.listen(source))
        except KeyboardInterrupt:
            pass
if __name__ == '__main__':
    # Start the background workers, then serve the Flask/SocketIO app.
    thread(main)
    thread(worker, daemon=True)
    thread(listen, daemon=True)
    socket.run(app, port=settings()['port'], use_reloader=settings()['use_reloader'], host=settings()['host'], debug=settings()['debug'])
|
MCMC.py | #!/usr/bin/env python
# =============================
# Import the necessary binaries
# =============================
import subprocess
import time
import os, sys
import json, requests
from multiprocessing import Pool, Process
import numpy as np
import signal
from scenario_var import scenario_var
import logging as lg
# =====================================
# Check Presence of Storage Directories
# =====================================
def path_checker():
    """Return 1 when the scenario data must be (re)generated, 0 otherwise.

    Generation is needed unless both ./Data and ./Data/Temp already exist
    under the current working directory.
    """
    data_dir = os.getcwd() + '/Data'
    temp_dir = os.getcwd() + '/Data/Temp'
    if os.path.isdir(data_dir) and os.path.isdir(temp_dir):
        return 0
    return 1
# ==================================
# Create a Telegram Bot Communicator
# ==================================
# Bot token is intentionally blank here; it must be filled in for the
# Telegram progress notifications below to work.
TOKEN = ""
URL = "https://api.telegram.org/bot{}/".format(TOKEN)
def get_url(url):
    """GET `url` and return the response body decoded as UTF-8 text."""
    return requests.get(url).content.decode("utf8")
def get_json_from_url(url):
    """GET `url` and parse the response body as JSON."""
    return json.loads(get_url(url))
def get_updates():
    """Poll the Telegram getUpdates endpoint and return the parsed reply."""
    return get_json_from_url(URL + "getUpdates")
def last_chat_id(updates):
    """Return the chat id of the most recent update in a getUpdates reply.

    Raises IndexError when the result list is empty.
    """
    return updates["result"][-1]["message"]["chat"]["id"]
def send_message(text, chat_id):
    """Send `text` to `chat_id` via the Telegram sendMessage endpoint."""
    # NOTE(review): `text` is not URL-encoded; characters such as '&' would
    # truncate the message -- confirm callers only send plain text.
    get_url(URL + "sendMessage?text={}&chat_id={}".format(text, chat_id))
# ==========================
# Parallel Process Function
# ==========================
def parallel_executor(iter_num):
    # Worker for Pool.map: generate scenario data for one Monte Carlo
    # iteration by invoking main.py as a subprocess (interference flag off).
    # NOTE: under Python 2 this prints a tuple, e.g. ('Iteration number:', 3).
    print ("Iteration number:", iter_num)
    subprocess.call(['python',os.path.join(os.getcwd(),"main.py"), '-iter', str(iter_num), '-interf', str(0)])
def _run_optimizer(MCMC_iter, chat_frequency, minRate, dual, bhaul, latency, mipGP, label):
    """Run optimizer_func.py once per Monte Carlo iteration with the given
    constraint flags, reporting progress over Telegram.

    :param MCMC_iter: number of iterations to run
    :param chat_frequency: send a progress message every chat_frequency iters
    :param minRate/dual/bhaul/latency/mipGP: '0'/'1' flag strings forwarded
        to optimizer_func.py
    :param label: scenario tag used in the progress message (e.g. "DA + MRT")

    This helper replaces sixteen near-identical copies of the same loop;
    each public wrapper below keeps its original name and signature.
    """
    for i in range(MCMC_iter):
        chat = last_chat_id(get_updates())  # Get the Bot chat ID
        try:
            subprocess.call(['python', os.path.join(os.getcwd(), "optimizer_func.py"),
                             '-iter', str(i),
                             '-minRate', minRate, '-dual', dual,
                             '-bhaul', bhaul, '-latency', latency,
                             '-mipGP', mipGP])
            if i % chat_frequency == 0:
                try:
                    message = "Execution of Iteration " + str(i) + " Completed for " + label
                    send_message(message, chat)  # Send the progress message
                except (RuntimeError, TypeError, NameError, IndexError):
                    pass  # best-effort notification; ignore Telegram hiccups
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed.
        except Exception:
            message = "Programme has encountered an error"
            send_message(message, chat)  # Report the failure
            message = "Ending the Processing for Debugging"
            send_message(message, chat)  # Send the end-of-process message
            sys.exit('Error Encountered')

def Single_assoc(MCMC_iter, chat_frequency):
    # Baseline single association: all constraints off.
    print(MCMC_iter)
    print(chat_frequency)
    _run_optimizer(MCMC_iter, chat_frequency, '0', '0', '0', '0', '0', "SA")

def Dual_assoc(MCMC_iter, chat_frequency):
    # Dual association, no extra constraints.
    _run_optimizer(MCMC_iter, chat_frequency, '0', '1', '0', '0', '0', "DA")

def DA_MRT(MCMC_iter, chat_frequency):
    # Dual association + minimum-rate constraint.
    _run_optimizer(MCMC_iter, chat_frequency, '1', '1', '0', '0', '1', "DA + MRT")

def DA_BHCAP(MCMC_iter, chat_frequency):
    # Dual association + backhaul capacity constraint.
    _run_optimizer(MCMC_iter, chat_frequency, '0', '1', '1', '0', '1', "DA + BHCAP")

def DA_BHCAP_LAT(MCMC_iter, chat_frequency):
    # Dual association + backhaul capacity + latency constraints.
    _run_optimizer(MCMC_iter, chat_frequency, '0', '1', '1', '1', '1', "DA + BHCAP + LAT")

def DA_LAT(MCMC_iter, chat_frequency):
    # Dual association + latency constraint.
    _run_optimizer(MCMC_iter, chat_frequency, '0', '1', '0', '1', '0', "DA + LAT")

def DA_MRT_LAT(MCMC_iter, chat_frequency):
    # Dual association + minimum-rate + latency constraints.
    _run_optimizer(MCMC_iter, chat_frequency, '1', '1', '0', '1', '0', "DA + MRT + LAT")

def SA_MRT(MCMC_iter, chat_frequency):
    # Single association + minimum-rate constraint.
    _run_optimizer(MCMC_iter, chat_frequency, '1', '0', '0', '0', '0', "SA + MRT")

def SA_BHCAP(MCMC_iter, chat_frequency):
    # Single association + backhaul capacity constraint.
    _run_optimizer(MCMC_iter, chat_frequency, '0', '0', '1', '0', '1', "SA + BHCAP")

def SA_BHCAP_LAT(MCMC_iter, chat_frequency):
    # Single association + backhaul capacity + latency constraints.
    _run_optimizer(MCMC_iter, chat_frequency, '0', '0', '1', '1', '1', "SA + BHCAP + LAT")

def SA_LAT(MCMC_iter, chat_frequency):
    # Single association + latency constraint.
    _run_optimizer(MCMC_iter, chat_frequency, '0', '0', '0', '1', '0', "SA + LAT")

def SA_MRT_LAT(MCMC_iter, chat_frequency):
    # Single association + minimum-rate + latency constraints.
    _run_optimizer(MCMC_iter, chat_frequency, '1', '0', '0', '1', '0', "SA + MRT + LAT")

def DA_MRT_BHCAP(MCMC_iter, chat_frequency):
    # Dual association + minimum-rate + backhaul capacity constraints.
    _run_optimizer(MCMC_iter, chat_frequency, '1', '1', '1', '0', '1', "DA + MRT + BHCAP")

def DA_MRT_BHCAP_LAT(MCMC_iter, chat_frequency):
    # Dual association + minimum-rate + backhaul + latency constraints.
    _run_optimizer(MCMC_iter, chat_frequency, '1', '1', '1', '1', '1', "DA + MRT + BHCAP + LAT")

def SA_MRT_BHCAP(MCMC_iter, chat_frequency):
    # Single association + minimum-rate + backhaul capacity constraints.
    _run_optimizer(MCMC_iter, chat_frequency, '1', '0', '1', '0', '0', "SA + MRT + BHCAP")

def SA_MRT_BHCAP_LAT(MCMC_iter, chat_frequency):
    # Single association + minimum-rate + backhaul + latency constraints.
    _run_optimizer(MCMC_iter, chat_frequency, '1', '0', '1', '1', '0', "SA + MRT + BHCAP + LAT")
# ======================
# Monte Carlo Simulation
# ======================
sys.path.append(os.getcwd()); # Add current working directory to python path
os.chdir(os.getcwd()); # Change to the current working directory (a no-op; kept as-is)
chat_frequency = 10; # Send a Telegram update every chat_frequency iterations
#num_processors = int(int(subprocess.check_output(['nproc']))/2)*2; # Number of Processors to be utilized
num_processors = 2
scn = scenario_var();
MCMC_iter = scn.MCMC_iter; # Number of Monte Carlo Iterations
# =============
# Main Function
if __name__ == '__main__':
    dat_gen_flag = path_checker()  # Get the data-generation flag value
    if dat_gen_flag == 1:
        # Scenario data is missing: generate it in parallel batches.
        file_indexer = 0  # running index of the next data file to generate
        pool = Pool(processes=num_processors)
        # `//` keeps the integer semantics explicit under Python 2 and 3.
        for i in range(0, MCMC_iter // num_processors):
            # Parenthesized single-argument prints work under Python 2 and 3.
            print("Entering Round " + str(i) + " of Processing")
            print("------------------------------")
            print("")
            idx_range = np.arange(file_indexer, file_indexer + num_processors)  # data file index numbers
            pool.map(parallel_executor, idx_range.tolist())  # run one batch in parallel
            file_indexer = file_indexer + num_processors  # advance the file index
            print(file_indexer)
        pool.close()
        pool.join()
    print("Entering the Optimizer")
    # =====================================================
    # Multiple Processes for Parallel Scenario Optimization
    # One process per scenario variant; the list replaces 16 hand-numbered
    # p1..p16 variables while preserving the original start/join order.
    optimizers = [Single_assoc, Dual_assoc, DA_MRT, DA_BHCAP, DA_BHCAP_LAT,
                  DA_LAT, SA_MRT, SA_LAT, SA_BHCAP_LAT, SA_BHCAP, DA_MRT_LAT,
                  SA_MRT_LAT, DA_MRT_BHCAP, DA_MRT_BHCAP_LAT, SA_MRT_BHCAP,
                  SA_MRT_BHCAP_LAT]
    workers = [Process(target=fn, args=(MCMC_iter, chat_frequency)) for fn in optimizers]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
#for i in range(MCMC_iter):
# try:
# #subprocess.check_call(['python',os.path.join(os.getcwd(),"main.py")]); # Open Main File for Generating the scenario
# subprocess.check_call(['python',os.path.join(os.getcwd(),"optimizer_func.py"),'-iter', str(i) ,'-minRate', '0','-dual', '0','-bhaul', '0','-latency', '1'])
# chat = last_chat_id(get_updates()) # Get the Bot Chat ID
# if i%chat_frequency == 0:
# try:
# message = "Execution of Iteration " + str(i) + " Completed"
# send_message(message,chat) # Send the Message
# except(RuntimeError, TypeError, NameError, IndexError):
# pass
# except:
# message = "Programme has encountered an error"
# send_message(message, chat) # Send the message if an error has been encountered in the code
# message = "Ending the Processing for Debugging"
# send_message(message, chat) # Send the End process message
# sys.exit('Error Encountered')
|
test_pubsub.py | # -*- coding: utf-8 -*-
# python std lib
from __future__ import unicode_literals
import threading
import time
# rediscluster imports
from rediscluster.client import RedisCluster
# 3rd party imports
import pytest
# import redis
from redis import Redis
from redis.exceptions import ConnectionError
from redis._compat import basestring, unichr
from .conftest import _get_client
from .conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt
def wait_for_message(pubsub, timeout=0.5, ignore_subscribe_messages=False):
    """Poll `pubsub` for up to `timeout` seconds and return the first
    message seen, or None if the deadline passes without one."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        msg = pubsub.get_message(
            ignore_subscribe_messages=ignore_subscribe_messages)
        if msg is not None:
            return msg
        time.sleep(0.01)
    return None
def make_message(type, channel, data, pattern=None):
    """Build the message dict that redis-py delivers for a pub/sub event.

    `pattern` and `channel` are UTF-8 encoded (or None when falsy); string
    payloads are encoded too, any other payload passes through untouched.
    """
    encoded_pattern = pattern.encode('utf-8') if pattern else None
    encoded_channel = channel.encode('utf-8') if channel else None
    payload = data.encode('utf-8') if isinstance(data, basestring) else data
    return {
        'type': type,
        'pattern': encoded_pattern,
        'channel': encoded_channel,
        'data': payload,
    }
def make_subscribe_test_data(pubsub, type):
    """Bundle a pubsub object with the sub/unsub callables, the expected
    message types, and the test keys for 'channel' or 'pattern' mode."""
    if type == 'channel':
        return {
            'p': pubsub,
            'sub_type': 'subscribe',
            'unsub_type': 'unsubscribe',
            'sub_func': pubsub.subscribe,
            'unsub_func': pubsub.unsubscribe,
            'keys': ['foo', 'bar', 'uni' + unichr(4456) + 'code'],
        }
    if type == 'pattern':
        return {
            'p': pubsub,
            'sub_type': 'psubscribe',
            'unsub_type': 'punsubscribe',
            'sub_func': pubsub.psubscribe,
            'unsub_func': pubsub.punsubscribe,
            'keys': ['f*', 'b*', 'uni' + unichr(4456) + '*'],
        }
    assert False, 'invalid subscribe type: {0}'.format(type)
class TestPubSubSubscribeUnsubscribe(object):
def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func, unsub_func, keys):
for key in keys:
assert sub_func(key) is None
# should be a message for each channel/pattern we just subscribed to
for i, key in enumerate(keys):
assert wait_for_message(p) == make_message(sub_type, key, i + 1)
for key in keys:
assert unsub_func(key) is None
# should be a message for each channel/pattern we just unsubscribed
# from
for i, key in enumerate(keys):
i = len(keys) - 1 - i
assert wait_for_message(p) == make_message(unsub_type, key, i)
def test_channel_subscribe_unsubscribe(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
self._test_subscribe_unsubscribe(**kwargs)
def test_pattern_subscribe_unsubscribe(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
self._test_subscribe_unsubscribe(**kwargs)
def _test_resubscribe_on_reconnection(self, p, sub_type, sub_func, keys, *args, **kwargs):
for key in keys:
assert sub_func(key) is None
# should be a message for each channel/pattern we just subscribed to
for i, key in enumerate(keys):
assert wait_for_message(p) == make_message(sub_type, key, i + 1)
# manually disconnect
p.connection.disconnect()
# calling get_message again reconnects and resubscribes
# note, we may not re-subscribe to channels in exactly the same order
# so we have to do some extra checks to make sure we got them all
messages = []
for i, _ in enumerate(keys):
messages.append(wait_for_message(p))
unique_channels = set()
assert len(messages) == len(keys)
for i, message in enumerate(messages):
assert message['type'] == sub_type
assert message['data'] == i + 1
assert isinstance(message['channel'], bytes)
channel = message['channel'].decode('utf-8')
unique_channels.add(channel)
assert len(unique_channels) == len(keys)
for channel in unique_channels:
assert channel in keys
def test_resubscribe_to_channels_on_reconnection(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
self._test_resubscribe_on_reconnection(**kwargs)
def test_resubscribe_to_patterns_on_reconnection(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
self._test_resubscribe_on_reconnection(**kwargs)
def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func, unsub_func, keys):
    """
    Shared scenario verifying that `p.subscribed` flips at command time and
    only clears once every unsubscribe confirmation has been read.

    :param p: pubsub object under test
    :param sub_type: expected subscribe message 'type'
    :param unsub_type: expected unsubscribe message 'type'
    :param sub_func: bound subscribe method
    :param unsub_func: bound unsubscribe method
    :param keys: channel or pattern names (at least two are used)
    """
    assert p.subscribed is False
    sub_func(keys[0])
    # we're now subscribed even though we haven't processed the
    # reply from the server just yet
    assert p.subscribed is True
    assert wait_for_message(p) == make_message(sub_type, keys[0], 1)
    # we're still subscribed
    assert p.subscribed is True

    # unsubscribe from all channels
    unsub_func()
    # we're still technically subscribed until we process the
    # response messages from the server
    assert p.subscribed is True
    assert wait_for_message(p) == make_message(unsub_type, keys[0], 0)
    # now we're no longer subscribed as no more messages can be delivered
    # to any channels we were listening to
    assert p.subscribed is False

    # subscribing again flips the flag back
    sub_func(keys[0])
    assert p.subscribed is True
    assert wait_for_message(p) == make_message(sub_type, keys[0], 1)

    # unsubscribe again
    unsub_func()
    assert p.subscribed is True
    # subscribe to another channel before reading the unsubscribe response
    sub_func(keys[1])
    assert p.subscribed is True
    # read the unsubscribe for key1
    assert wait_for_message(p) == make_message(unsub_type, keys[0], 0)
    # we're still subscribed to key2, so subscribed should still be True
    assert p.subscribed is True
    # read the key2 subscribe message
    assert wait_for_message(p) == make_message(sub_type, keys[1], 1)
    unsub_func()
    # haven't read the message yet, so we're still subscribed
    assert p.subscribed is True
    assert wait_for_message(p) == make_message(unsub_type, keys[1], 0)
    # now we're finally unsubscribed
    assert p.subscribed is False
def test_subscribe_property_with_channels(self, r):
    """Channel variant of the `subscribed` property scenario."""
    self._test_subscribed_property(
        **make_subscribe_test_data(r.pubsub(), 'channel'))
def test_subscribe_property_with_patterns(self, r):
    """Pattern variant of the `subscribed` property scenario."""
    self._test_subscribed_property(
        **make_subscribe_test_data(r.pubsub(), 'pattern'))
def test_ignore_all_subscribe_messages(self, r):
    """With ignore_subscribe_messages=True on the pubsub object itself,
    subscribe/unsubscribe confirmations are swallowed entirely."""
    p = r.pubsub(ignore_subscribe_messages=True)

    checks = (
        (p.subscribe, 'foo'),
        (p.unsubscribe, 'foo'),
        # pattern variants disabled, presumably because pattern pubsub is
        # not supported in cluster mode (see xfail reasons elsewhere)
        # (p.psubscribe, 'f*'),
        # (p.punsubscribe, 'f*'),
    )

    assert p.subscribed is False
    for func, channel in checks:
        assert func(channel) is None
        assert p.subscribed is True
        # the confirmation is consumed internally, so nothing surfaces
        assert wait_for_message(p) is None
    assert p.subscribed is False
def test_ignore_individual_subscribe_messages(self, r):
    """Same as test_ignore_all_subscribe_messages, but passing
    ignore_subscribe_messages per get_message call instead of globally."""
    p = r.pubsub()

    checks = (
        (p.subscribe, 'foo'),
        (p.unsubscribe, 'foo'),
        # pattern variants disabled, presumably because pattern pubsub is
        # not supported in cluster mode (see xfail reasons elsewhere)
        # (p.psubscribe, 'f*'),
        # (p.punsubscribe, 'f*'),
    )

    assert p.subscribed is False
    for func, channel in checks:
        assert func(channel) is None
        assert p.subscribed is True
        # per-call flag swallows just this confirmation
        message = wait_for_message(p, ignore_subscribe_messages=True)
        assert message is None
    assert p.subscribed is False
@pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode")
def test_sub_unsub_resub_channels(self, r):
    """Channel variant of the sub/unsub/resub regression scenario."""
    self._test_sub_unsub_resub(
        **make_subscribe_test_data(r.pubsub(), 'channel'))
@pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode")
def test_sub_unsub_resub_patterns(self, r):
    """Pattern variant of the sub/unsub/resub regression scenario."""
    self._test_sub_unsub_resub(
        **make_subscribe_test_data(r.pubsub(), 'pattern'))
def _test_sub_unsub_resub(self, p, sub_type, unsub_type, sub_func,
                          unsub_func, keys):
    """
    Regression scenario for
    https://github.com/andymccurdy/redis-py/issues/764: subscribe ->
    unsubscribe -> resubscribe on the same key must leave the pubsub
    object subscribed and deliver the three confirmations in order.

    Fix: removed the @pytest.mark.xfail decorator that was attached to this
    private helper — pytest only honors marks on collected test items, so
    the mark was inert here and misleading (the public test_* wrappers
    already carry it).
    """
    key = keys[0]
    sub_func(key)
    unsub_func(key)
    sub_func(key)
    assert p.subscribed is True
    assert wait_for_message(p) == make_message(sub_type, key, 1)
    assert wait_for_message(p) == make_message(unsub_type, key, 0)
    assert wait_for_message(p) == make_message(sub_type, key, 1)
    assert p.subscribed is True
@pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode")
def test_sub_unsub_all_resub_channels(self, r):
    """Channel variant of the sub/unsub-all/resub regression scenario."""
    self._test_sub_unsub_all_resub(
        **make_subscribe_test_data(r.pubsub(), 'channel'))
@pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode")
def test_sub_unsub_all_resub_patterns(self, r):
    """Pattern variant of the sub/unsub-all/resub regression scenario."""
    self._test_sub_unsub_all_resub(
        **make_subscribe_test_data(r.pubsub(), 'pattern'))
def _test_sub_unsub_all_resub(self, p, sub_type, unsub_type, sub_func,
                              unsub_func, keys):
    """
    Regression scenario for
    https://github.com/andymccurdy/redis-py/issues/764: subscribe ->
    unsubscribe-from-everything -> resubscribe must leave the pubsub
    object subscribed and deliver the three confirmations in order.

    Fix: removed the @pytest.mark.xfail decorator that was attached to this
    private helper — pytest only honors marks on collected test items, so
    the mark was inert here and misleading (the public test_* wrappers
    already carry it).
    """
    key = keys[0]
    sub_func(key)
    unsub_func()
    sub_func(key)
    assert p.subscribed is True
    assert wait_for_message(p) == make_message(sub_type, key, 1)
    assert wait_for_message(p) == make_message(unsub_type, key, 0)
    assert wait_for_message(p) == make_message(sub_type, key, 1)
    assert p.subscribed is True
class TestPubSubMessages(object):
    """
    Bug: Currently in cluster mode the publish command behaves differently
    than in standard/non-cluster mode. See (docs/Pubsub.md) for details.

    Currently plain Redis instances are used to test pubsub because they
    are easier to work with.
    """

    def get_strict_redis_node(self, port, host="127.0.0.1"):
        # Direct (non-cluster) client to a single node; ports 7000/7001 are
        # presumably the local test-cluster nodes — verify against CI setup.
        return Redis(port=port, host=host)

    def setup_method(self, *args):
        # Reset the capture slot used by message_handler before each test.
        self.message = None

    def message_handler(self, message):
        # Callback registered via subscribe()/psubscribe(); records the last
        # delivered message so tests can assert on it afterwards.
        self.message = message

    def test_published_message_to_channel(self):
        node = self.get_strict_redis_node(7000)
        p = node.pubsub()
        p.subscribe('foo')
        assert wait_for_message(p) == make_message('subscribe', 'foo', 1)
        # publish() returns the number of subscribers that received it
        assert node.publish('foo', 'test message') == 1

        message = wait_for_message(p)
        assert isinstance(message, dict)
        assert message == make_message('message', 'foo', 'test message')

        # Cleanup pubsub connections
        p.close()

    @pytest.mark.xfail(reason="This test is buggy and fails randomly")
    def test_publish_message_to_channel_other_server(self):
        """
        Test that pubsub still works across the cluster on different nodes
        """
        node_subscriber = self.get_strict_redis_node(7000)
        p = node_subscriber.pubsub(ignore_subscribe_messages=True)
        p.subscribe('foo')

        node_sender = self.get_strict_redis_node(7001)
        # This should return 0 because of no connected clients to this server.
        assert node_sender.publish('foo', 'test message') == 0
        message = wait_for_message(p)
        assert isinstance(message, dict)
        assert message == make_message('message', 'foo', 'test message')

        # Cleanup pubsub connections
        p.close()

    @pytest.mark.xfail(reason="Pattern pubsub do not work currently")
    def test_published_message_to_pattern(self, r):
        p = r.pubsub()
        p.subscribe('foo')
        p.psubscribe('f*')
        assert wait_for_message(p) == make_message('subscribe', 'foo', 1)
        assert wait_for_message(p) == make_message('psubscribe', 'f*', 2)
        # 1 to pattern, 1 to channel
        assert r.publish('foo', 'test message') == 2

        message1 = wait_for_message(p)
        message2 = wait_for_message(p)
        assert isinstance(message1, dict)
        assert isinstance(message2, dict)

        expected = [
            make_message('message', 'foo', 'test message'),
            make_message('pmessage', 'foo', 'test message', pattern='f*')
        ]

        # delivery order of the channel vs. pattern copy is not fixed,
        # so only check both arrived and that they are distinct
        assert message1 in expected
        assert message2 in expected
        assert message1 != message2

    def test_channel_message_handler(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.subscribe(foo=self.message_handler)
        assert wait_for_message(p) is None
        assert r.publish('foo', 'test message') == 1
        # handler-dispatched messages surface as None from get_message;
        # the payload lands in self.message instead
        assert wait_for_message(p) is None
        assert self.message == make_message('message', 'foo', 'test message')

    @pytest.mark.xfail(reason="Pattern pubsub do not work currently")
    def test_pattern_message_handler(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.psubscribe(**{'f*': self.message_handler})
        assert wait_for_message(p) is None
        assert r.publish('foo', 'test message') == 1
        assert wait_for_message(p) is None
        assert self.message == make_message('pmessage', 'foo', 'test message',
                                            pattern='f*')

    @pytest.mark.xfail(reason="Pattern pubsub do not work currently")
    def test_unicode_channel_message_handler(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        # non-ASCII channel name exercises the encoding path
        channel = 'uni' + unichr(4456) + 'code'
        channels = {channel: self.message_handler}
        print(channels)
        p.subscribe(**channels)
        assert wait_for_message(p) is None
        assert wait_for_message(p) is None
        assert r.publish(channel, 'test message') == 1
        assert wait_for_message(p) is None
        assert self.message == make_message('message', channel, 'test message')

    @pytest.mark.xfail(reason="Pattern pubsub do not work currently")
    def test_unicode_pattern_message_handler(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        # non-ASCII pattern/channel exercise the encoding path
        pattern = 'uni' + unichr(4456) + '*'
        channel = 'uni' + unichr(4456) + 'code'
        p.psubscribe(**{pattern: self.message_handler})
        assert r.publish(channel, 'test message') == 1
        assert wait_for_message(p) is None
        assert self.message == make_message('pmessage', channel,
                                            'test message', pattern=pattern)
class TestPubSubAutoDecoding(object):
    "These tests only validate that we get unicode values back"

    # non-ASCII fixtures force the decode_responses code path
    channel = 'uni' + unichr(4456) + 'code'
    pattern = 'uni' + unichr(4456) + '*'
    data = 'abc' + unichr(4458) + '123'

    def make_message(self, type, channel, data, pattern=None):
        # Local variant of the module-level make_message helper; keeps the
        # values exactly as passed (presumably unicode, since these tests
        # assert on decoded responses — verify against the o/r fixtures).
        return {
            'type': type,
            'channel': channel,
            'pattern': pattern,
            'data': data
        }

    def setup_method(self, *args):
        # reset the capture slot used by message_handler
        self.message = None

    def message_handler(self, message):
        # callback registered via subscribe()/psubscribe()
        self.message = message

    def test_channel_subscribe_unsubscribe(self, o):
        p = o.pubsub()
        p.subscribe(self.channel)
        assert wait_for_message(p) == self.make_message('subscribe',
                                                        self.channel, 1)

        p.unsubscribe(self.channel)
        assert wait_for_message(p) == self.make_message('unsubscribe',
                                                        self.channel, 0)

    @pytest.mark.xfail(reason="Pattern pubsub do not work currently")
    def test_pattern_subscribe_unsubscribe(self, o):
        p = o.pubsub()
        p.psubscribe(self.pattern)
        assert wait_for_message(p) == self.make_message('psubscribe',
                                                        self.pattern, 1)

        p.punsubscribe(self.pattern)
        assert wait_for_message(p) == self.make_message('punsubscribe',
                                                        self.pattern, 0)

    def test_channel_publish(self, r):
        p = r.pubsub()
        p.subscribe(self.channel)
        assert wait_for_message(p) == self.make_message('subscribe',
                                                        self.channel, 1)
        r.publish(self.channel, self.data)
        assert wait_for_message(p) == self.make_message('message',
                                                        self.channel,
                                                        self.data)

    @pytest.mark.xfail(reason="Pattern pubsub do not work currently")
    def test_pattern_publish(self, r):
        p = r.pubsub()
        p.psubscribe(self.pattern)
        assert wait_for_message(p) == self.make_message('psubscribe',
                                                        self.pattern, 1)
        r.publish(self.channel, self.data)
        assert wait_for_message(p) == self.make_message('pmessage',
                                                        self.channel,
                                                        self.data,
                                                        pattern=self.pattern)

    def test_channel_message_handler(self, o):
        p = o.pubsub(ignore_subscribe_messages=True)
        p.subscribe(**{self.channel: self.message_handler})
        o.publish(self.channel, self.data)
        assert wait_for_message(p) is None
        assert self.message == self.make_message('message', self.channel,
                                                 self.data)

        # test that we reconnected to the correct channel
        self.message = None
        p.connection.disconnect()
        assert wait_for_message(p) is None  # should reconnect
        new_data = self.data + 'new data'
        o.publish(self.channel, new_data)
        assert wait_for_message(p) is None
        assert self.message == self.make_message('message', self.channel,
                                                 new_data)

    @pytest.mark.xfail(reason="Pattern pubsub do not work currently")
    def test_pattern_message_handler(self, o):
        p = o.pubsub(ignore_subscribe_messages=True)
        p.psubscribe(**{self.pattern: self.message_handler})
        assert wait_for_message(p) is None
        o.publish(self.channel, self.data)
        assert wait_for_message(p) is None
        assert self.message == self.make_message('pmessage', self.channel,
                                                 self.data,
                                                 pattern=self.pattern)

        # test that we reconnected to the correct pattern
        self.message = None
        p.connection.disconnect()
        assert wait_for_message(p) is None  # should reconnect
        new_data = self.data + 'new data'
        o.publish(self.channel, new_data)
        assert wait_for_message(p) is None
        assert self.message == self.make_message('pmessage', self.channel,
                                                 new_data,
                                                 pattern=self.pattern)
class TestPubSubRedisDown(object):
    """Pubsub must surface ConnectionError when the server is unreachable."""

    def test_channel_subscribe(self, r):
        # Point at a port where no Redis is listening; subscribing must fail.
        r = Redis(host='localhost', port=6390)
        pubsub = r.pubsub()
        with pytest.raises(ConnectionError):
            pubsub.subscribe('foo')
def test_pubsub_thread_publish():
    """
    Smoke-test concurrent publishing through the cluster connection pool.

    Spawns several threads that each publish a batch of messages; the test
    passes when all workers complete without raising.

    Fixes: the original started the threads and returned immediately, so the
    workers were still publishing during teardown and any failure in them
    went unnoticed — the threads are now joined. The except clause also
    discarded the actual error; it is included in the message now.
    """
    startup_nodes = [{"host": "127.0.0.1", "port": "7000"}]
    r = RedisCluster(
        startup_nodes=startup_nodes,
        decode_responses=True,
        max_connections=16,
        max_connections_per_node=16,
    )

    def t_run(rc):
        # Publish a fixed batch of messages to exercise the pool.
        for i in range(0, 50):
            rc.publish('foo', 'bar')
            rc.publish('bar', 'foo')
            rc.publish('asd', 'dsa')
            rc.publish('dsa', 'asd')
            rc.publish('qwe', 'bar')
            rc.publish('ewq', 'foo')
            rc.publish('wer', 'dsa')
            rc.publish('rew', 'asd')

        # Use this for debugging
        # print(rc.connection_pool._available_connections)
        # print(rc.connection_pool._in_use_connections)
        # print(rc.connection_pool._created_connections)

    try:
        threads = []
        for i in range(10):
            t = threading.Thread(target=t_run, args=(r,))
            threads.append(t)
            t.start()
        # Wait for the workers instead of returning while they still publish.
        for t in threads:
            t.join()
    except Exception as e:
        print("Error: unable to start thread: {0}".format(e))
class TestPubSubSubcommands(object):
    """Tests for the PUBSUB introspection subcommands
    (CHANNELS / NUMSUB / NUMPAT)."""

    @skip_if_server_version_lt('2.8.0')
    @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode")
    def test_pubsub_channels(self, r):
        p = r.pubsub()
        p.subscribe('foo', 'bar', 'baz', 'quux')
        for i in range(4):
            assert wait_for_message(p)['type'] == 'subscribe'
        channels = sorted(r.pubsub_channels())
        # Fix: assert directly instead of dropping into pdb.set_trace(),
        # which had replaced the commented-out assertion and would hang any
        # unattended test run on failure.
        assert channels == [b'bar', b'baz', b'foo', b'quux']

    @skip_if_server_version_lt('2.8.0')
    @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode")
    def test_pubsub_numsub(self, r):
        # three overlapping subscribers give per-channel counts of 1/2/3
        p1 = r.pubsub()
        p1.subscribe('foo', 'bar', 'baz')
        for i in range(3):
            assert wait_for_message(p1)['type'] == 'subscribe'
        p2 = r.pubsub()
        p2.subscribe('bar', 'baz')
        for i in range(2):
            assert wait_for_message(p2)['type'] == 'subscribe'
        p3 = r.pubsub()
        p3.subscribe('baz')
        assert wait_for_message(p3)['type'] == 'subscribe'

        channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)]
        assert channels == r.pubsub_numsub('foo', 'bar', 'baz')

    @skip_if_server_version_lt('2.8.0')
    @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode")
    def test_pubsub_numpat(self, r):
        p = r.pubsub()
        p.psubscribe('*oo', '*ar', 'b*z')
        for i in range(3):
            assert wait_for_message(p)['type'] == 'psubscribe'
        assert r.pubsub_numpat() == 3
class TestPubSubPings(object):
    """PING issued on a pubsub connection is answered with 'pong' messages."""

    @skip_if_server_version_lt('3.0.0')
    @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode")
    def test_send_pubsub_ping(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.subscribe('foo')
        p.ping()
        # a bare PING yields a pong with an empty payload
        assert wait_for_message(p) == make_message(type='pong', channel=None,
                                                   data='',
                                                   pattern=None)

    @skip_if_server_version_lt('3.0.0')
    @pytest.mark.xfail(reason="Pattern pubsub is not fully supported in cluster mode")
    def test_send_pubsub_ping_message(self, r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.subscribe('foo')
        p.ping(message='hello world')
        # PING with an argument echoes it back as the pong payload
        assert wait_for_message(p) == make_message(type='pong', channel=None,
                                                   data='hello world',
                                                   pattern=None)
class TestPubSubConnectionKilled(object):
    """Killing the server side of a pubsub connection must raise
    ConnectionError on the next read."""

    @skip_if_server_version_lt('3.0.0')
    @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode")
    def test_connection_error_raised_when_connection_dies(self, r):
        p = r.pubsub()
        p.subscribe('foo')
        assert wait_for_message(p) == make_message('subscribe', 'foo', 1)
        # locate our own subscriber connection in CLIENT LIST and kill it
        for client in r.client_list():
            if client['cmd'] == 'subscribe':
                r.client_kill_filter(_id=client['id'])
        with pytest.raises(ConnectionError):
            wait_for_message(p)
class TestPubSubTimeouts(object):
    """get_message with a timeout must return None when nothing arrives."""

    @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode")
    def test_get_message_with_timeout_returns_none(self, r):
        pubsub = r.pubsub()
        pubsub.subscribe('foo')
        confirmation = wait_for_message(pubsub)
        assert confirmation == make_message('subscribe', 'foo', 1)
        assert pubsub.get_message(timeout=0.01) is None
|
atest_web_ui.py | """
Testing Web UI using selenium
"""
import re
import unittest
import threading
from helper import CONSTANTS
from datetime import datetime, timedelta
from selenium import webdriver
from botapp import create_app
from botapp.models import MyBot, Message
from botapp.api_helpers import procedures
class WebUITestCase(unittest.TestCase):
    """Selenium-driven tests for the bot web UI served by the Flask app.

    A single Firefox browser and a background Flask server are shared by
    all tests; if the browser cannot be started, every test is skipped.
    NOTE(review): this file is Python 2 (`print e`, `e.message`).
    """

    # shared selenium webdriver; stays None when Firefox fails to start
    client = None

    @classmethod
    def setUpClass(cls):
        # Suppress logging to keep unittest output clean
        import logging
        logger = logging.getLogger('werkzeug')
        logger.setLevel(logging.ERROR)

        # start Firefox
        try:
            cls.client = webdriver.Firefox()
        except Exception as e:
            print e
            logging.critical('Could not start Firefox browser for running '
                             'selenium tests. Error{error}'.format(
                                 error=e.message))

        # skip the tests if browser is not launched
        if cls.client:
            # Create application
            cls.app = create_app('testing')
            cls.app_context = cls.app.app_context()
            cls.app_context.push()

            # Empty any data if present
            MyBot.objects.delete()
            Message.objects.delete()
            MyBot.drop_collection()
            Message.drop_collection()

            # Populate database
            MyBot.generate_fake(5)
            Message.generate_fake(100)

            # start flask server in another thread.
            threading.Thread(target=cls.app.run).start()

    @classmethod
    def tearDownClass(cls):
        if cls.client:
            # Stop the flask server and close the browser
            cls.client.get('http://localhost:5000/web/shutdown')
            cls.client.close()

            # Remove all data
            MyBot.objects.delete()
            Message.objects.delete()
            MyBot.drop_collection()
            Message.drop_collection()

            # Remove application context
            cls.app_context.pop()

    def setUp(self):
        # skip every test when the browser could not be launched
        if not self.client:
            self.skipTest('Web browser not available.')

    def tearDown(self):
        pass

    def test_index_page(self):
        base_address = 'http://localhost:5000/web/index'
        # Navigate to home page
        self.client.get(base_address)
        self.assertTrue(re.search('Telegram Bot App', self.client.page_source))

        # Redirect to index page
        self.client.find_element_by_link_text('Home').click()
        self.assertTrue('Telegram Bot App' in self.client.page_source)

        # Check for messages appearing on home page.
        # NOTE(review): the slice starts at 1, so the newest message is not
        # checked — confirm this is intended.
        msgs = Message.objects.all().order_by('-date')
        for msg in msgs[1:10]:
            self.assertTrue(msg.text_content in self.client.page_source)

    def test_filtering_method(self):
        bot = MyBot.objects.first()

        # Add partially matching messages; each record breaks exactly one
        # filter criterion so only the final message should match.
        Message(date=datetime.now() - timedelta(minutes=30),
                # Un-match time.
                sender_username='tester1',
                sender_firstname='test',
                sender_lastname='bot',
                text_content='testmessage',
                bot_id=bot.bot_id).save()
        Message(date=datetime.now() - timedelta(minutes=10),
                sender_username='tester2',  # Non-matching sender-username.
                sender_firstname='test',
                sender_lastname='bot',
                text_content='testmessage',
                bot_id=bot.bot_id).save()
        Message(date=datetime.now() - timedelta(minutes=10),
                sender_username='tester1',
                sender_firstname='abc',
                # Non-matching first-name, last-name
                sender_lastname='def',
                text_content='testmessage',
                bot_id=bot.bot_id).save()
        Message(date=datetime.now() - timedelta(minutes=10),
                sender_username='tester1',
                sender_firstname='test',
                sender_lastname='bot',
                text_content='message',  # Non-matching text content
                bot_id=bot.bot_id).save()
        Message(date=datetime.now() - timedelta(minutes=10),
                sender_username='Tester1',
                sender_firstname='Test',
                sender_lastname='Bot',
                text_content='testmessage',
                bot_id=11111).save()  # Non-matching botid

        # Add expected message.
        Message(date=datetime.now() - timedelta(minutes=10),
                sender_username='tester1',
                sender_firstname='test',
                sender_lastname='bot',
                text_content='testmessage',
                bot_id=bot.bot_id).save()

        base_address = 'http://127.0.0.1:5000/web/index'
        # navigate to home page
        self.client.get(base_address)

        # Navigate to filering page
        self.client.find_element_by_link_text('Filter').click()
        self.assertTrue(re.search('Decide Filtering Criteria',
                                  self.client.page_source, re.IGNORECASE))

        # Add some filtering criteria
        self.client.find_element_by_name('fn_ln_field').send_keys('test')
        self.client.find_element_by_name('time_field').send_keys('30')
        self.client.find_element_by_name('time_int_field').send_keys('30')
        self.client.find_element_by_name('username_field').send_keys('tester1')
        self.client.find_element_by_name('text_field').send_keys('test')
        self.client.find_element_by_name('submit').click()

        # Ensure that we went to right page
        self.assertTrue(re.search('Filtered Messages',
                                  self.client.page_source, re.IGNORECASE))
        self.assertTrue(re.search('Text:\s+test', self.client.page_source,
                                  re.IGNORECASE))
        self.assertTrue(re.search('sender username:\s+tester1',
                                  self.client.page_source,
                                  re.IGNORECASE))
        self.assertTrue(re.search('sender name:\s+test', self.client.page_source,
                                  re.IGNORECASE))
        self.assertTrue(re.search('Time:\s+30', self.client.page_source,
                                  re.IGNORECASE))
        self.assertTrue(re.search('received from:\s+test\s+bot',
                                  self.client.page_source,
                                  re.IGNORECASE))

    def test_add_test_bot(self):
        base_address = 'http://127.0.0.1:5000/web/index'
        # navigate to home page
        self.client.get(base_address)

        # Navigate to filtering page
        self.client.find_element_by_link_text('New-bot').click()
        self.assertTrue(re.search('Add a new Bot',
                                  self.client.page_source, re.IGNORECASE))

        # add a test bot (is_test_bot checked, so it must not start polling)
        self.client.find_element_by_name('token').send_keys('dummy-bot-token')
        self.client.find_element_by_name('is_test_bot').click()
        self.client.find_element_by_name('submit').click()

        bot = MyBot.objects(token='dummy-bot-token').first()
        self.assertIsNotNone(bot)
        # state False means the bot is not polling
        self.assertFalse(bot.state)

        # Assertions.
        self.assertTrue(re.search('Bot\s+{uname}'.format(uname=bot.username),
                                  self.client.page_source, re.IGNORECASE))
        self.assertTrue(re.search('Bot\s+name:\s+test\s+bot',
                                  self.client.page_source, re.IGNORECASE))
        self.assertTrue(re.search('ID:\s+{botid}'.format(botid=bot.bot_id),
                                  self.client.page_source, re.IGNORECASE))
        self.assertTrue(re.search('token:\s+{token}'.format(token=bot.token),
                                  self.client.page_source, re.IGNORECASE))
        self.assertTrue('Testbot Bot:{uname} successfully added to '
                        'database'.format(uname=bot.username)
                        in self.client.page_source)

    def test_add_valid_bot(self):
        base_address = 'http://127.0.0.1:5000/web/index'
        # navigate to home page
        self.client.get(base_address)

        # Navigate to filtering page
        self.client.find_element_by_link_text('New-bot').click()
        self.assertTrue(re.search('Add a new Bot',
                                  self.client.page_source, re.IGNORECASE))

        # add a real bot using a live token from the test constants
        self.client.find_element_by_name('token')\
            .send_keys(CONSTANTS.LIVE_BOTS.get(1))
        self.client.find_element_by_name('submit').click()

        bot = MyBot.objects(token=CONSTANTS.LIVE_BOTS.get(1)).first()
        self.assertIsNotNone(bot)
        # a live bot should immediately be polling
        self.assertTrue(bot.state)

        # Assertions.
        self.assertTrue(re.search('Bot\s+{uname}'.format(uname=bot.username),
                                  self.client.page_source, re.IGNORECASE))
        self.assertTrue(re.search(
            'Bot\s+name:\s+{fname}\s+{lname}'.format(fname=bot.first_name,
                                                     lname=bot.last_name),
            self.client.page_source, re.IGNORECASE))
        self.assertTrue(re.search('ID:\s+{botid}'.format(botid=bot.bot_id),
                                  self.client.page_source, re.IGNORECASE))
        self.assertTrue(re.search('token:\s+{token}'.format(token=bot.token),
                                  self.client.page_source, re.IGNORECASE))
        self.assertTrue('New bot:{uname} successfully added and started '
                        'polling.'.format(uname=bot.username)
                        in self.client.page_source)

        # Force disable live bot from polling.
        self.assertEqual(procedures.stop_bot(botid=bot.bot_id), 1)

    def test_edit_valid_bot(self):
        base_address = 'http://127.0.0.1:5000/web/index'
        # navigate to home page
        self.client.get(base_address)

        # Navigate to filering page
        self.client.find_element_by_link_text('Edit-Bot').click()
        self.assertTrue(re.search('Toggle\s+\(Enable/\s+Disable\)\s+Bot',
                                  self.client.page_source, re.IGNORECASE))

        # add a test bot
        self.assertTrue(procedures.add_bot(token=CONSTANTS.LIVE_BOTS.get(1)))
        bot = MyBot.objects(token=CONSTANTS.LIVE_BOTS.get(1),
                            test_bot=False).first()
        self.assertIsNotNone(bot)
        self.assertTrue(bot.state)

        # toggle once: polling bot should stop
        # self.client.find_elements_by_name('status_field').send_keys('tomato')
        self.client.find_element_by_name('choose_bot').send_keys(
            bot.username.lower())
        self.client.find_element_by_name('toggle').click()

        # check for success
        self.assertTrue('Bot:{uname} successfully stopped polling'.format(
            uname=bot.username) in self.client.page_source)
        self.assertTrue('Disabled' in self.client.page_source)

        # Enable the bot
        self.client.find_element_by_name('choose_bot').send_keys(bot.username)
        self.client.find_element_by_name('toggle').click()

        # check for success
        self.assertTrue('Bot:{uname} successfully started polling'.format(
            uname=bot.username) in self.client.page_source)
        self.assertTrue('Enabled' in self.client.page_source)

        # Force disable live bot from polling.
        self.assertEqual(procedures.stop_bot(botid=bot.bot_id), 1)

    def test_get_bot_info(self):
        base_address = 'http://127.0.0.1:5000/web/index'

        # Add a special bot and some expected messages.
        bot = MyBot(bot_id=123456, username='special test bot',
                    token='special-dummy-token', first_name='special',
                    last_name='bot').save()
        for i in range(5):
            Message(bot_id=bot.bot_id, text_content='message'+str(i)).save()

        # navigate to home page
        self.client.get(base_address)

        # Navigate to filtering page
        self.client.find_element_by_link_text('Get-Bot-Info').click()
        self.assertTrue(re.search('Get Bot Information',
                                  self.client.page_source, re.IGNORECASE))

        self.client.find_element_by_name('choose_bot').send_keys(bot.username)
        self.client.find_element_by_name('submit').click()

        # Redirected to bot information page. Make Assertions.
        self.assertTrue(re.search(bot.username, self.client.page_source,
                                  re.IGNORECASE))
        self.assertTrue(re.search('{fname}\s+{lname}'.format(
            fname=bot.first_name, lname=bot.last_name),
            self.client.page_source, re.IGNORECASE))
        self.assertTrue(re.search('Token:\s+{token}'.format(token=bot.token),
                                  self.client.page_source, re.IGNORECASE))
        msgs = Message.objects(bot_id=bot.bot_id).all()
        for msg in msgs:
            self.assertTrue(msg.text_content in self.client.page_source)
|
hybrid.py | import pandas as pd
import numpy as np
from ast import literal_eval
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet
from surprise import Reader, Dataset, SVD, evaluate
import copy
import datetime
import time
from multiprocessing.pool import ThreadPool
from movies_cinema_imdb import get_global_indices_map_for_tmdb, get_global_md, get_global_inverse_indices, get_global_cosine_sim, get_global_smd, get_global_indices_map
path = '../the-movies-dataset/'
# md = pd.read_csv(path + 'pop_new_metadata.csv')
# links = pd.read_csv(path + 'pop_new_links.csv')
# credits = pd.read_csv(path + 'credits.csv')
# keywords = pd.read_csv(path + 'keywords.csv')
# del md['useless']
# del links['useless']
# del credits['useless']
# del keywords['useless']
# md['genres'] = md['genres'].fillna('[]').apply(literal_eval).apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
# # md['year'] = pd.to_datetime(md['release_date'], errors='coerce').apply(lambda x: [str(x).split('-')[0]] if x != np.nan else [])
# # md['year'] = md['year'].fillna('[]').apply(lambda x: [str(int(x))] if isinstance(x, int) or isinstance(x, float) or isinstance(x, str) else [])
# # md['year'] = md['year'].fillna('[]').apply(literal_eval)
# md['popularity'] = md['popularity'].fillna('[]').apply(lambda x: [str(int(x))] if isinstance(x, float) or isinstance(x, int) else [])
# links = links[links['tmdbId'].notnull()]['tmdbId'].astype('int')
# #md = md.drop([19730, 29503, 35587])
# md['id'] = md['id'].astype('int')
# smd = md[md['id'].isin(links)]
# # smd['tagline'] = smd['tagline'].fillna('')
# # smd['description'] = smd['overview'] + smd['tagline']
# # smd['description'] = smd['description'].fillna('')
# keywords['id'] = keywords['id'].astype('int')
# credits['id'] = credits['id'].astype('int')
# md['id'] = md['id'].astype('int')
# md = md.merge(credits, on='id')
# md = md.merge(keywords, on='id')
# smd = md[md['id'].isin(links)]
# smd['cast'] = smd['cast'].apply(literal_eval)
# smd['crew'] = smd['crew'].apply(literal_eval)
# smd['keywords'] = smd['keywords'].apply(literal_eval)
# # smd['cast_size'] = smd['cast'].apply(lambda x: len(x))
# # smd['crew_size'] = smd['crew'].apply(lambda x: len(x))
# def get_director(x):
# for i in x:
# if i['job'] == 'Director':
# return i['name']
# return np.nan
# indices = pd.Series(smd.index, index=smd['title'])
# smd['keywords'] = smd['keywords'].apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
# smd['cast'] = smd['cast'].apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
# smd['cast'] = smd['cast'].apply(lambda x: x[:3] if len(x) >=3 else x)
# smd['cast'] = smd['cast'].apply(lambda x: [str.lower(i.replace(" ", "")) for i in x])
# smd['director'] = smd['crew'].apply(get_director)
# smd['director'] = smd['director'].astype('str').apply(lambda x: str.lower(x.replace(" ", "")))
# smd['director'] = smd['director'].apply(lambda x: [x,x,x])
# s = smd.apply(lambda x: pd.Series(x['keywords']),axis=1).stack().reset_index(level=1, drop=True)
# s.name = 'keyword'
# s = s.value_counts()
# s = s[s > 1]
# stemmer = SnowballStemmer('english')
# stemmer.stem('dogs')
# def filter_keywords(x):
# words = []
# for i in x:
# if i in s:
# words.append(i)
# return words
# smd['keywords'] = smd['keywords'].apply(filter_keywords)
# smd['keywords'] = smd['keywords'].apply(lambda x: [stemmer.stem(i) for i in x])
# smd['keywords'] = smd['keywords'].apply(lambda x: [str.lower(i.replace(" ", "")) for i in x])
# smd['soup'] = smd['keywords'] + smd['cast'] + smd['director'] + smd['genres'] + smd['popularity'] # + smd['year']
# smd['soup'] = smd['soup'].apply(lambda x: ' '.join(x))
# count = CountVectorizer(analyzer='word', ngram_range=(1, 2), min_df=0, stop_words='english')
# count_matrix = count.fit_transform(smd['soup'])
# cosine_sim = cosine_similarity(count_matrix, count_matrix)
# smd = smd.reset_index()
# titles = smd['title']
# indices = pd.Series(smd.index, index=smd['title'])
# inverse_indices = pd.Series(smd['title'], index=smd.index)
# def convert_int(x):
# try:
# return int(x)
# except:
# return np.nan
# id_map = pd.read_csv(path + 'pop_new_links.csv')[['movieId', 'tmdbId']]
# id_map['tmdbId'] = id_map['tmdbId'].apply(convert_int)
# id_map.columns = ['movieId', 'id']
# id_map = id_map.merge(smd[['title', 'id']], on='id').set_index('title')
# # id_map = id_map.set_index('tmdbId')
# indices_map = id_map.set_index('id')
# indices_map_for_tmdb = id_map.set_index('movieId')
# start_update_svd = None
# time_left = None
# def get_time_left():
# while True:
# global start_update_svd, time_left
# now = datetime.datetime.now()
# elapsedTime = now - start_update_svd
# datetime.timedelta(0, 125, 749430)
# diff_m, diff_s = divmod(elapsedTime.total_seconds(), 60)
# minutes = 0-int(diff_m) # 3
# seconds = 20-int(diff_s) # 59
# time_left = int((minutes)*60 + seconds)
# # return str(minutes) + ":" + str(seconds)[:3] + " minutes"
# def wait_until_time_left(userId):
# global time_left
# print("waiting")
# time.sleep(int(time_left))
# print("start final res")
# return final_res(userId)
# def fork_for_movies(userId):
# pool = ThreadPool(processes=1)
# async_result = pool.apply_async(wait_until_time_left, (userId))
# print("thread")
# return_one = True
# if return_one:
# print("first done")
# return_one = False
# return [], time_left
# print("thread waiting")
# return_val = async_result.get()
# print("thread pass")
# return return_val, -1
svd = SVD()
def update_svd():
    """
    Continuously retrain the collaborative-filtering SVD model.

    Runs forever (intended to live in a background thread): each iteration
    re-reads the ratings CSV, fits a fresh SVD on the full trainset, and
    atomically swaps it into the module-level `svd` used by the predictors.

    NOTE(review): there is no sleep between iterations, so this retrains
    back-to-back and keeps a core busy — confirm that is intended.
    NOTE(review): `data.split(n_folds=10)` configures folds that are never
    used (only `build_full_trainset()` is consumed) — looks removable.
    """
    while True:
        global svd
        # start_update_svd = datetime.datetime.now()
        ratings = pd.read_csv(path + 'pop_new_ratings.csv')
        # drop the artifact column written by the CSV exporter
        del ratings['useless']
        reader = Reader()
        data = Dataset.load_from_df(ratings[['userId', 'movieId', 'rating']], reader)
        data.split(n_folds=10)
        svd2 = SVD()
        trainset = data.build_full_trainset()
        svd2.fit(trainset)
        print("done", end=' ')
        # deep-copy so readers of the global never see a half-trained model
        svd = copy.deepcopy(svd2)
# Start continuous SVD retraining in the background at import time.
# NOTE(review): non-daemon thread — it will keep the process alive on exit;
# confirm that is intended.
from threading import Thread
Thread(target=update_svd).start()
# Thread(target=get_time_left).start()
def get_svd():
    """Accessor for the module-level SVD model kept fresh by update_svd()."""
    global svd
    return svd
def list_movies_seen_user(userId, number=False):
    """
    List the titles of all movies the given user has rated.

    :param userId: user id in the ratings CSV
    :param number: when True, return only the count of rated movies
    :return: list of titles, or an int count when number=True
    """
    indices_map_for_tmdb = get_global_indices_map_for_tmdb()
    md = get_global_md()
    # re-read ratings on every call so newly saved ratings are picked up
    ratings = pd.read_csv(path + 'pop_new_ratings.csv')
    del ratings['useless']
    # set() de-duplicates repeated ratings of the same movie
    movie_liked_user = set([int(i) for i in ratings['movieId'][ratings['userId'] == userId]])
    if number:
        return len(movie_liked_user)
    titles_movies = []
    for i in movie_liked_user:
        # movieId -> tmdbId -> title via the global metadata frame
        tmdbId = int(indices_map_for_tmdb['id'][i])
        title = md.loc[md['id'] == tmdbId]['title']
        title = title.values[0]
        titles_movies.append(title)
    return titles_movies
def predict_one_movie(userId, tmdbId):
    """Predict the user's rating for a single movie.

    :param userId: id of the user in the ratings data.
    :param tmdbId: TMDB id of the movie to score.
    :return: predicted rating formatted as "x.yz/5", or "No info" when
             the movie is unknown to the mapping or the model fails.
    """
    global svd
    indices_map = get_global_indices_map()
    try:
        return str(svd.predict(userId, indices_map.loc[tmdbId]['movieId']).est)[:4] + "/5"
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; lookup/model errors still yield "No info"
        return "No info"
def hybrid_recommandation(userId, idx):
    """Hybrid recommender: content similarity re-ranked by SVD rating.

    Finds the 49 movies most similar (cosine similarity over content
    features) to the movie at internal index ``idx``, then re-ranks
    them by the user's predicted SVD rating.

    :param userId: id of the user to personalise for.
    :param idx: internal movie index used to look up the TMDB id.
    :return: DataFrame of the top 30 candidates, sorted descending by
             the predicted-score column 'recommanded'.
    """
    global svd
    indices_map_for_tmdb = get_global_indices_map_for_tmdb()
    md = get_global_md()
    inverse_indices = get_global_inverse_indices()
    cosine_sim = get_global_cosine_sim()
    smd = get_global_smd()
    indices_map = get_global_indices_map()
    # resolve the seed movie's title via its TMDB id
    tmdbId = int(indices_map_for_tmdb['id'][idx])
    title = md.loc[md['id'] == tmdbId]['title']
    title = title.values[0]
    # find the row of this title in the similarity matrix
    idx = 0
    for i, t in enumerate(inverse_indices.values):
        if t == title:
            idx = i
            break
    sim_scores = list(enumerate(cosine_sim[int(idx)]))
    sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
    # skip position 0 (the seed movie itself), keep the next 49
    sim_scores = sim_scores[1:50]
    movie_indices = [i[0] for i in sim_scores]
    movies = smd.iloc[movie_indices][['title','id']]
    def pred(x):
        # predicted rating for this user; 0 when unknown to the model
        try:
            return svd.predict(userId, indices_map.loc[x]['movieId']).est
        except:
            return 0
    movies['recommanded'] = movies['id'].apply(pred)
    movies = movies.sort_values('recommanded', ascending=False)
    return movies.head(30)
def movies_from_last_one(userId):
    """Recommend up to 7 movies seeded from the user's most recent rating.

    :param userId: id of the user in the ratings CSV.
    :return: list of (title, predicted_score) tuples, best first;
             empty list when the user has no ratings.
    """
    indices_map_for_tmdb = get_global_indices_map_for_tmdb()
    md = get_global_md()
    ratings = pd.read_csv(path + 'pop_new_ratings.csv')
    del ratings['useless']  # drop the unnamed index column
    movie_liked_user = [int(i) for i in ratings['movieId'][ratings['userId'] == userId]]
    if len(movie_liked_user) < 1:
        return []
    last_movie = movie_liked_user[-1]
    recommanded = hybrid_recommandation(userId, last_movie)
    # keep the best predicted score seen for each candidate title
    best_movie = {}
    for r in recommanded.values:
        title = r[0]
        if title in best_movie:
            best_movie[title] = max(float(r[2]), best_movie[title])
            continue
        best_movie[title] = float(r[2])
    # for r in recommanded.values:
    #     title = r[0]
    #     if len(title) > 25:
    #         continue
    #     best_movie.add(title)
    # drop the movies the user has already seen
    for idx in movie_liked_user:
        tmdbId = int(indices_map_for_tmdb['id'][idx])
        t = md.loc[md['id'] == tmdbId]['title']
        t = t.values[0]
        if t in best_movie:
            del best_movie[t]
    best_movie_sorted = sorted(best_movie.items(), key=lambda x: x[1], reverse=True)
    return best_movie_sorted[:7]
def final_res(userId):
    """Compute the user's overall top-3 recommendations.

    Aggregates hybrid recommendations seeded from *every* movie the
    user has rated, keeps the best predicted score per title, removes
    titles the user has already seen, and returns the top 3.

    :param userId: id of the user in the ratings CSV.
    :return: list of up to 3 (title, predicted_score) tuples, best
             first; empty list when the user has no ratings.
    """
    indices_map_for_tmdb = get_global_indices_map_for_tmdb()
    md = get_global_md()
    ratings = pd.read_csv(path + 'pop_new_ratings.csv')
    del ratings['useless']  # drop the unnamed index column
    movie_liked_user = set([int(i) for i in ratings['movieId'][ratings['userId'] == userId]])
    if not movie_liked_user:
        return []
    # best predicted score per candidate title, across all seed movies
    best_movie = {}
    for movie in movie_liked_user:
        recommanded = hybrid_recommandation(userId, movie)
        for r in recommanded.values:
            title = r[0]
            score = float(r[2])
            if title in best_movie:
                best_movie[title] = max(score, best_movie[title])
            else:
                best_movie[title] = score
    # remove the movies the user has already seen
    for idx in movie_liked_user:
        tmdbId = int(indices_map_for_tmdb['id'][idx])
        t = md.loc[md['id'] == tmdbId]['title']
        t = t.values[0]
        if t in best_movie:
            del best_movie[t]
    best_movie_sorted = sorted(best_movie.items(), key=lambda x: x[1], reverse=True)
    return best_movie_sorted[:3]
scanport2.py | #-*- coding:utf-8 -*-
#2015-03-25
# Well-known ports to probe, paired index-for-index with Server below.
Port = [80,21,23,22,25,110,443,1080,3306,3389,1521,1433]
# Service name for each entry of Port (same ordering).
Server = ['HTTP','FTP','TELNET','SSH','SMTP','POP3','HTTPS','SOCKS','MYSQL','Misrosoft RDP','Oracle','Sql Server']
# Shared list appended to by scanner threads: [port, service] pairs.
result = []
import socket
import sys
import threading
import time
def get_remote_machine_info(Domain):
    """Resolve a hostname to an IPv4 address string; print and return 0 on failure."""
    try:
        return socket.gethostbyname(Domain)
    except socket.error,e:
        print '%s: %s'%(Domain,e)
        # NOTE(review): returns 0 rather than None on failure - the only
        # caller just prints this value, but confirm before relying on it.
        return 0
def scan(Domain,port,server):
    """Attempt a TCP connect to Domain:port; on success record the
    [port, server] pair in the shared module-level ``result`` list."""
    temp = []
    try:
        s = socket.socket()
        print "Attempting to connect to "+Domain+': '+str(port)
        s.connect((Domain,port))
        temp.append(port)
        temp.append(server)
        result.append(temp)
        s.close()
    except:
        # NOTE(review): bare except silently drops every error (refused
        # connections, but also interrupts); no socket timeout is set, so
        # filtered ports can hang until the OS default timeout.
        pass
def output(Domain,IP):
    """Print the scan summary accumulated in the shared ``result`` list."""
    if result:
        print '\n'+Domain+': --> '+IP
        print '\nThe Open Port:'
        for i in result:
            print Domain+': %4d -->%s'%(i[0],i[1])
    else:
        print 'None Port!'
def main():
    """Entry point: scan the host given as argv[1] across all known ports."""
    print '''\nX-man Port Scan 2.0
payload:./Scan.py www.xxx.zzz'''
    payload = sys.argv
    # NOTE(review): raises IndexError when no hostname argument is given.
    IP = get_remote_machine_info(payload[1])
    print '\n'
    for port,server in zip(Port,Server):
        t = threading.Thread(target=scan,args=(payload[1],port,server,))  # one thread per port
        t.setDaemon(True)  # daemon threads, so the process can exit quickly
        t.start()
        time.sleep(0.1)  # stagger thread starts to keep the output readable
    output(payload[1],IP)
if __name__=='__main__':
main()
|
strategies.py | import logging
import threading
import time
from typing import List
from brownie import Contract, chain
from eth_utils import encode_hex, event_abi_to_log_topic
from yearn.utils import safe_views
from yearn.multicall2 import fetch_multicall
from yearn.events import create_filter, decode_logs
# Strategy view functions whose raw token amounts must be divided by the
# vault's scale before reporting (see Strategy.describe).
STRATEGY_VIEWS_SCALED = [
    "maxDebtPerHarvest",
    "minDebtPerHarvest",
    "totalDebt",
    "totalGain",
    "totalLoss",
    "estimatedTotalAssets",
    "lentTotalAssets",
    "balanceOfPool",
    "balanceOfWant",
]
# Contract event names tracked by the background watcher thread.
STRATEGY_EVENTS = ["Harvested"]
logger = logging.getLogger(__name__)
class Strategy:
    """Wraps a Yearn strategy contract: watches its Harvested events in a
    background thread and exposes scaled on-chain views via describe()."""
    def __init__(self, strategy, vault):
        self.strategy = Contract(strategy)
        self.vault = vault
        try:
            self.name = self.strategy.name()
        except ValueError:
            # contract exposes no name() view; fall back to address prefix
            self.name = strategy[:10]
        # view functions safe to query via multicall
        self._views = safe_views(self.strategy.abi)
        # block numbers at which Harvested events were observed
        self._harvests = []
        # log topics derived from the ABI for the events in STRATEGY_EVENTS
        self._topics = [
            [
                encode_hex(event_abi_to_log_topic(event))
                for event in self.strategy.abi
                if event["type"] == "event" and event["name"] in STRATEGY_EVENTS
            ]
        ]
        # set once the initial harvest history has been loaded
        self._done = threading.Event()
        self._thread = threading.Thread(target=self.watch_events, daemon=True)
    def __repr__(self) -> str:
        return f"<Strategy {self.strategy} name={self.name}>"
    def __eq__(self, other):
        # comparable against another Strategy or a raw address string
        if isinstance(other, Strategy):
            return self.strategy == other.strategy
        if isinstance(other, str):
            return self.strategy == other
        raise ValueError("Strategy is only comparable with [Strategy, str]")
    def watch_events(self):
        """Poll new blocks forever, decoding Harvested logs as they appear.

        Signals ``self._done`` after the first pass (so load_harvests()
        unblocks), then sleeps 300s between polls.
        """
        start = time.time()
        self.log_filter = create_filter(str(self.strategy), topics=self._topics)
        for block in chain.new_blocks(height_buffer=12):
            logs = self.log_filter.get_new_entries()
            events = decode_logs(logs)
            self.process_events(events)
            if not self._done.is_set():
                self._done.set()
                logger.info("loaded %d harvests %s in %.3fs", len(self._harvests), self.name, time.time() - start)
            time.sleep(300)
    def process_events(self, events):
        # record the block number of every Harvested event
        for event in events:
            if event.name == "Harvested":
                block = event.block_number
                logger.debug("%s harvested on %d", self.name, block)
                self._harvests.append(block)
    def load_harvests(self):
        """Start the watcher thread (at most once) and block until the
        initial harvest history has been loaded."""
        # NOTE(review): relies on Thread's private `_started` event to make
        # start() idempotent - confirm against the targeted Python version.
        if not self._thread._started.is_set():
            self._thread.start()
        self._done.wait()
    @property
    def harvests(self) -> List[int]:
        # block numbers of all observed harvests (loads lazily on first use)
        self.load_harvests()
        return self._harvests
    def describe(self, block=None):
        """Fetch all safe views plus the vault's strategies() struct in one
        multicall; scale the values listed in STRATEGY_VIEWS_SCALED."""
        results = fetch_multicall(
            *[[self.strategy, view] for view in self._views],
            [self.vault.vault, "strategies", self.strategy],
            block=block,
        )
        info = dict(zip(self._views, results))
        info.update(results[-1].dict())
        for view in STRATEGY_VIEWS_SCALED:
            if view in info:
                info[view] = (info[view] or 0) / self.vault.scale
        # unwrap structs
        for view in info:
            if hasattr(info[view], '_dict'):
                info[view] = info[view].dict()
        return info
|
lambda_executors.py | import os
import re
import glob
import json
import time
import logging
import threading
import subprocess
import six
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file,
to_str, run, cp_r, json_safe, get_free_tcp_port)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
# identifiers of the supported Lambda runtimes
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_PYTHON37 = 'python3.7'
LAMBDA_RUNTIME_PYTHON38 = 'python3.8'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
LAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_JAVA11 = 'java11'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
LAMBDA_RUNTIME_PROVIDED = 'provided'
# name of the event file passed into Java Lambda containers
LAMBDA_EVENT_FILE = 'event_file.json'
# sizes/offsets of the rotating per-invocation port ranges
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# event-source identifier for SQS-triggered invocations
EVENT_SOURCE_SQS = 'aws:sqs'
def get_from_event(event, key):
    """Return ``event['Records'][0][key]``, or None when a key is missing.

    Non-KeyError failures (e.g. an empty Records list) propagate.
    """
    try:
        first_record = event['Records'][0]
        return first_record[key]
    except KeyError:
        return None
def is_java_lambda(lambda_details):
    """Whether ``lambda_details`` (a runtime string, or an object with a
    ``runtime`` attribute) denotes a Java runtime."""
    runtime_name = getattr(lambda_details, 'runtime', lambda_details)
    return runtime_name in (LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11)
def is_nodejs_runtime(lambda_details):
    """Whether ``lambda_details`` (a runtime string, or an object with a
    ``runtime`` attribute) denotes any Node.js runtime."""
    runtime_name = getattr(lambda_details, 'runtime', lambda_details)
    return runtime_name[:6] == 'nodejs'
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
    """Persist a Lambda invocation's log output to CloudWatch Logs.

    Builds the conventional '/aws/lambda/<name>' log group name and a
    date/container-based stream name, then delegates to
    store_cloudwatch_logs.
    """
    container_id = container_id or short_uid()
    invocation_time = invocation_time or int(time.time() * 1000)
    log_group_name = '/aws/lambda/%s' % func_details.name()
    # stream name: YYYY/MM/DD (UTC, from the invocation time) + container id
    invocation_time_secs = int(invocation_time / 1000)
    time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))
    log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)
    return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
class LambdaExecutor(object):
    """ Base class for Lambda executors. Subclasses must overwrite the _execute method """
    def __init__(self):
        # keeps track of each function arn and the last time it was invoked
        self.function_invoke_times = {}
    def execute(self, func_arn, func_details, event, context=None, version=None,
            asynchronous=False, callback=None):
        """Run the Lambda identified by ``func_arn``.

        Synchronous mode returns the handler result directly. When
        ``asynchronous`` is True, the invocation runs on a background
        FuncThread, errors are routed to the dead-letter queue, and a
        (None, message) tuple is returned immediately. ``callback`` (if
        given) is always invoked with the result/error after execution.
        """
        def do_execute(*args):
            # set the invocation time in milliseconds
            invocation_time = int(time.time() * 1000)
            # start the execution
            raised_error = None
            result = None
            dlq_sent = None
            try:
                result = self._execute(func_arn, func_details, event, context, version)
            except Exception as e:
                raised_error = e
                if asynchronous:
                    if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
                        sqs_queue_arn = get_from_event(event, 'eventSourceARN')
                        if sqs_queue_arn:
                            # event source is SQS, send event back to dead letter queue
                            dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
                    else:
                        # event source is not SQS, send back to lambda dead letter queue
                        lambda_error_to_dead_letter_queue(func_details, event, e)
                raise e
            finally:
                self.function_invoke_times[func_arn] = invocation_time
                callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
            # return final result
            return result
        # Inform users about asynchronous mode of the lambda execution.
        if asynchronous:
            LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
            FuncThread(do_execute).start()
            return None, 'Lambda executed asynchronously.'
        return do_execute()
    def _execute(self, func_arn, func_details, event, context=None, version=None):
        """ This method must be overwritten by subclasses. """
        raise Exception('Not implemented.')
    def startup(self):
        # hook for subclasses to initialize resources
        pass
    def cleanup(self, arn=None):
        # hook for subclasses to release resources (optionally per ARN)
        pass
    def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):
        """Run ``cmd`` in a shell, feeding ``event`` via stdin; return the
        last stdout line as the Lambda result and store logs to CloudWatch.

        Raises Exception when the process exits non-zero.
        NOTE(review): the mutable default ``env_vars={}`` is shared across
        calls - safe only as long as callers never mutate it.
        """
        process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE,
            env_vars=env_vars, stdin=True)
        result, log_output = process.communicate(input=event)
        try:
            result = to_str(result).strip()
        except Exception:
            pass
        log_output = to_str(log_output).strip()
        return_code = process.returncode
        # Note: The user's code may have been logging to stderr, in which case the logs
        # will be part of the "result" variable here. Hence, make sure that we extract
        # only the *last* line of "result" and consider anything above that as log output.
        if isinstance(result, six.string_types) and '\n' in result:
            additional_logs, _, result = result.rpartition('\n')
            log_output += '\n%s' % additional_logs
        log_formatted = log_output.strip().replace('\n', '\n> ')
        func_arn = func_details and func_details.arn()
        LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))
        # store log output - TODO get live logs from `process` above?
        _store_logs(func_details, log_output)
        if return_code != 0:
            raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
                (return_code, result, log_output))
        return result
class ContainerInfo:
    """Basic information about a Docker container: its name and the
    default entry point used to invoke the handler."""
    def __init__(self, name, entry_point):
        self.entry_point = entry_point
        self.name = name
class LambdaExecutorContainers(LambdaExecutor):
    """ Abstract executor class for executing Lambda functions in Docker containers """
    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        # subclasses build and return the shell command used to run the Lambda
        raise Exception('Not implemented')
    def _docker_cmd(self):
        """ Return the string to be used for running Docker commands. """
        return config.DOCKER_CMD
    def prepare_event(self, environment, event_body):
        """ Return the event as a stdin string. """
        # amend the environment variables for execution
        environment['AWS_LAMBDA_EVENT_BODY'] = event_body
        return None
    def _execute(self, func_arn, func_details, event, context=None, version=None):
        """Assemble the environment and shell command for a containerized
        Lambda invocation and run it via run_lambda_executor."""
        lambda_cwd = func_details.cwd
        runtime = func_details.runtime
        handler = func_details.handler
        environment = func_details.envvars.copy()
        # configure USE_SSL in environment
        if config.USE_SSL:
            environment['USE_SSL'] = '1'
        # prepare event body
        if not event:
            LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
            event = {}
        event_body = json.dumps(json_safe(event))
        stdin = self.prepare_event(environment, event_body)
        docker_host = config.DOCKER_HOST_FROM_CONTAINER
        environment['HOSTNAME'] = docker_host
        environment['LOCALSTACK_HOSTNAME'] = docker_host
        environment['_HANDLER'] = handler
        if func_details.timeout:
            environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
        if context:
            environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
            environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
            environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
        # custom command to execute in the container
        command = ''
        # if running a Java Lambda, set up classpath arguments
        if is_java_lambda(runtime):
            java_opts = Util.get_java_opts()
            stdin = None
            # copy executor jar into temp directory
            target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
            if not os.path.exists(target_file):
                cp_r(LAMBDA_EXECUTOR_JAR, target_file)
            # TODO cleanup once we have custom Java Docker image
            taskdir = '/var/task'
            save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
            classpath = Util.get_java_classpath(target_file)
            command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
                (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))
        # accept any self-signed certificates for outgoing calls from the Lambda
        if is_nodejs_runtime(runtime):
            environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
        # determine the command to be executed (implemented by subclasses)
        cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
        # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
        LOG.info('Running lambda cmd: %s' % cmd)
        result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
        return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
    """ Executor class for executing Lambda functions in re-usable Docker containers """
    def __init__(self):
        super(LambdaExecutorReuseContainers, self).__init__()
        # locking thread for creation/destruction of docker containers.
        self.docker_container_lock = threading.RLock()
        # On each invocation we try to construct a port unlikely to conflict
        # with a previously invoked lambda function. This is a problem with at
        # least the lambci/lambda:go1.x container, which execs a go program that
        # attempts to bind to the same default port.
        self.next_port = 0
        self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
        self.port_offset = LAMBDA_SERVER_PORT_OFFSET
    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        """Prime (or reuse) this function's persistent container and build
        the `docker exec` command line for the invocation."""
        # check whether the Lambda has been invoked before
        has_been_invoked_before = func_arn in self.function_invoke_times
        # Choose a port for this invocation
        with self.docker_container_lock:
            env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
            self.next_port = (self.next_port + 1) % self.max_port
        # create/verify the docker container is running.
        LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
        container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
        # Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
        # passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
        # available for docker exec, to better support very large Lambda events (very long environment values)
        exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
        if not command:
            command = '%s %s' % (container_info.entry_point, handler)
        # determine files to be copied into the container
        copy_command = ''
        docker_cmd = self._docker_cmd()
        event_file = os.path.join(lambda_cwd, LAMBDA_EVENT_FILE)
        if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
            # if this is the first invocation: copy the entire folder into the container
            copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
        elif os.path.exists(event_file):
            # otherwise, copy only the event file if it exists
            copy_command = '%s cp "%s" "%s:/var/task";' % (docker_cmd, event_file, container_info.name)
        cmd = (
            '%s'
            ' %s exec'
            ' %s' # env variables
            ' %s' # container name
            ' %s' # run cmd
        ) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
        LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
        return cmd
    def startup(self):
        # clear out any stale containers, then start the idle-container reaper
        self.cleanup()
        # start a process to remove idle containers
        if config.LAMBDA_REMOVE_CONTAINERS:
            self.start_idle_container_destroyer_interval()
    def cleanup(self, arn=None):
        # destroy one function's container, or all of them when arn is None
        if arn:
            self.function_invoke_times.pop(arn, None)
            return self.destroy_docker_container(arn)
        self.function_invoke_times = {}
        return self.destroy_existing_docker_containers()
    def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
        """
        Prepares a persistent docker container for a specific function.
        :param runtime: Lamda runtime environment. python2.7, nodejs6.10, etc.
        :param func_arn: The ARN of the lambda function.
        :param env_vars: The environment variables for the lambda.
        :param lambda_cwd: The local directory containing the code for the lambda function.
        :return: ContainerInfo class containing the container name and default entry point.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            docker_cmd = self._docker_cmd()
            status = self.get_docker_container_status(func_arn)
            LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
            docker_image = Util.docker_image_for_runtime(runtime)
            rm_flag = Util.get_docker_remove_flag()
            # Container is not running or doesn't exist.
            if status < 1:
                # Make sure the container does not exist in any form/state.
                self.destroy_docker_container(func_arn)
                env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
                network = config.LAMBDA_DOCKER_NETWORK
                network_str = '--network="%s"' % network if network else ''
                mount_volume = not config.LAMBDA_REMOTE_DOCKER
                lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
                mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''
                # Create and start the container
                LOG.debug('Creating container: %s' % container_name)
                cmd = (
                    '%s create'
                    ' %s' # --rm flag
                    ' --name "%s"'
                    ' --entrypoint /bin/bash' # Load bash when it starts.
                    ' %s'
                    ' --interactive' # Keeps the container running bash.
                    ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                    ' -e HOSTNAME="$HOSTNAME"'
                    ' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
                    ' %s' # env_vars
                    ' %s' # network
                    ' %s'
                ) % (docker_cmd, rm_flag, container_name, mount_volume_str, env_vars_str, network_str, docker_image)
                LOG.debug(cmd)
                run(cmd)
                if not mount_volume:
                    LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
                    cmd = (
                        '%s cp'
                        ' "%s/." "%s:/var/task"'
                    ) % (docker_cmd, lambda_cwd, container_name)
                    LOG.debug(cmd)
                    run(cmd)
                LOG.debug('Starting container: %s' % container_name)
                cmd = '%s start %s' % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd)
                # give the container some time to start up
                time.sleep(1)
            # Get the entry point for the image.
            LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
            cmd = (
                '%s image inspect'
                ' --format="{{ .ContainerConfig.Entrypoint }}"'
                ' %s'
            ) % (docker_cmd, docker_image)
            LOG.debug(cmd)
            run_result = run(cmd)
            entry_point = run_result.strip('[]\n\r ')
            container_network = self.get_docker_container_network(func_arn)
            LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
                % (entry_point, container_name, container_network))
            return ContainerInfo(container_name, entry_point)
    def destroy_docker_container(self, func_arn):
        """
        Stops and/or removes a docker container for a specific lambda function ARN.
        :param func_arn: The ARN of the lambda function.
        :return: None
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)
            docker_cmd = self._docker_cmd()
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            if status == 1:
                LOG.debug('Stopping container: %s' % container_name)
                cmd = (
                    '%s stop -t0 %s'
                ) % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
                status = self.get_docker_container_status(func_arn)
            if status == -1:
                LOG.debug('Removing container: %s' % container_name)
                cmd = (
                    '%s rm %s'
                ) % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
    def get_all_container_names(self):
        """
        Returns a list of container names for lambda containers.
        :return: A String[] localstack docker container names for each function.
        """
        with self.docker_container_lock:
            LOG.debug('Getting all lambda containers names.')
            cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
            if len(cmd_result) > 0:
                container_names = cmd_result.split('\n')
            else:
                container_names = []
            return container_names
    def destroy_existing_docker_containers(self):
        """
        Stops and/or removes all lambda docker containers for localstack.
        :return: None
        """
        with self.docker_container_lock:
            container_names = self.get_all_container_names()
            LOG.debug('Removing %d containers.' % len(container_names))
            for container_name in container_names:
                cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
    def get_docker_container_status(self, func_arn):
        """
        Determine the status of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: 1 If the container is running,
        -1 if the container exists but is not running
        0 if the container does not exist.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            # Check if the container is already running
            # Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
            # systems. Therefore, we use a combination of filter and grep to get the results.
            cmd = ("docker ps -a --filter name='%s' "
                '--format "{{ .Status }} - {{ .Names }}" '
                '| grep -w "%s" | cat') % (container_name, container_name)
            LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
            cmd_result = run(cmd)
            # If the container doesn't exist. Create and start it.
            container_status = cmd_result.strip()
            if len(container_status) == 0:
                return 0
            if container_status.lower().startswith('up '):
                return 1
            return -1
    def get_docker_container_network(self, func_arn):
        """
        Determine the network of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: name of the container network
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)
            # container does not exist
            if status == 0:
                return ''
            # Get the container name.
            container_name = self.get_container_name(func_arn)
            docker_cmd = self._docker_cmd()
            # Get the container network
            LOG.debug('Getting container network: %s' % container_name)
            cmd = (
                '%s inspect %s'
                ' --format "{{ .HostConfig.NetworkMode }}"'
            ) % (docker_cmd, container_name)
            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
            container_network = cmd_result.strip()
            return container_network
    def idle_container_destroyer(self):
        """
        Iterates though all the lambda containers and destroys any container that has
        been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
        :return: None
        """
        LOG.info('Checking if there are idle containers.')
        current_time = int(time.time() * 1000)
        for func_arn, last_run_time in dict(self.function_invoke_times).items():
            duration = current_time - last_run_time
            # not enough idle time has passed
            if duration < MAX_CONTAINER_IDLE_TIME_MS:
                continue
            # container has been idle, destroy it.
            self.destroy_docker_container(func_arn)
    def start_idle_container_destroyer_interval(self):
        """
        Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.
        Thus checking for idle containers and destroying them.
        :return: None
        """
        self.idle_container_destroyer()
        threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
    def get_container_name(self, func_arn):
        """
        Given a function ARN, returns a valid docker container name.
        :param func_arn: The ARN of the lambda function.
        :return: A docker compatible name for the arn.
        """
        return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
    """Executor that spins up a fresh (lambci) Docker container per invocation."""
    def __init__(self):
        super(LambdaExecutorSeparateContainers, self).__init__()
        # rotating counter used to pick per-invocation API/runtime ports
        self.next_port = 1
        self.max_port = LAMBDA_API_UNIQUE_PORTS
        self.port_offset = LAMBDA_API_PORT_OFFSET
    def prepare_event(self, environment, event_body):
        # Tell Lambci to use STDIN for the event
        environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
        return event_body.encode()
    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        """Build the `docker create/cp/start` (remote docker) or single
        `docker run` (mounted volume) command line for a one-shot container."""
        entrypoint = ''
        if command:
            entrypoint = ' --entrypoint ""'
        else:
            command = '"%s"' % handler
        # add Docker Lambda env vars
        network = config.LAMBDA_DOCKER_NETWORK
        network_str = '--network="%s"' % network if network else ''
        if network == 'host':
            # with host networking, give each invocation its own port pair
            port = str(self.next_port + self.port_offset)
            env_vars['DOCKER_LAMBDA_API_PORT'] = port
            env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port
            self.next_port = (self.next_port + 1) % self.max_port
        env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
        debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
        docker_cmd = self._docker_cmd()
        docker_image = Util.docker_image_for_runtime(runtime)
        rm_flag = Util.get_docker_remove_flag()
        if config.LAMBDA_REMOTE_DOCKER:
            # remote docker daemon: create the container, copy the code in,
            # then start it attached
            cmd = (
                'CONTAINER_ID="$(%s create -i'
                ' %s' # entrypoint
                ' %s' # debug_docker_java_port
                ' %s' # env
                ' %s' # network
                ' %s' # --rm flag
                ' %s %s' # image and command
                ')";'
                '%s cp "%s/." "$CONTAINER_ID:/var/task"; '
                '%s start -ai "$CONTAINER_ID";'
            ) % (docker_cmd, entrypoint, debug_docker_java_port, env_vars_string, network_str, rm_flag,
                docker_image, command,
                docker_cmd, lambda_cwd,
                docker_cmd)
        else:
            # local docker daemon: mount the code directory directly
            lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
            cmd = (
                '%s run -i'
                ' %s -v "%s":/var/task'
                ' %s'
                ' %s' # network
                ' %s' # --rm flag
                ' %s %s'
            ) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,
                network_str, rm_flag, docker_image, command)
        return cmd
class LambdaExecutorLocal(LambdaExecutor):
    """Executor that runs Python Lambdas in-process and Java Lambdas via a
    local `java` subprocess."""
    def _execute(self, func_arn, func_details, event, context=None, version=None):
        lambda_cwd = func_details.cwd
        environment = func_details.envvars.copy()
        # execute the Lambda function in a forked sub-process, sync result via queue
        queue = Queue()
        lambda_function = func_details.function(version)
        def do_execute():
            # now we're executing in the child process, safe to change CWD and ENV
            if lambda_cwd:
                os.chdir(lambda_cwd)
            if environment:
                os.environ.update(environment)
            result = lambda_function(event, context)
            queue.put(result)
        process = Process(target=do_execute)
        with CaptureOutput() as c:
            # NOTE(review): Process.run() invokes do_execute in the CURRENT
            # process (no fork, despite the comment above), so os.chdir /
            # os.environ changes leak into this process - confirm intended.
            process.run()
        result = queue.get()
        # TODO: Interweaving stdout/stderr currently not supported
        log_output = ''
        for stream in (c.stdout(), c.stderr()):
            if stream:
                log_output += ('\n' if log_output else '') + stream
        # store logs to CloudWatch
        _store_logs(func_details, log_output)
        return result
    def execute_java_lambda(self, event, context, main_file, func_details=None):
        """Run a Java Lambda locally by shelling out to `java` with the
        LocalStack executor JAR on the classpath; the event is passed as a
        temp JSON file."""
        handler = func_details.handler
        opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
        event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
        save_file(event_file, json.dumps(event))
        TMP_FILES.append(event_file)
        class_name = handler.split('::')[0]
        classpath = '%s:%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file, Util.get_java_classpath(main_file))
        cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
        LOG.warning(cmd)
        result = self.run_lambda_executor(cmd, func_details=func_details)
        return result
class Util:
    """Helper routines shared by the Lambda executors."""
    # port used for remote Java debugging; False until discovered/allocated
    debug_java_port = False
    @classmethod
    def get_java_opts(cls):
        """Return LAMBDA_JAVA_OPTS with any `_debug_port_` placeholder
        resolved to a free TCP port (remembered on the class)."""
        opts = config.LAMBDA_JAVA_OPTS or ''
        if '_debug_port_' in opts:
            # allocate the debug port once and reuse it on later calls
            if not cls.debug_java_port:
                cls.debug_java_port = get_free_tcp_port()
            return opts.replace('_debug_port_', '%s' % cls.debug_java_port)
        # no placeholder: remember an explicitly configured address= port
        match = re.match('.*address=(\\d+).*', opts)
        if match is not None:
            cls.debug_java_port = match.groups()[0]
        return opts
    @classmethod
    def get_host_path_for_path_in_docker(cls, path):
        """Map a path below TMP_FOLDER onto the corresponding host path."""
        pattern = r'^%s/(.*)$' % config.TMP_FOLDER
        replacement = r'%s/\1' % config.HOST_TMP_FOLDER
        return re.sub(pattern, replacement, path)
    @classmethod
    def docker_image_for_runtime(cls, runtime):
        """Return the (double-quoted) docker image ref for a Lambda runtime."""
        registry = config.LAMBDA_CONTAINER_REGISTRY
        tag = runtime
        # TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
        # See https://github.com/lambci/docker-lambda/pull/218
        prefixed_runtimes = ['dotnetcore', 'python2.7', 'python3.6', 'python3.7']
        if registry == 'lambci/lambda' and any(r in tag for r in prefixed_runtimes):
            tag = '20191117-%s' % tag
        return '"%s:%s"' % (registry, tag)
    @classmethod
    def get_docker_remove_flag(cls):
        """Return '--rm' when containers should be auto-removed, else ''."""
        if config.LAMBDA_REMOVE_CONTAINERS:
            return '--rm'
        return ''
    @classmethod
    def get_java_classpath(cls, archive):
        """
        Return the Java classpath, using the parent folder of the
        given archive as the base folder.
        The result contains any *.jar files in the base folder, as
        well as any JAR files in the "lib/*" subfolder living
        alongside the supplied java archive (.jar or .zip).
        :param archive: an absolute path to a .jar or .zip Java archive
        :return: the Java classpath, relative to the base dir of "archive"
        """
        base_dir = os.path.dirname(archive)
        archive_real = os.path.realpath(archive)
        entries = ['.']
        for pattern in ('%s/*.jar', '%s/lib/*.jar', '%s/*.zip'):
            for candidate in glob.glob(pattern % base_dir):
                if os.path.realpath(candidate) != archive_real:
                    entries.append(os.path.relpath(candidate, base_dir))
        # make sure to append the localstack-utils.jar at the end of the classpath
        # https://github.com/localstack/localstack/issues/1160
        entries.append(os.path.relpath(archive, base_dir))
        return ':'.join(entries)
# --------------
# GLOBAL STATE
# --------------
# Singleton executor instances, created once at import time.
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
# One docker container per invocation is the default execution mode.
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
    'local': EXECUTOR_LOCAL,
    'docker': EXECUTOR_CONTAINERS_SEPARATE,
    'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
|
testing.py | """
Testing utilities.
"""
import os
import re
import struct
import threading
import functools
from tempfile import NamedTemporaryFile
import numpy as np
from numpy import testing
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_array_less, assert_array_almost_equal_nulp,
assert_equal, TestCase, assert_allclose,
assert_almost_equal, assert_, assert_warns,
assert_no_warnings)
from ._warnings import expected_warnings
import warnings
from .. import data, io, img_as_uint, img_as_float, img_as_int, img_as_ubyte
import pytest
# Matches a doctest line ending in "# skip if <expr>"; groups are
# (1) the code, (2) whitespace before the marker, (3) the condition.
# Raw string fixes the invalid "\s" escape sequences of the original
# (DeprecationWarning today, SyntaxError in future Python versions).
SKIP_RE = re.compile(r"(\s*>>>.*?)(\s*)#\s*skip\s+if\s+(.*)$")
# Convenience aliases so test modules can use skimage.testing.* directly
# instead of importing pytest themselves.
skipif = pytest.mark.skipif
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
raises = pytest.raises
fixture = pytest.fixture

# true if python is running in 32bit mode
# Calculate the size of a void * pointer in bits
# https://docs.python.org/3/library/struct.html
arch32 = struct.calcsize("P") * 8 == 32
def assert_less(a, b, msg=None):
    """Assert that ``a < b``, appending *msg* to the failure message if given."""
    detail = "%r is not lower than %r" % (a, b)
    if msg is not None:
        detail = detail + ": " + msg
    assert a < b, detail
def assert_greater(a, b, msg=None):
    """Assert that ``a > b``, appending *msg* to the failure message if given."""
    detail = "%r is not greater than %r" % (a, b)
    if msg is not None:
        detail = detail + ": " + msg
    assert a > b, detail
def doctest_skip_parser(func):
    """Decorator replacing custom skip-test markup in doctests.

    A doctest line may end in ``# skip if <expr>``.  The expression is
    evaluated in the ``globals`` scope of *func* (or of ``func.__init__``
    when used as a class decorator): if it is true, the marker is replaced
    by ``# doctest: +SKIP``; if false, the marker is simply removed.

    Robustness fix: a function without a docstring (e.g. when running under
    ``python -OO``) is now returned unchanged instead of raising
    ``AttributeError`` on ``None.split``.
    """
    if func.__doc__ is None:
        # Nothing to rewrite.
        return func
    new_lines = []
    for line in func.__doc__.split('\n'):
        match = SKIP_RE.match(line)
        if match is None:
            new_lines.append(line)
            continue
        code, space, expr = match.groups()
        try:
            # Works as a function decorator
            if eval(expr, func.__globals__):
                code = code + space + "# doctest: +SKIP"
        except AttributeError:
            # Works as a class decorator (classes have no __globals__)
            if eval(expr, func.__init__.__globals__):
                code = code + space + "# doctest: +SKIP"
        new_lines.append(code)
    func.__doc__ = "\n".join(new_lines)
    return func
def roundtrip(image, plugin, suffix):
    """Save and read an image using a specified plugin.

    Parameters
    ----------
    image : ndarray
        Image data to write.
    plugin : str
        Name of the io plugin used for both save and load.
    suffix : str
        File suffix (with or without the leading dot).

    Returns
    -------
    ndarray
        The image as read back from disk.
    """
    if '.' not in suffix:
        suffix = '.' + suffix
    # delete=False so the plugin can reopen the closed file by name (Windows
    # cannot reopen a NamedTemporaryFile while it is still open).
    temp_file = NamedTemporaryFile(suffix=suffix, delete=False)
    fname = temp_file.name
    temp_file.close()
    io.imsave(fname, image, plugin=plugin)
    new = io.imread(fname, plugin=plugin)
    try:
        os.remove(fname)
    except OSError:
        # Best-effort cleanup only; narrowed from a blanket `except Exception`
        # so real bugs are no longer silently swallowed.
        pass
    return new
def color_check(plugin, fmt='png'):
    """Check roundtrip behavior for color images.

    All major input types should be handled as ubytes and read
    back correctly.
    """
    base = img_as_ubyte(data.chelsea())
    out = roundtrip(base, plugin, fmt)
    testing.assert_allclose(base, out)

    binary = base > 128
    out = roundtrip(binary, plugin, fmt)
    testing.assert_allclose(binary.astype(np.uint8), out)

    as_float = img_as_float(base)
    with expected_warnings(['precision loss']):
        out = roundtrip(as_float, plugin, fmt)
    testing.assert_allclose(out, base)

    with expected_warnings(['precision loss']):
        as_int = img_as_int(base)
    if fmt.lower() in (('tif', 'tiff')):
        as_int -= 100
        with expected_warnings(['sign loss']):
            out = roundtrip(as_int, plugin, fmt)
        testing.assert_allclose(out, as_int)
    else:
        with expected_warnings(['sign loss|precision loss']):
            out = roundtrip(as_int, plugin, fmt)
        testing.assert_allclose(out, img_as_ubyte(as_int))

    as_uint = img_as_uint(base)
    with expected_warnings(['precision loss']):
        out = roundtrip(as_uint, plugin, fmt)
    testing.assert_allclose(out, base)
def mono_check(plugin, fmt='png'):
    """Check the roundtrip behavior for images that support most types.

    All major input types should be handled.
    """
    base = img_as_ubyte(data.moon())
    out = roundtrip(base, plugin, fmt)
    testing.assert_allclose(base, out)

    binary = base > 128
    out = roundtrip(binary, plugin, fmt)
    testing.assert_allclose(binary.astype(np.uint8), out)

    as_float = img_as_float(base)
    with expected_warnings(['precision|\A\Z']):
        out = roundtrip(as_float, plugin, fmt)
    if out.dtype.kind == 'f':
        testing.assert_allclose(as_float, out)
    else:
        testing.assert_allclose(out, img_as_uint(base))

    with expected_warnings(['precision loss']):
        as_int = img_as_int(base)
    if fmt.lower() in (('tif', 'tiff')):
        as_int -= 100
        with expected_warnings(['sign loss|\A\Z']):
            out = roundtrip(as_int, plugin, fmt)
        testing.assert_allclose(out, as_int)
    else:
        with expected_warnings(['precision loss|sign loss']):
            out = roundtrip(as_int, plugin, fmt)
        testing.assert_allclose(out, img_as_uint(as_int))

    as_uint = img_as_uint(base)
    out = roundtrip(as_uint, plugin, fmt)
    testing.assert_allclose(out, as_uint)
def setup_test():
    """Default package level setup routine for skimage tests.

    Import packages known to raise warnings, and then
    force warnings to raise errors.

    Also set the random seed to zero.
    """
    # First pass: let noisy imports emit their warnings without erroring out.
    warnings.simplefilter('default')
    # These imports are intentionally unused: importing them triggers any
    # one-time deprecation/runtime warnings *before* warnings become errors,
    # so they cannot fail unrelated tests later.
    from scipy import signal, ndimage, special, optimize, linalg
    from scipy.io import loadmat
    from skimage import viewer
    np.random.seed(0)
    # From here on, any warning raised inside a test becomes an error.
    warnings.simplefilter('error')
def teardown_test():
    """Undo setup_test: restore the warnings filter to default behavior."""
    warnings.simplefilter('default')
def test_parallel(num_threads=2):
    """Decorator to run the same function multiple times in parallel.

    This decorator is useful to ensure that separate threads execute
    concurrently and correctly while releasing the GIL.

    Parameters
    ----------
    num_threads : int, optional
        The number of times the function is run in parallel.
    """
    assert num_threads > 0

    def wrapper(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            # Launch num_threads - 1 background workers; the calling thread
            # performs the final invocation itself and returns its result.
            workers = [threading.Thread(target=func, args=args, kwargs=kwargs)
                       for _ in range(num_threads - 1)]
            for worker in workers:
                worker.start()
            result = func(*args, **kwargs)
            for worker in workers:
                worker.join()
            return result
        return inner
    return wrapper
if __name__ == '__main__':
    # Manual smoke test: exercise the PIL plugin roundtrips directly.
    color_check('pil')
    mono_check('pil')
    mono_check('pil', 'bmp')
    mono_check('pil', 'tiff')
|
baybot.py | # -*- coding: utf-8 -*-
#Cbay_Bot
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
from gtts import gTTS
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile
# Log in six LINE client instances (cl, ki, kk, kc, kr, km) used as bots.
# SECURITY NOTE(review): these auth tokens are hard-coded credentials
# committed to source; they grant full account access and should be moved
# to environment variables / config and rotated.
cl = LINETCR.LINE()
#cl.login(qr=True)
cl.login(token='ErtRsgoCpgqeNFnkWm9f.Rg+92lfWo2YT7C/n+rivZW.qPPY8svNM72iEIzrtr7vXzCZKs3GQsji3dEF5ZY7IM4=')
cl.loginResult()
print "Cl-Login Success\n"
ki = LINETCR.LINE()
#ki.login(qr=True)
ki.login(token='Eq85c6dIto9gZ5Ze1Ky6.H2hLIeUwny7G0OQeYMppTG.E8M/19wknE0+CCdWHUgQe9Ehbr+k2ojSfOB/VR4NkqY=')
ki.loginResult()
print "Ki-Login Success\n"
kk = LINETCR.LINE()
#kk.login(qr=True)
kk.login(token='Eq6FPz50Prqx0Rac7eZ9.aX69s+8VHHX8+TIz5so52q.CZNFBncnNhVye30w5++JpLuEyKxKCpPq0oxmcX4plsw=')
kk.loginResult()
print "Kk-Login Success\n"
kc = LINETCR.LINE()
#kc.login(qr=True)
kc.login(token='EqLuiE746IIWuTlDuZz0.+qBpV8E+/wZBHli68Pjr0a.NiPG+s7y6IuP8t6VM1HcPZfC99AeE8anujYr1znR1YY=')
kc.loginResult()
print "Kc-Login Success\n"
kr = LINETCR.LINE()
#kr.login(qr=True)
kr.login(token='EqAQunIGccNxC4Ug7V4e.x5MvPa4bCunGKJgtTwS1RG.rooWkpUYbwsnueixW0GZcp5TKtQ+CSixFICrbo/dz4U=')
kr.loginResult()
print "Kr-Login Success\n"
km = LINETCR.LINE()
#km.login(qr=True)
km.login(token='ErWh9E6nmJVGGfghLI4d.oEIB/s3Rr9E51pit4yenRq.ITqHk+hskUjLZNVh/vYLK4tqQgBKcUborqVgaYc7mLU=')
km.loginResult()
print "Km-Login Success\n\n=====[Sukses All Login]====="
# Python 2 only: make utf-8 the process-wide default encoding for the many
# unicode menu strings below (this whole script is Python 2 syntax).
reload(sys)
sys.setdefaultencoding('utf-8')
# ---------------------------------------------------------------------------
# Static menu texts sent in chat in response to the various "Help ..."
# commands. The box-drawing layout is user-facing; do not re-wrap.
# ---------------------------------------------------------------------------

# Menu for "Help self": per-account utility commands.
selfMessage ="""
╔═════════════════════════
║ ☆☞ S E L F ☜☆
╠═════════════════════════
╠➩〘Hi〙
╠➩〘Me〙
╠➩〘Mymid〙
╠➩〘Mid @〙
╠➩〘SearchID: (ID LINE)〙
╠➩〘Checkdate (DD/MM/YY)〙
╠➩〘Kalender〙
╠➩〘Steal contact〙
╠➩〘Pp @〙
╠➩〘Cover @〙
╠➩〘Auto like〙
╠➩〘Scbc Text〙
╠➩〘Cbc Text〙
╠➩〘Gbc Text〙
╠➩〘Getbio @〙
╠➩〘Getinfo @〙
╠➩〘Getname @〙
╠➩〘Getprofile @〙
╠➩〘Getcontact @〙
╠➩〘Getvid @〙
╠➩〘Friendlist〙
╠═════════════════════════
║
╚═════════════════════════
"""
# Menu for "Help bot": controlling the helper bot accounts.
botMessage ="""
╔═════════════════════════
║ ☆☞ B O T ☜☆
╠═════════════════════════
╠➩〘Absen〙
╠➩〘Respon〙
╠➩〘Runtime〙
╠➩〘Mbok copy @〙
╠➩〘bot1 copy @〙
╠➩〘bot2 copy @〙
╠➩〘bot3 copy @〙
╠➩〘bot4 copy @〙
╠➩〘Backup all〙
╠➩〘/bio Text〙
╠➩〘@bye (Usir Mbok)〙
╠➩〘Minggat all(Usir Semua)〙
╠═════════════════════════
║
╚═════════════════════════
"""
# Menu for "Help media": search / music / translate commands.
mediaMessage ="""
╔═════════════════════════
║ ☆☞ M E D I A ☜☆
╠═════════════════════════
╠➩〘Gift〙
╠➩〘Gift1 @ s/d Gift10 @〙
╠➩〘Giftbycontact〙
╠➩〘All gift〙
╠➩〘Gif gore〙
╠➩〘Google: (Text)〙
╠➩〘Playstore NamaApp〙
╠➩〘Fancytext: Text〙
╠➩〘/musik Judul-Penyanyi〙
╠➩〘/lirik Judul-Penyanyi〙
╠➩〘/musrik Judul-Penyanyi〙
╠➩〘/ig UrsnameInstagram〙
╠➩〘Checkig UrsnameInstagram〙
╠➩〘/apakah Text (Kerang Ajaib)〙
╠➩〘/kapan Text (Kerang Ajaib)〙
╠➩〘/hari Text (Kerang Ajaib)〙
╠➩〘/berapa Text (Kerang Ajaib)〙
╠➩〘/berapakah Text〙
╠➩〘Youtubelink: Judul Video〙
╠➩〘Youtubevideo: Judul Video〙
╠➩〘Youtubesearch: Judul Video〙
╠➩〘Image NamaGambar〙
╠➩〘Say-id Text〙
╠➩〘Say-en Text〙
╠➩〘Say-jp Text〙
╠➩〘Image NamaGambar〙
╠➩〘Tr-id Text (Translate En Ke ID〙
╠➩〘Tr-en Text (Translate ID Ke En〙
╠➩〘Tr-th Text (Translate ID Ke Th〙
╠➩〘Id@en Text (Translate ID Ke En〙
╠➩〘Id@th Text (Translate ID Ke TH〙
╠➩〘En@id Text (Translate En Ke ID〙
╠═════════════════════════
║
╚═════════════════════════
"""
# Menu for "Help group": group management commands.
groupMessage ="""
╔═════════════════════════
║ ☆☞ G R O U P ☜☆
╠═════════════════════════
╠➩〘Welcome〙
╠➩〘Say welcome〙
╠➩〘Invite creator〙
╠➩〘Setview〙
╠➩〘Viewseen〙
╠➩〘Gn: (NamaGroup)〙
╠➩〘Tag all〙
╠➩〘Recover〙
╠➩〘Cancel〙
╠➩〘Cancelall〙
╠➩〘Gcreator〙
╠➩〘Ginfo〙
╠➩〘Gurl〙
╠➩〘List group〙
╠➩〘Pict group: (NamaGroup)〙
╠➩〘Spam: (Text)〙
╠➩〘Spam〙
╠➩〘Add all〙
╠➩〘Kick: (Mid)〙
╠➩〘Invite: (Mid)〙
╠➩〘Invite〙
╠➩〘Memlist〙
╠➩〘Getgroup image〙
╠➩〘Urlgroup Image〙
╠═════════════════════════
║
╚═════════════════════════
"""
# LINE mid of the bot owner ("creator"), used for invite-creator features.
tjia="ud5c39db7ab34ca5fb15f99ffef31047f"
# Menu for "Help set": feature on/off toggles.
setMessage ="""
╔═════════════════════════
║ ☆☞ S E T ☜☆
╠═════════════════════════
╠➩〘Sambutan on/off〙
╠➩〘Url on/off〙
╠➩〘Alwaysread on/off〙
╠➩〘Sider on/off〙
╠➩〘Contact on/off〙
╠➩〘Simisimi on/off〙
╠═════════════════════════
║ ️
╚═════════════════════════
"""
# Menu for "Help creator": owner-only commands.
creatorMessage ="""
╔═════════════════════════
║ ☆☞ C R E A T O R ☜☆
╠═════════════════════════
╠➩〘Admin add @〙
╠➩〘Admin remove @〙
╠➩〘/cnMbok〙
╠➩〘Cnbot1〙
╠➩〘Cnbot2〙
╠➩〘Cnbot3〙
╠➩〘Cnbot4〙
╠➩〘Crash〙
╠➩〘Kickall〙
╠➩〘Bc: (Text)〙
╠➩〘Nk: @〙
╠➩〘Ulti @〙
╠➩〘Join group: (NamaGroup〙
╠➩〘Leave group: (NamaGroup〙
╠➩〘Leave all group〙
╠➩〘Bot restart〙
╠➩〘Turn off〙
╠═════════════════════════
║
╚═════════════════════════
"""
# Menu for "Help admin": admin-only commands.
adminMessage ="""
╔═════════════════════════
║ ☆☞ A D M I N ☜☆
╠═════════════════════════
╠➩〘Admin list〙
╠➩〘Ban〙
╠➩〘Unban〙
╠➩〘Ban @〙
╠➩〘Unban @〙
╠➩〘Ban list〙
╠➩〘Clear ban〙
╠➩〘Kill〙
╠➩〘Kick @〙
╠➩〘Set member: (Jumblah)〙
╠➩〘Ban group: (NamaGroup〙
╠➩〘Del ban: (NamaGroup〙
╠➩〘List ban〙
╠➩〘Kill ban〙
╠➩〘Glist〙
╠➩〘Glistmid〙
╠➩〘Details group: (Gid)〙
╠➩〘Cancel invite: (Gid)〙
╠➩〘Invitemeto: (Gid)〙
╠➩〘Mbok acc invite〙
╠➩〘bot1 acc invite〙
╠➩〘bot2 acc invite〙
╠➩〘bot3 acc invite〙
╠➩〘bot4 acc invite〙
╠➩〘Removechat〙
╠➩〘Join on/off〙
╠➩〘Joincancel on/off〙
╠➩〘Respon on/off〙
╠➩〘Responkick on/off〙
╠➩〘Leave on/off〙
╠➩〘All join / (bot1/2/3/4 Join)〙
╠═════════════════════════
║
╚═════════════════════════
"""
# Top-level "Help" menu listing the other menus.
helpMessage ="""
╔═════════════════════════
║ ☆☞ H E L P ☜☆
╠═════════════════════════
╠➩〘Help protect〙
╠➩〘Help self〙
╠➩〘Help bot〙
╠➩〘Help group〙
╠➩〘Help set〙
╠➩〘Help media〙
╠➩〘Help admin〙
╠➩〘Help creator〙
╠➩〘Owner〙
╠➩〘Admin〙
╠➩〘Speed〙
╠➩〘Speed test〙
╠➩〘Status〙
╠═════════════════════════
║
╚═════════════════════════
"""
# Menu for "Help protect": group protection toggles.
protectMessage ="""
╔═════════════════════════
║ ☆☞ P R O T E C T ☜☆
╠═════════════════════════
╠➩〘Allprotect on/off〙
╠➩〘Autocancel on/off〙
╠➩〘Qr on/off〙
╠➩〘Autokick on/off〙
╠➩〘Ghost on/off〙
╠➩〘Invitepro on/off〙
╠═════════════════════════
║
╚═════════════════════════
"""
# Pool of clients used for randomized kick/invite actions.
# NOTE(review): km is logged in above but not included here - confirm whether
# that is intentional.
KAC=[cl,ki,kk,kc,kr]
# mids (LINE user ids) of each bot account.
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = kr.getProfile().mid
Emid = km.getProfile().mid
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid]
# Hard-coded owner and admin mids.
Creator=["ud5c39db7ab34ca5fb15f99ffef31047f","u39e89bf92deb47d7b9d4f213e1810dad"]
admin=["ud5c39db7ab34ca5fb15f99ffef31047f","u39e89bf92deb47d7b9d4f213e1810dad","udf6c3490f0e0f4205f088804076c03a0","u7347c230e0c899ad5b63ce75f572b106","u47db3b35d1ac026a418269e3e3a96209","ub423e2c77e72692a5db5d1647af9341c","u004a34d952c7fdeb24e7aa94f8a75d4a","u96263a70aeb98aa9f5e416622f98e6e1"]
# Snapshot each bot's profile so name/status/picture can be restored later
# ("Backup all" / "copy" commands).
contact = cl.getProfile()
backup1 = cl.getProfile()
backup1.displayName = contact.displayName
backup1.statusMessage = contact.statusMessage
backup1.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup2 = ki.getProfile()
backup2.displayName = contact.displayName
backup2.statusMessage = contact.statusMessage
backup2.pictureStatus = contact.pictureStatus
contact = kk.getProfile()
backup3 = kk.getProfile()
backup3.displayName = contact.displayName
backup3.statusMessage = contact.statusMessage
backup3.pictureStatus = contact.pictureStatus
contact = kc.getProfile()
backup4 = kc.getProfile()
backup4.displayName = contact.displayName
backup4.statusMessage = contact.statusMessage
backup4.pictureStatus = contact.pictureStatus
contact = kr.getProfile()
backup5 = kr.getProfile()
backup5.displayName = contact.displayName
backup5.statusMessage = contact.statusMessage
backup5.pictureStatus = contact.pictureStatus
# Display names used by the "Respon" command.
responsename = cl.getProfile().displayName
responsename2 = ki.getProfile().displayName
responsename3 = kk.getProfile().displayName
responsename4 = kc.getProfile().displayName
responsename5 = kr.getProfile().displayName
# Central mutable feature-flag / per-group state store used by bot().
wait = {
    "LeaveRoom":True,
    "AutoJoin":False,
    "AutoJoinCancel":True,
    "memberscancel":25,
    "Members":1,
    "AutoCancel":{},
    "AutoCancelon":False,
    "joinkick":False,
    "AutoKick":{},
    "AutoKickon":False,
    'pap':{},
    'invite':{},
    'steal':{},
    'gift':{},
    'likeOn':{},
    'Leave':{},
    'detectMention':False,
    'kickMention':False,
    'timeline':True,
    "Timeline":True,
    "comment1":"Nice (^_^)",
    "comment2":"TOP BGT \(○^ω^○)/",
    "comment3":"Lucu Banget!!! ヘ(^_^)ヘ",
    "comment4":"Behahahahaha",
    "comment5":"ヘ(^_^)ヘ",
    "commentOn":True,
    "commentBlack":{},
    "message":"Thx For Add Me (^_^)\nInvite Me To Your Group ヘ(^_^)ヘ",
    "blacklist":{},
    "wblacklist":False,
    "dblacklist":False,
    "Qr":{},
    "Qron":False,
    "Contact":False,
    "Sambutan":True,
    "Ghost":True,
    "inviteprotect":False,
    "alwaysRead":False,
    "Sider":{},
    "Simi":{},
    "lang":"JP",
    "BlGroup":{}
}
# Per-group simisimi toggle.
settings = {
    "simiSimi":{}
}
# "Sider" (lurker detection) state per group.
cctv = {
    "cyduk":{},
    "point":{},
    "sidermem":{}
}
# Read-receipt tracking state per group.
wait2 = {
    "readPoint":{},
    "readMember":{},
    "setTime":{},
    "ROM":{}
}
setTime = {}
setTime = wait2['setTime']
# Process start time, used by the "Runtime" command.
mulai = time.time()
def download_page(url):
    """Fetch *url* and return the response body as a string.

    Uses ``urllib.request`` on Python 3 and ``urllib2`` on Python 2.
    On failure the Python 3 branch prints the error (returning None); the
    Python 2 branch returns the string "Page Not found".
    """
    version = (3, 0)
    cur_version = sys.version_info
    if cur_version >= version:
        # Bug fix: the original used "import urllib,request" and
        # "urllib,request.Request(...)" - tuple expressions that raised
        # ImportError/NameError on Python 3. The correct module is
        # urllib.request.
        import urllib.request
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers=headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
            return respData
        except Exception as e:
            print(str(e))
    else:
        import urllib2
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
            req = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(req)
            page = response.read()
            return page
        except:
            return "Page Not found"
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1:
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def _images_get_all_items(page):
    """Collect every image URL from *page* by scanning forward repeatedly."""
    items = []
    while True:
        item, end = _images_get_next_item(page)
        if item == "no_links":
            break
        items.append(item)
        # Brief pause between parses, preserved from the original scraper.
        time.sleep(0.1)
        page = page[end:]
    return items
def waktu(secs):
    """Format a duration in seconds as 'HH Jam MM Menit SS Detik'."""
    hours, remainder = divmod(secs, 3600)
    mins, secs = divmod(remainder, 60)
    return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
def cms(string, commands):
    """Return True when *string* exactly equals one of *commands*.

    Performance/dead-code fix: the original also looped over a list of
    command prefixes ("+", "@", "/", ...) but never used the loop variable,
    so it merely repeated the same comparisons 13 times. The redundant
    outer loop is removed; observable behavior is unchanged.
    """
    return string in commands
def upload_tempimage(client):
    '''
    Upload a picture of a kitten. We don't ship one, so get creative!
    '''
    # NOTE(review): `album` and `image_path` are not defined anywhere in this
    # file - calling this raises NameError unless another module injects them
    # as globals. They were presumably meant to be parameters; verify against
    # the imgur example script this was copied from before fixing.
    config = {
        'album': album,
        'name': 'bot auto upload',
        'title': 'bot auto upload',
        'description': 'bot auto upload'
    }
    print("Uploading image... ")
    image = client.upload_from_path(image_path, config=config, anon=False)
    print("Done")
    print()
    return image
def sendAudio(self, to_, path):
    # NOTE(review): incomplete early version - it sends an empty
    # contentType-3 (audio) placeholder message and opens the audio file,
    # but never uploads `files` anywhere. A full sendAudio definition later
    # in this file shadows this one, so this copy is dead code; the file
    # handle opened here is also never closed.
    M = Message()
    M.text = None
    M.to = to_
    M.contentMetadata = None
    M.contentPreview = None
    M.contentType = 3
    M_id = self._client.sendMessage(0,M).id
    files = {
        'file': open(path, 'rb'),
    }
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # NOTE(review): builds a Message and bumps a per-recipient counter but
    # never actually sends anything; `profile` and `messageReq` are not
    # defined in this file (NameError when called). Looks copied from a
    # LINETCR client class - confirm before relying on it.
    # WARNING: the mutable default `contentMetadata={}` is shared across calls.
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def sendImage(self, to_, path):
    """Send the image at *path* to a chat via the LINE object store.

    Sends a placeholder contentType-1 (image) message first, then POSTs the
    file content against that message id. Raises on a non-201 response.
    """
    M = Message(to=to_, text=None, contentType=1)
    M.contentMetadata = None
    M.contentPreview = None
    M2 = self._client.sendMessage(0, M)
    M_id = M2.id
    files = {
        'file': open(path, 'rb'),
    }
    params = {
        'name': 'media',
        'oid': M_id,
        # Bug fix: size was computed by opening and reading the whole file a
        # second time (leaking that handle); stat the file instead.
        'size': os.path.getsize(path),
        'type': 'image',
        'ver': '1.0',
    }
    data = {
        'params': json.dumps(params)
    }
    r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
    if r.status_code != 201:
        raise Exception('Upload image failure.')
    return True
def sendImageWithURL(self, to_, url):
    """Download an image from *url* into a temp file and send it via sendImage."""
    import shutil  # not in this script's module-wide import list
    # Bug fixes: `randint` was used unqualified (only `random` is imported,
    # so this raised NameError) and the file was opened in text mode ('w'),
    # which corrupts binary data on Python 3.
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download image failure.')
    try:
        self.sendImage(to_, path)
    except:
        # One retry, mirroring the original behavior.
        try:
            self.sendImage(to_, path)
        except Exception as e:
            raise e
def sendAudio(self, to_, path):
    """Send the audio file at *path* to a chat via the LINE object store.

    Sends a placeholder contentType-3 (audio) message first, then POSTs the
    file content against that message id. Raises on a non-201 response.
    """
    M = Message()
    M.text = None
    M.to = to_
    M.contentMetadata = None
    M.contentPreview = None
    M.contentType = 3
    M_id = self._client.sendMessage(0, M).id
    files = {
        'file': open(path, 'rb'),
    }
    params = {
        'name': 'media',
        'oid': M_id,
        # Bug fix: avoid re-opening and reading the whole file (leaked
        # handle) just to compute its size.
        'size': os.path.getsize(path),
        'type': 'audio',
        'ver': '1.0',
    }
    data = {
        'params': json.dumps(params)
    }
    r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
    if r.status_code != 201:
        raise Exception('Upload audio failure.')
    return True
def sendAudioWithURL(self, to_, url):
    """Download audio from *url* (via downloadFileWithURL) and send it."""
    path = self.downloadFileWithURL(url)
    try:
        self.sendAudio(to_, path)
    except Exception:
        # Bug fix: re-raise the original exception instead of wrapping it in
        # a bare Exception(e), which discarded the type and traceback.
        raise
def sendAudioWithUrl(self, to_, url):
    """Variant of sendAudioWithURL that streams the download itself."""
    import shutil  # not in this script's module-wide import list
    # Bug fixes: '%1.data' was a broken format spec (it produced paths like
    # "pythonLine-5ata"); `randint` was unqualified (NameError); and the
    # stream was written in text mode ('w') instead of binary.
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
    r = requests.get(url, stream=True, verify=False)
    if r.status_code == 200:
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download audio failure.')
    try:
        self.sendAudio(to_, path)
    except Exception as e:
        raise e
def downloadFileWithURL(self, fileUrl):
    """Download *fileUrl* into a temp file and return the local path."""
    import shutil  # not in this script's module-wide import list
    # Bug fix: `randint` was used unqualified (only `random` is imported),
    # and `shutil` was never imported at module level.
    saveAs = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
    r = self.get_content(fileUrl)
    if r.status_code == 200:
        with open(saveAs, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
        return saveAs
    else:
        raise Exception('Download file failure.')
def restart_program():
    """Replace the current process with a fresh run of this script."""
    os.execl(sys.executable, sys.executable, *sys.argv)
def autolike():
    """Like (and comment on) up to 500 recent timeline posts with every bot.

    Performance fix: the original called cl.activity(limit=500) again on
    every loop iteration (500 full feed fetches); the feed is now fetched
    once up front, which the fixed-index iteration assumed anyway.
    """
    hasil = cl.activity(limit=500)
    for zx in range(0, 500):
        post = hasil['result']['posts'][zx]
        if post['postInfo']['liked'] == False:
            try:
                cl.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1001)
                kr.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1001)
                kr.comment(post['userInfo']['mid'], post['postInfo']['postId'], "TOP BGT ")
                ki.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1001)
                ki.comment(post['userInfo']['mid'], post['postInfo']['postId'], "Lucu banget kak")
                kk.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1001)
                kk.comment(post['userInfo']['mid'], post['postInfo']['postId'], "Ngakak gw")
                kc.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1001)
                kc.comment(post['userInfo']['mid'], post['postInfo']['postId'], "Behahahahaha")
                print("Like")
            except:
                pass
        else:
            print("Already Liked")
        time.sleep(0.01)
#thread3 = threading.Thread(target=autolike)
#thread3.daemon = True
#thread3.start()
#--------------------
def likePost():
    """Like owner posts among the 500 most recent timeline entries.

    Performance fix: fetch the activity feed once instead of on every
    iteration. NOTE(review): `owner` is never defined in this file (likely
    meant to be `Creator` or `admin`), so the membership check raises
    NameError - confirm the intended list before use.
    """
    hasil = cl.activity(limit=500)
    for zx in range(0, 500):
        post = hasil['result']['posts'][zx]
        if post['postInfo']['liked'] == False:
            if post['userInfo']['mid'] in owner:
                try:
                    cl.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1002)
                    ki.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1002)
                    kk.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1002)
                    kc.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1002)
                    kr.like(post['userInfo']['mid'], post['postInfo']['postId'], likeType=1002)
                    print("Like")
                except:
                    pass
        else:
            print("Status Sudah di Like")
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if(wait["message"]in[""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = cl.getContact(op.param2).displayName
Name = ki.getContact(op.param2).displayName
Name = kk.getContact(op.param2).displayName
Name = kc.getContact(op.param2).displayName
Name = kr.getContact(op.param2).displayName
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n• " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
cl.sendText(op.param1, "Hallo" + "☞ " + nick[0] + " ☜" + " ")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendImageWithURL(op.param1,image)
else:
cl.sendText(op.param1, "Haii " + "☞ " + nick[1] + " ☜" + "")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendImageWithURL(op.param1,image)
else:
cl.sendText(op.param1, "Nah " + "☞ " + Name + " ☜" + "")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendImageWithURL(op.param1,image)
pass
else:
pass
except:
pass
else:
pass
if op.type == 22:
cl.leaveRoom(op.param1)
if op.type == 21:
cl.leaveRoom(op.param1)
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in Creator:
cl.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Creator:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Creator:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Creator:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Creator:
kr.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Amid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Bmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Cmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in mid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Cmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in mid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Amid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in mid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Amid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Cmid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in mid:
kr.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Amid:
kr.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Bmid:
kr.acceptGroupInvitation(op.param1)
if mid in op.param3:
if wait["AutoJoinCancel"] == False:
G = cl.getGroup(op.param1)
if len(G.members) <= wait["memberscancel"]:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1,"Maaf " + cl.getContact(op.param2).displayName + "\nMember Kurang Dari 30 Orang\nUntuk Info, Silahkan Chat Owner Kami 👇👇👇!")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':tjia}
cl.sendMessage(c)
cl.leaveGroup(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G.preventJoinByTicket = True
cl.updateGroup(G)
cl.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
if mid in op.param3:
if wait["AutoJoin"] == True:
G = cl.getGroup(op.param1)
if len(G.members) <= wait["Members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G.preventJoinByTicket = True
cl.updateGroup(G)
cl.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
else:
if wait["AutoCancel"][op.param1] == True:
if op.param3 in admin:
pass
else:
cl.cancelGroupInvitation(op.param1, [op.param3])
else:
if op.param3 in wait["blacklist"]:
cl.cancelGroupInvitation(op.param1, [op.param3])
cl.sendText(op.param1, "Blacklist Detected")
else:
pass
if op.type == 19:
if wait["AutoKick"][op.param1] == True:
try:
if op.param3 in Creator:
if op.param3 in admin:
if op.param3 in Bots:
pass
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
if mid in op.param3:
if op.param2 in Creator:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G = kc.getGroup(op.param1)
G.preventJoinByTicket = True
kc.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G = kc.getGroup(op.param1)
G.preventJoinByTicket = True
kc.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Creator in op.param3:
if admin in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param2 not in Bots:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 11:
if wait["Qr"][op.param1] == True:
if op.param2 not in Bots:
if op.param2 not in admin:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).updateGroup(G)
if op.type == 17:
if wait["Sambutan"] == True:
if op.param2 in admin:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
kk.sendMessage(c)
kc.sendText(op.param1,"Hay " + cl.getContact(op.param2).displayName + "\nSelamat Datang Di ☞ " + str(ginfo.name) + " ☜" + "\nBudayakan Cek Note ya \n Semoga Betah ^_^")
kr.sendImageWithURL(op.param1,image)
print "MEMBER JOIN TO GROUP"
if op.type == 17:
if wait["joinkick"] == True:
if op.param2 in admin:
if op.param2 in Bots:
return
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
print "MEMBER JOIN KICK TO GROUP"
if op.type == 15:
if wait["Sambutan"] == True:
if op.param2 in admin:
return
ki.sendText(op.param1,"Good Bye " + cl.getContact(op.param2).displayName + "\nSee You Next Time . . . (p′︵‵。) 🤗")
random.choice(KAC).inviteIntoGroup(op.param1,[op.param2])
print "MEMBER HAS LEFT THE GROUP"
if op.type == 13:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.type == 19:
if wait["Ghost"] == True:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
try:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
km.kickoutFromGroup(op.param1,[op.param2])
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
km.sendMessage(c)
km.leaveGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
wait["blacklist"][op.param2] = True
except:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
km.kickoutFromGroup(op.param1,[op.param2])
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
km.sendMessage(c)
km.leaveGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
wait["blacklist"][op.param2] = True
if op.type == 26:
msg = op.message
if msg.text in ["Asem","asem","asyem"]:
cl.sendText(msg.to,"Kecruttt ")
if msg.text in ["mbuh","Mbuh","Mboh","Ben","ben"]:
cl.sendText(msg.to,"Yekk Ngamukan...")
if msg.text in ["Anune","anune"]:
cl.sendText(msg.to,"anune geneo ")
if msg.text in ["Baik","baik","apik","Apik"]:
cl.sendText(msg.to,"Alhamdulillah ")
if msg.text in ["Sue","sue"]:
cl.sendText(msg.to,"Sue Ora Jamu ")
if msg.text in ["Ben","ben"]:
cl.sendText(msg.to,"Anumu kejepit amben ")
if msg.text in ["Bodo","bodo"]:
cl.sendText(msg.to,"Amatnya kemana? ")
if msg.text in ["Di Hongkong","Di Taiwan"]:
cl.sendText(msg.to,"Wah...jauh amat... ")
if msg.text in ["Iya","iya","iyo"]:
cl.sendText(msg.to,"kek gak ikhlas ngmg iyanya... ")
if msg.text in ["ora","Ora","Ndak"]:
cl.sendText(msg.to,"Ora salah opo ora bener?... ")
if msg.text in ["Lemot","lemot"]:
cl.sendText(msg.to,"Ya sabar lah...orang sabar disayang tuhan,yekan ")
if wait["alwaysRead"] == True:
if msg.toType == 0:
cl.sendChatChecked(msg.from_,msg.id)
else:
cl.sendChatChecked(msg.to,msg.id)
if msg.contentType == 16:
if wait['likeOn'] == True:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1005)
ki.like(url[25:58], url[66:], likeType=1002)
kk.like(url[25:58], url[66:], likeType=1004)
kc.like(url[25:58], url[66:], likeType=1003)
kr.like(url[25:58], url[66:], likeType=1001)
cl.comment(url[25:58], url[66:], wait["comment1"])
ki.comment(url[25:58], url[66:], wait["comment2"])
kk.comment(url[25:58], url[66:], wait["comment3"])
kc.comment(url[25:58], url[66:], wait["comment4"])
kr.comment(url[25:58], url[66:], wait["comment5"])
cl.sendText(msg.to,"Like Success")
wait['likeOn'] = False
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
cl.sendText(msg.to,data['result']['response'].encode('utf-8'))
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["kickMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Aku Bilang Jangan Ngetag Lagi " + cName + "\nAku Kick Kamu! Sorry, Byee!!!"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in admin:
cl.sendText(msg.to,ret_)
random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_])
break
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_])
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Sekali lagi nge tag gw sumpahin jomblo seumur hidup!","Dont Tag!! Lagi Sibuk",cName + " Ngapain Ngetag?",cName + " Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja","Tag Mulu Lo Anjirr!","Dia Lagi Off", cName + " Kenapa Tag? Kangen?","Dia Lagi Tidur\nJangan Di Tag " + cName, "Jangan Suka Tag Gua " + cName, "Kamu Siapa " + cName + "?", "Ada Perlu Apa " + cName + "?","Woii " + cName + " Jangan Ngetag, Riibut!"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in admin:
cl.sendText(msg.to,ret_)
break
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
break
if msg.contentType == 13:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] not in admin:
if msg.contentMetadata["mid"] in wait["blacklist"]:
random.choice(KAC).sendText(msg.to,"Sudah")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
random.choice(KAC).sendText(msg.to,"Ditambahkan")
else:
cl.sendText(msg.to,"Admin Detected~")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
random.choice(KAC).sendText(msg.to,"Terhapus")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
random.choice(KAC).sendText(msg.to,"Tidak Ada Black List")
elif wait["Contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu))
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
cl.sendText(msg.to,"[Group name]\n" + str(ginfo.name) + "\n\n[Gid]\n" + msg.to + "\n\n[Group creator]\n" + gCreator + "\n\n[Profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "members\nPending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text is None:
return
elif msg.text in ["Creator","Owner"]:
msg.contentType = 13
msg.contentMetadata = {'mid': tjia}
cl.sendMessage(msg)
cl.sendText(msg.to,"Itu owner Kami (^_^)")
elif msg.text in ["Admin","admin"]:
msg.contentType = 13
admin1 = "ud5c39db7ab34ca5fb15f99ffef31047f"
admin2 = "u39e89bf92deb47d7b9d4f213e1810dad"
admin3 = "uf936537c5dd08238c6df4a2a6cc4452b"
msg.contentMetadata = {'mid': admin1}
random.choice(KAC).sendMessage(msg)
msg.contentMetadata = {'mid': admin2}
random.choice(KAC).sendMessage(msg)
msg.contentMetadata = {'mid': admin3}
random.choice(KAC).sendMessage(msg)
random.choice(KAC).sendText(msg.to,"Itu Admin Kami (^_^)")
elif "Admin add @" in msg.text:
if msg.from_ in Creator:
print "[Command]Admin add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact Tidak Di Temukan")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Cbay Ditambahkan")
except:
pass
print "[Command]Admin add executed"
else:
cl.sendText(msg.to,"Command Denied.")
cl.sendText(msg.to,"Creator Permission Required.")
elif "Admin remove @" in msg.text:
if msg.from_ in Creator:
print "[Command]Admin Remove Executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact Tidak Di Temukan")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Cbay Dihapus")
except:
pass
print "[Command]Admin remove executed"
else:
cl.sendText(msg.to,"Command Denied.")
cl.sendText(msg.to,"Creator Permission Required.")
elif msg.text in ["Admin list","admin list","List admin"]:
if admin == []:
cl.sendText(msg.to,"The Admin List Is Empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "╔═════════════════════════\n║ ☆☞ ADMIN Cbay ☜☆\n╠═════════════════════════\n"
for mi_d in admin:
mc += "╠••> " +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc + "╚═════════════════════════")
print "[Command]Admin List executed"
elif msg.text in ["Group creator","Gcreator","gcreator"]:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': gCreator}
cl.sendMessage(msg)
cl.sendText(msg.to,"Itu Yang Buat Grup Ini")
elif msg.contentType == 16:
if wait["Timeline"] == True:
msg.contentType = 0
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
random.choice(KAC).sendText(msg.to,msg.text)
if msg.contentType == 13:
if wait["steal"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Stealed"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
wait["steal"] = False
break
except:
pass
if msg.contentType == 13:
if wait["gift"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Gift"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
cl.sendText(msg.to,"Hadiah wes dikirim!")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1296261'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
wait['gift'] = False
break
except:
msg.contentMetadata = {'mid': target}
wait["gift"] = False
break
if msg.contentType == 13:
if wait['invite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
groups = ki.getGroup(msg.to)
groups = kk.getGroup(msg.to)
groups = kc.getGroup(msg.to)
groups = kr.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
random.choice(KAC).sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
kk.findAndAddContactsByMid(target)
kc.findAndAddContactsByMid(target)
kr.findAndAddContactsByMid(target)
random.choice(KAC).inviteIntoGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Invite " + _name)
wait['invite'] = False
break
except:
random.choice(KAC).sendText(msg.to,"Limit Invite")
wait['invite'] = False
break
elif msg.text in ["Key creator","help creator","Help creator"]:
cl.sendText(msg.to,creatorMessage)
elif msg.text in ["Key group","help group","Help group"]:
cl.sendText(msg.to,groupMessage)
elif msg.text in ["Key","help","Help"]:
cl.sendText(msg.to,helpMessage)
elif msg.text in ["Key self","help self","Help self"]:
cl.sendText(msg.to,selfMessage)
elif msg.text in ["Key bot","help bot","Help bot"]:
cl.sendText(msg.to,botMessage)
elif msg.text in ["Key set","help set","Help set"]:
cl.sendText(msg.to,setMessage)
elif msg.text in ["Key media","help media","Help media"]:
cl.sendText(msg.to,mediaMessage)
elif msg.text in ["Key admin","help admin","Help admin"]:
cl.sendText(msg.to,adminMessage)
elif msg.text in ["Key protect","help protect","Help protect"]:
cl.sendText(msg.to,protectMessage)
elif msg.text in ["List group"]:
gid = cl.getGroupIdsJoined()
h = ""
jml = 0
for i in gid:
gn = cl.getGroup(i).name
h += "♦【%s】\n" % (gn)
jml += 1
cl.sendText(msg.to,"=======[List Group]=======\n"+ h +"\nTotal Group: "+str(jml))
elif "Ban group: " in msg.text:
grp = msg.text.replace("Ban group: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in admin:
for i in gid:
h = cl.getGroup(i).name
if h == grp:
wait["BlGroup"][i]=True
cl.sendText(msg.to, "Success Ban Group : "+grp)
else:
pass
else:
cl.sendText(msg.to, "Khusus zhu")
elif msg.text in ["List ban","List ban group"]:
if msg.from_ in admin:
if wait["BlGroup"] == {}:
random.choice(KAC).sendText(msg.to,"Tidak Ada")
else:
mc = ""
for gid in wait["BlGroup"]:
mc += "-> " +cl.getGroup(gid).name + "\n"
random.choice(KAC).sendText(msg.to,"===[Ban Group]===\n"+mc)
else:
cl.sendText(msg.to, "Khusus Admin")
elif msg.text in ["Del ban: "]:
if msg.from_ in admin:
ng = msg.text.replace("Del ban: ","")
for gid in wait["BlGroup"]:
if cl.getGroup(gid).name == ng:
del wait["BlGroup"][gid]
cl.sendText(msg.to, "Success del ban "+ng)
else:
pass
else:
cl.sendText(msg.to, "Khusus zhu")
elif "Join group: " in msg.text:
ng = msg.text.replace("Join group: ","")
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = kk.getGroupIdsJoined()
gid = kc.getGroupIdsJoined()
gid = kr.getGroupIdsJoined()
try:
if msg.from_ in Creator:
for i in gid:
h = cl.getGroup(i).name
h = ki.getGroup(i).name
h = kk.getGroup(i).name
h = kc.getGroup(i).name
h = kr.getGroup(i).name
if h == ng:
random.choice(KAC).inviteIntoGroup(i,[Creator])
cl.sendText(msg.to,"Success Join To ["+ h +"] Group")
else:
pass
else:
cl.sendText(msg.to,"Khusus zhu")
except Exception as e:
cl.sendText(msg.to, str(e))
elif "LG: " in msg.text:
ng = msg.text.replace("LG: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.sendText(i,"Papay...kami Di Paksa Keluar!")
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
kr.leaveGroup(i)
cl.sendText(msg.to,"Success Left ["+ h +"] group")
else:
pass
else:
cl.sendText(msg.to,"Khusus zhu")
elif "Leave all group" == msg.text:
gid = cl.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
cl.sendText(i,"Papay...kami Di Paksa Keluar!")
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
kr.leaveGroup(i)
cl.sendText(msg.to,"Success")
else:
cl.sendText(msg.to,"Khusus zhu")
elif "Pict group: " in msg.text:
saya = msg.text.replace('Pict group: ','')
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
gna = cl.getGroup(i)
if h == saya:
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["cancelall","Cancelall"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
cl.sendText(msg.to,"Tidak Ada Yang Pending")
else:
cl.sendText(msg.to,"Tidak Bisa Digunakan Diluar Group")
elif msg.text in ["Ourl","Url on"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
cl.sendText(msg.to,"Url Sudah Aktif")
else:
cl.sendText(msg.to,"Can not be used outside the group")
elif msg.text in ["Curl","Url off"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
cl.sendText(msg.to,"Url Sudah Di Nonaktifkan")
else:
cl.sendText(msg.to,"Can not be used outside the group")
elif msg.text in ["Join on","Autojoin on"]:
if msg.from_ in admin:
wait["AutoJoin"] = True
wait["AutoJoinCancel"] = False
cl.sendText(msg.to,"Auto Join Sudah Aktif")
else:
cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Join off","Autojoin off"]:
if msg.from_ in admin:
wait["AutoJoin"] = False
cl.sendText(msg.to,"Auto Join Sudah Di Nonaktifkan")
else:
cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Joincancel on","Autojoincancel on"]:
if msg.from_ in admin:
wait["AutoJoinCancel"] = True
wait["AutoJoin"] = False
cl.sendText(msg.to,"Auto Join Cancel Sudah Aktif")
else:
cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Joincancel off","Autojoincancel off"]:
if msg.from_ in admin:
wait["AutoJoinCancel"] = False
cl.sendText(msg.to,"Auto Join Cancel Sudah Di Nonaktifkan")
else:
cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Respon on"]:
if msg.from_ in admin:
wait["detectMention"] = True
wait["kickMention"] = False
cl.sendText(msg.to,"Auto Respon Sudah Aktif")
else:
cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Respon off"]:
if msg.from_ in admin:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto Respon Sudah Off")
else:
cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Responkick on"]:
if msg.from_ in admin:
wait["kickMention"] = True
wait["detectMention"] = False
cl.sendText(msg.to,"Auto Respon Kick Sudah Aktif")
else:
cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Responkick off"]:
if msg.from_ in admin:
wait["kickMention"] = False
cl.sendText(msg.to,"Auto Respon Kick Sudah Off")
else:
cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Leave on"]:
if msg.from_ in admin:
wait["Leave"] = True
cl.sendText(msg.to,"Leave Sudah Aktif")
else:
cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Autocancel on"]:
# if msg.from_ in admin:
wait["AutoCancel"][msg.to] = True
wait["AutoCancelon"] = True
cl.sendText(msg.to,"Auto Cancel Sudah Aktif")
print wait["AutoCancel"]
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Autocancel off"]:
# if msg.from_ in admin:
wait["AutoCancel"][msg.to] = False
wait["AutoCancelon"] = False
cl.sendText(msg.to,"Auto Cancel Sudah Di Nonaktifkan")
print wait["AutoCancel"]
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Joinkick on"]:
# if msg.from_ in admin:
wait["joinkick"] = True
wait["Sambutan"] = False
cl.sendText(msg.to,"Join Kick Sudah Aktif")
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Joinkick off"]:
# if msg.from_ in admin:
wait["joinkick"] = False
cl.sendText(msg.to,"Join Kick Sudah Di Nonaktifkan")
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Invitepro on","Inviteprotect on"]:
# if msg.from_ in admin:
wait["inviteprotect"] = True
cl.sendText(msg.to,"Invite Protect Sudah Aktif")
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Invitepro off","Inviteprotect off"]:
# if msg.from_ in admin:
wait["inviteprotect"] = False
cl.sendText(msg.to,"Invite Protect Sudah Di Nonaktifkan")
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif "Qr on" in msg.text:
# if msg.from_ in admin:
wait["Qr"][msg.to] = True
wait["Qron"] = True
cl.sendText(msg.to,"QR Protect Sudah Aktif")
print wait["Qr"]
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif "Qr off" in msg.text:
# if msg.from_ in admin:
wait["Qr"][msg.to] = False
wait["Qron"] = False
cl.sendText(msg.to,"Qr Protect Sudah Di Nonaktifkan")
print wait["Qr"]
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Autokick on"]:
# if msg.from_ in admin:
wait["AutoKick"][msg.to] = True
wait["AutoKickon"] = True
cl.sendText(msg.to,"Auto Kick Sudah Aktif")
print wait["AutoKick"]
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Autokick off"]:
# if msg.from_ in admin:
wait["AutoKick"][msg.to] = False
wait["AutoKickon"] = False
cl.sendText(msg.to,"Auto Kick Sudah Di Nonaktifkan")
print wait["AutoKick"]
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Ghost on"]:
# if msg.from_ in admin:
wait["Ghost"] = True
cl.sendText(msg.to,"Ghost Sudah Aktif")
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Ghost off"]:
# if msg.from_ in admin:
wait["Ghost"] = False
cl.sendText(msg.to,"Ghost Sudah Di Nonaktifkan")
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Allprotect on"]:
if msg.from_ in admin:
wait["AutoCancel"][msg.to] = True
wait["AutoCancelon"] = True
wait["inviteprotect"] = True
wait["joinkick"] = True
wait["AutoKick"][msg.to] = True
wait["AutoKickon"] = True
wait["Qr"][msg.to] = True
wait["Qron"] = True
wait["Ghost"] = True
cl.sendText(msg.to,"All Protect Aktif Semua")
print wait["AutoCancel"]
print wait["AutoKick"]
print wait["Qr"]
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["Allprotect off"]:
# if msg.from_ in admin:
wait["AutoCancel"][msg.to] = False
wait["AutoCancelon"] = False
wait["inviteprotect"] = False
wait["joinkick"] = False
wait["AutoKick"][msg.to] = False
wait["AutoKickon"] = False
wait["Qr"][msg.to] = False
wait["Qron"] = False
wait["Ghost"] = False
cl.sendText(msg.to,"All Protect Sudah Di Nonaktifkan Semua")
print wait["AutoCancel"]
print wait["AutoKick"]
print wait["Qr"]
# else:
# else:
# cl.sendText(msg.to,"Khusus zhu")
elif msg.text in ["K on","Contact on"]:
wait["Contact"] = True
cl.sendText(msg.to,"Contact Sudah Aktif")
elif msg.text in ["K off","Contact off"]:
wait["Contact"] = False
cl.sendText(msg.to,"Contact Sudah Di Nonaktifkan")
elif msg.text in ["Alwaysread on"]:
wait["alwaysRead"] = True
cl.sendText(msg.to,"Always Read Sudah Aktif")
elif msg.text in ["Alwaysread off"]:
wait["alwaysRead"] = False
cl.sendText(msg.to,"Always Read Sudah Di Nonaktifkan")
elif msg.text in ["Sambutan on"]:
if wait["Sambutan"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sambutan Di Aktifkanヾ(*´∀`*)ノ")
else:
wait["Sambutan"] = True
wait["joinkick"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah Onヽ(´▽`)/")
elif msg.text in ["Sambutan off"]:
if wait["Sambutan"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sambutan Di Nonaktifkan( ^∇^)")
else:
wait["Sambutan"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah Off(p′︵‵。)")
elif "Cctv on" in msg.text:
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
wait["Sider"] = True
cl.sendText(msg.to,"mata")
elif "Cctv off" in msg.text:
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
cl.sendText(msg.to, " Off")
else:
cl.sendText(msg.to, "Heh Belom Di Set")
elif msg.text in ["Status"]:
md = ""
if wait["Sambutan"] == True: md+="╠➩✔️ Sambutan : On\n"
else:md+="╠➩❌ Sambutan : Off\n"
if wait["joinkick"] == True: md+="╠➩✔️ Join Kick : On\n"
else:md+="╠➩❌ Join Kick : Off\n"
if wait["AutoJoin"] == True: md+="╠➩✔️ Auto Join : On\n"
else: md +="╠➩❌ Auto Join : Off\n"
if wait["AutoJoinCancel"] == True: md+="╠➩✔️ Auto Join Cancel : On\n"
else: md +="╠➩❌ Auto Join Cancel : Off\n"
if wait["Leave"] == True: md+="╠➩✔️ Leave : On\n"
else: md +="╠➩❌ Leave : Off\n"
if wait["Contact"] == True: md+="╠➩✔️ Info Contact : On\n"
else: md+="╠➩❌ Info Contact : Off\n"
if wait["AutoCancelon"] == True:md+="╠➩✔️ Auto Cancel : On\n"
else: md+= "╠➩❌ Auto Cancel : Off\n"
if wait["inviteprotect"] == True:md+="╠➩✔️ Invite Protect : On\n"
else: md+= "╠➩❌ Invite Protect : Off\n"
if wait["Qron"] == True: md+="╠➩✔️ Qr Protect : On\n"
else:md+="╠➩❌ Qr Protect : Off\n"
if wait["AutoKickon"] == True: md+="╠➩✔️ Auto Kick : On\n"
else:md+="╠➩❌ Auto Kick : Off\n"
if wait["Ghost"] == True: md+="╠�������������✔️ Ghost : On\n"
else:md+="╠➩❌ Ghost : Off\n"
if wait["alwaysRead"] == True: md+="╠➩✔️ Always Read : On\n"
else:md+="╠➩❌ Always Read: Off\n"
if wait["detectMention"] == True: md+="╠➩✔️ Auto Respon : On\n"
else:md+="╠➩❌ Auto Respon : Off\n"
if wait["kickMention"] == True: md+="╠➩✔️ Auto Respon Kick : On\n"
else:md+="╠➩❌ Auto Respon Kick : Off\n"
if wait["Sider"] == True: md+="╠➩✔️ Auto Sider : On\n"
else:md+="╠➩❌ Auto Sider: Off\n"
if wait["Simi"] == True: md+="╠➩✔️ Simisimi : On\n"
else:md+="╠➩❌ Simisimi: Off\n"
cl.sendText(msg.to,"╔═════════════════════════\n""║ ☆☞ S T A T U S ☜☆\n""╠═════════════════════════\n"+md+"╚═════════════════════════")
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["All gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["bot1 Gift","bot1 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '696d7046-843b-4ed0-8aac-3113ed6c0733',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
ki.sendMessage(msg)
elif msg.text in ["bot2 Gift","bot2 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '8fe8cdab-96f3-4f84-95f1-6d731f0e273e',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
kk.sendMessage(msg)
elif msg.text in ["bot3 Gift","bot3 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
kc.sendMessage(msg)
# --- "GiftN @name" branches -------------------------------------------------
# Each branch resolves the @-mention to member mids by substring match on
# displayName, then sends a sticker-gift message directly to each target.
# NOTE(review): `msg.to = target` redirects the shared msg object to the
# target's private chat; the bare `except` only stashes the mid and drops the
# error silently. Branches differ only in MSGTPL/STKPKGID.
elif "Gift1 " in msg.text:
    msg.contentType = 13
    nk0 = msg.text.replace("Gift1 ","")
    nk1 = nk0.lstrip()
    nk2 = nk1.replace("@","")
    nk3 = nk2.rstrip()
    _name = nk3
    gs = cl.getGroup(msg.to)
    targets = []
    for s in gs.members:
        if _name in s.displayName:
            targets.append(s.mid)
    if targets == []:
        sendMessage(msg.to,"user does not exist")
        pass
    else:
        for target in targets:
            try:
                kr.sendText(msg.to,_name + " cek Hadiahmu")
                msg.contentType = 9
                msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                      'STKVER': '1',
                                      'MSGTPL': '1',
                                      'STKPKGID': '1380280'}
                msg.to = target
                msg.text = None
                cl.sendMessage(msg)
            except:
                msg.contentMetadata = {'mid': target}
elif "Gift2 " in msg.text:
    msg.contentType = 13
    nk0 = msg.text.replace("Gift2 ","")
    nk1 = nk0.lstrip()
    nk2 = nk1.replace("@","")
    nk3 = nk2.rstrip()
    _name = nk3
    gs = cl.getGroup(msg.to)
    targets = []
    for s in gs.members:
        if _name in s.displayName:
            targets.append(s.mid)
    if targets == []:
        sendMessage(msg.to,"user does not exist")
        pass
    else:
        for target in targets:
            try:
                kr.sendText(msg.to,_name + " cek Hadiahmu")
                msg.contentType = 9
                msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                      'STKVER': '1',
                                      'MSGTPL': '2',
                                      'STKPKGID': '1360738'}
                msg.to = target
                msg.text = None
                cl.sendMessage(msg)
            except:
                msg.contentMetadata = {'mid': target}
elif "Gift3 " in msg.text:
    msg.contentType = 13
    nk0 = msg.text.replace("Gift3 ","")
    nk1 = nk0.lstrip()
    nk2 = nk1.replace("@","")
    nk3 = nk2.rstrip()
    _name = nk3
    gs = cl.getGroup(msg.to)
    targets = []
    for s in gs.members:
        if _name in s.displayName:
            targets.append(s.mid)
    if targets == []:
        sendMessage(msg.to,"user does not exist")
        pass
    else:
        for target in targets:
            try:
                kr.sendText(msg.to,_name + " cek Hadiahmu")
                msg.contentType = 9
                msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                      'STKVER': '1',
                                      'MSGTPL': '3',
                                      'STKPKGID': '1395389'}
                msg.to = target
                msg.text = None
                cl.sendMessage(msg)
            except:
                msg.contentMetadata = {'mid': target}
elif "Gift4 " in msg.text:
    msg.contentType = 13
    nk0 = msg.text.replace("Gift4 ","")
    nk1 = nk0.lstrip()
    nk2 = nk1.replace("@","")
    nk3 = nk2.rstrip()
    _name = nk3
    gs = cl.getGroup(msg.to)
    targets = []
    for s in gs.members:
        if _name in s.displayName:
            targets.append(s.mid)
    if targets == []:
        sendMessage(msg.to,"user does not exist")
        pass
    else:
        for target in targets:
            try:
                kr.sendText(msg.to,_name + " cek Hadiahmu")
                msg.contentType = 9
                msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                      'STKVER': '1',
                                      'MSGTPL': '4',
                                      'STKPKGID': '1329191'}
                msg.to = target
                msg.text = None
                cl.sendMessage(msg)
            except:
                msg.contentMetadata = {'mid': target}
elif "Gift5 " in msg.text:
    msg.contentType = 13
    nk0 = msg.text.replace("Gift5 ","")
    nk1 = nk0.lstrip()
    nk2 = nk1.replace("@","")
    nk3 = nk2.rstrip()
    _name = nk3
    gs = cl.getGroup(msg.to)
    targets = []
    for s in gs.members:
        if _name in s.displayName:
            targets.append(s.mid)
    if targets == []:
        sendMessage(msg.to,"user does not exist")
        pass
    else:
        for target in targets:
            try:
                kr.sendText(msg.to,_name + " cek Hadiahmu")
                msg.contentType = 9
                msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                      'STKVER': '1',
                                      'MSGTPL': '1',
                                      'STKPKGID': '9057'}
                msg.to = target
                msg.text = None
                cl.sendMessage(msg)
            except:
                msg.contentMetadata = {'mid': target}
elif "Gift6 " in msg.text:
    msg.contentType = 13
    nk0 = msg.text.replace("Gift6 ","")
    nk1 = nk0.lstrip()
    nk2 = nk1.replace("@","")
    nk3 = nk2.rstrip()
    _name = nk3
    gs = cl.getGroup(msg.to)
    targets = []
    for s in gs.members:
        if _name in s.displayName:
            targets.append(s.mid)
    if targets == []:
        sendMessage(msg.to,"user does not exist")
        pass
    else:
        for target in targets:
            try:
                kr.sendText(msg.to,_name + " cek Hadiahmu")
                msg.contentType = 9
                msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                      'STKVER': '1',
                                      'MSGTPL': '2',
                                      'STKPKGID': '9167'}
                msg.to = target
                msg.text = None
                cl.sendMessage(msg)
            except:
                msg.contentMetadata = {'mid': target}
elif "Gift7 " in msg.text:
    msg.contentType = 13
    nk0 = msg.text.replace("Gift7 ","")
    nk1 = nk0.lstrip()
    nk2 = nk1.replace("@","")
    nk3 = nk2.rstrip()
    _name = nk3
    gs = cl.getGroup(msg.to)
    targets = []
    for s in gs.members:
        if _name in s.displayName:
            targets.append(s.mid)
    if targets == []:
        sendMessage(msg.to,"user does not exist")
        pass
    else:
        for target in targets:
            try:
                kr.sendText(msg.to,_name + " cek Hadiahmu")
                msg.contentType = 9
                msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                      'STKVER': '1',
                                      'MSGTPL': '3',
                                      'STKPKGID': '7334'}
                msg.to = target
                msg.text = None
                cl.sendMessage(msg)
            except:
                msg.contentMetadata = {'mid': target}
elif "Gift8 " in msg.text:
    msg.contentType = 13
    nk0 = msg.text.replace("Gift8 ","")
    nk1 = nk0.lstrip()
    nk2 = nk1.replace("@","")
    nk3 = nk2.rstrip()
    _name = nk3
    gs = cl.getGroup(msg.to)
    targets = []
    for s in gs.members:
        if _name in s.displayName:
            targets.append(s.mid)
    if targets == []:
        sendMessage(msg.to,"user does not exist")
        pass
    else:
        for target in targets:
            try:
                kr.sendText(msg.to,_name + " cek Hadiahmu")
                msg.contentType = 9
                msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                      'STKVER': '1',
                                      'MSGTPL': '1',
                                      'STKPKGID': '1380280'}
                msg.to = target
                msg.text = None
                cl.sendMessage(msg)
            except:
                msg.contentMetadata = {'mid': target}
elif "Gift9 " in msg.text:
    msg.contentType = 13
    nk0 = msg.text.replace("Gift9 ","")
    nk1 = nk0.lstrip()
    nk2 = nk1.replace("@","")
    nk3 = nk2.rstrip()
    _name = nk3
    gs = cl.getGroup(msg.to)
    targets = []
    for s in gs.members:
        if _name in s.displayName:
            targets.append(s.mid)
    if targets == []:
        sendMessage(msg.to,"user does not exist")
        pass
    else:
        for target in targets:
            try:
                kr.sendText(msg.to,_name + " cek Hadiahmu")
                msg.contentType = 9
                msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                      'STKVER': '1',
                                      'MSGTPL': '4',
                                      'STKPKGID': '1405277'}
                msg.to = target
                msg.text = None
                cl.sendMessage(msg)
            except:
                msg.contentMetadata = {'mid': target}
elif "Gift10 " in msg.text:
    msg.contentType = 13
    nk0 = msg.text.replace("Gift10 ","")
    nk1 = nk0.lstrip()
    nk2 = nk1.replace("@","")
    nk3 = nk2.rstrip()
    _name = nk3
    gs = cl.getGroup(msg.to)
    targets = []
    for s in gs.members:
        if _name in s.displayName:
            targets.append(s.mid)
    if targets == []:
        sendMessage(msg.to,"user does not exist")
        pass
    else:
        for target in targets:
            try:
                kr.sendText(msg.to,_name + " cek Hadiahmu")
                msg.contentType = 9
                msg.contentMetadata= {'PRDTYPE': 'STICKER',
                                      'STKVER': '1',
                                      'MSGTPL': '1',
                                      'STKPKGID': '1296261'}
                msg.to = target
                msg.text = None
                cl.sendMessage(msg)
            except:
                msg.contentMetadata = {'mid': target}
# --- Keyword auto-sticker replies -------------------------------------------
# Each branch matches a lowercased chat keyword and replies with a sticker
# (contentType 7) from the default sticker package (STKPKGID '1');
# only STKID differs between branches.
elif msg.text.lower() in ["wkwkwk","wkwk","hahaha","haha"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '100',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["hehehe","hehe"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '10',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["galau"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '9',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["you","kau","kamu"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '7',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["marah","hadeuh","hadeh"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '6',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["please","pliss","mohon","tolong"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '4',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["haa","haaa","kaget"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '3',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["lucu","ngakak","lol"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '110',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["hmm","hmmm"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '101',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["tidur"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '1',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["gemes"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '2',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["cantik","imut"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '5',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["nyanyi","lalala"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '11',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["gugup"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '8',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["ok","oke","okay","oce","okee","sip","siph"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '13',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["mantab","mantap","nice","keren"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '14',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["ngejek"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '15',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["nangis","sedih"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '16',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["woi","kampret"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '102',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text.lower() in ["huft"]:
    msg.contentType = 7
    msg.contentMetadata={'STKID': '104',
                         'STKPKGID': '1',
                         'STKVER': '100'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text in ["Tagall","Tag all","tegal","Tegal","Tegall","tegall"]:
    # Mention every group member in one message by hand-building the
    # MENTIONEES JSON: each "@nrik \n" placeholder is 7 chars, and
    # S/E are the character offsets of the mention inside msg.text.
    group = cl.getGroup(msg.to)
    nama = [contact.mid for contact in group.members]
    cb = ""       # accumulated MENTIONEES JSON fragments
    cb2 = ""      # visible message body of "@nrik" placeholders
    strt = int(0)
    akh = int(0)
    for md in nama:
        akh = akh + int(6)  # mention span covers "@nrik " (6 chars)
        cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
        strt = strt + int(7)  # advance past "@nrik \n"
        akh = akh + 1
        cb2 += "@nrik \n"
    cb = (cb[:int(len(cb)-1)])  # drop trailing comma
    msg.contentType = 0
    msg.text = cb2
    msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
    try:
        cl.sendMessage(msg)
    except Exception as error:
        print error
elif msg.text in ["Setview","Setpoint","Cctv","Nyimak","nyimak"]:
    # Reset the read-receipt log for this chat (one file per chat id).
    subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
    cl.sendText(msg.to, "cctv")
    print "Setview"
elif msg.text in ["Viewseen","Check","Ciduk","Cyduk"]:
    # Report who has read messages since the last checkpoint: parse the
    # "mid|timestamp" lines logged in dataSeen/<chat>.txt, dedupe by user,
    # resolve display names, then reset the log.
    lurkGroup = ""
    dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
    with open('dataSeen/'+msg.to+'.txt','r') as rr:
        contactArr = rr.readlines()
    for v in xrange(len(contactArr) -1,0,-1):
        num = re.sub(r'\n', "", contactArr[v])
        contacts.append(num)
        pass
    contacts = list(set(contacts))
    for z in range(len(contacts)):
        arg = contacts[z].split('|')   # "mid|epoch-millis"
        userList.append(arg[0])
        timelist.append(arg[1])
    uL = list(set(userList))
    for ll in range(len(uL)):
        try:
            getIndexUser = userList.index(uL[ll])
            timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
            recheckData.append(userList[getIndexUser])
        except IndexError:
            # NOTE(review): `conName` is not defined anywhere in view;
            # this except path would raise NameError if ever hit.
            conName.append('nones')
            pass
    contactId = cl.getContacts(recheckData)
    for v in range(len(recheckData)):
        dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
        pass
    if len(dataResult) > 0:
        tukang = "╔═════════════════════════\n║ ☆☞ Daftar CCTV☜☆\n╠═════════════════════════\n╠➩"
        grp = '\n╠➩ '.join(str(f) for f in dataResult)
        total = '\n╠═════════════════════════\n╠➩ Total %i Viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S')) + "\n╚═════════════════════════"
        cl.sendText(msg.to, "%s %s %s" % (tukang, grp, total))
        # Checkpoint: clear the log so the next report starts fresh.
        subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
        cl.sendText(msg.to, "☆Auto Checkpoint☆")
    else:
        cl.sendText(msg.to, "☆Durung Ono CCTV☆")
    print "Viewseen"
#------------- Tag All section ends here ---------------#
elif msg.text in ["Bot Like", "Bot like"]:  # all bots like the owner account's posts
    if msg.from_ in Creator:
        print "[Command]Like executed"
        cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
        try:
            likePost()
        except:
            # best-effort: ignore any failure from the like helper
            pass
elif msg.text in ["Like temen", "liketemen"]:  # all bots like friends' posts
    if msg.from_ in Creator:
        print "[Command]Like executed"
        cl.sendText(msg.to,"Siap Boss")
        cl.sendText(msg.to,"Siap Like Status\nDelay untuk beberapa Detik\nJangan perintah dulu sampai kami Selesai Ngelike")
        try:
            autolike()
        except:
            pass
elif "Kick " in msg.text:
if msg.from_ in admin:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
print mentionees
for mention in mentionees:
ki.kickoutFromGroup(msg.to,[mention['M']])
elif "Set member: " in msg.text:
    # Admin-only: set the minimum member count used by the auto-cancel guard.
    if msg.from_ in admin:
        jml = msg.text.replace("Set member: ","")
        wait["memberscancel"] = int(jml)
        cl.sendText(msg.to, "Jumlah minimal member telah di set : "+jml)
elif "Add all" in msg.text:
    # Friend-add the first 33 members of the current group.
    thisgroup = cl.getGroups([msg.to])
    Mids = [contact.mid for contact in thisgroup[0].members]
    mi_d = Mids[:33]
    cl.findAndAddContactsByMids(mi_d)
    cl.sendText(msg.to,"Success Add all")
elif msg.text in ["Invite"]:
    # Arm the "invite next shared contact" mode.
    wait["invite"] = True
    cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Like"]:
    wait["likeOn"] = True
    cl.sendText(msg.to,"Sini Shere Postmu biar ku Like!")
elif msg.text in ["Steal contact"]:
    wait["steal"] = True
    cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Giftc"]:
    wait["gift"] = True
    cl.sendText(msg.to,"Send Contact")
elif "Recover" in msg.text:
    # Create a new group named "Recover" seeded with up to 33 current members.
    thisgroup = cl.getGroups([msg.to])
    Mids = [contact.mid for contact in thisgroup[0].members]
    mi_d = Mids[:33]
    cl.createGroup("Recover", mi_d)
    cl.sendText(msg.to,"Success recover")
elif ("Gn: " in msg.text):
    # Rename the group (toType 2 == group chat).
    if msg.toType == 2:
        X = cl.getGroup(msg.to)
        X.name = msg.text.replace("Gn: ","")
        cl.updateGroup(X)
    else:
        cl.sendText(msg.to,"It can't be used besides the group.")
elif "Kick: " in msg.text:
    # Kick by raw mid, via a randomly chosen bot; admins are protected.
    midd = msg.text.replace("Kick: ","")
    kicker = [ki,kk,kc]
    if midd not in admin:
        random.choice(kicker).kickoutFromGroup(msg.to,[midd])
    else:
        cl.sendText(msg.to,"Admin Detected")
elif "Invite: " in msg.text:
    # Friend-add the mid on every account, then invite from a random one.
    midd = msg.text.replace("Invite: ","")
    cl.findAndAddContactsByMid(midd)
    ki.findAndAddContactsByMid(midd)
    kk.findAndAddContactsByMid(midd)
    kc.findAndAddContactsByMid(midd)
    kr.findAndAddContactsByMid(midd)
    random.choice(KAC).inviteIntoGroup(msg.to,[midd])
elif "Invite creator" in msg.text:
    # Invite the hard-coded owner account into this group.
    midd = "u14f64e139a3817afaabe27d237afb36b"
    random.choice(KAC).inviteIntoGroup(msg.to,[midd])
elif msg.text in ["Welcome","welcome","Welkam","welkam","Wc","wc"]:
    # Bot 2 greets the group by name and follows up with a sticker.
    gs = cl.getGroup(msg.to)
    kk.sendText(msg.to,"Selamat Datang Di "+ gs.name)
    msg.contentType = 7
    msg.contentMetadata={'STKID': '247',
                         'STKPKGID': '3',
                         'STKVER': '100'}
    msg.text = None
    kk.sendMessage(msg)
elif "Bc: " in msg.text:
    # Creator-only broadcast of the text to every joined group.
    bc = msg.text.replace("Bc: ","")
    gid = cl.getGroupIdsJoined()
    if msg.from_ in Creator:
        for i in gid:
            cl.sendText(i,""+bc+"")
        cl.sendText(msg.to,"Success BC BosQ")
    else:
        cl.sendText(msg.to,"Khusus Admin")
# The four Cancel variants reject all pending group invitations,
# one per account.
elif msg.text in ["Cancel"]:
    gid = cl.getGroupIdsInvited()
    for i in gid:
        cl.rejectGroupInvitation(i)
    cl.sendText(msg.to,"All invitations have been refused")
elif msg.text in ["bot1 Cancel"]:
    gid = ki.getGroupIdsInvited()
    for i in gid:
        ki.rejectGroupInvitation(i)
    ki.sendText(msg.to,"All invitations have been refused")
elif msg.text in ["bot2 Cancel"]:
    gid = kk.getGroupIdsInvited()
    for i in gid:
        kk.rejectGroupInvitation(i)
    kk.sendText(msg.to,"All invitations have been refused")
elif msg.text in ["bot3 Cancel"]:
    gid = kc.getGroupIdsInvited()
    for i in gid:
        kc.rejectGroupInvitation(i)
    kc.sendText(msg.to,"All invitations have been refused")
elif msg.text in ["Gurl"]:
    # Post the group's join-by-ticket URL (unlocking ticket joins if needed).
    if msg.toType == 2:
        x = cl.getGroup(msg.to)
        if x.preventJoinByTicket == True:
            x.preventJoinByTicket = False
            cl.updateGroup(x)
        gurl = cl.reissueGroupTicket(msg.to)
        cl.sendText(msg.to,"line://ti/g/" + gurl)
    else:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Can't be used outside the group")
        else:
            cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["masuk","Join all"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kr.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
G.preventJoinByTicket(G)
ki.updateGroup(G)
else:
cl.sendText(msg.to,"Sape lu!")
# --- Per-bot join branches: same unlock/ticket/join/re-lock dance as
# "Join all" but for one account at a time.
elif msg.text in ["bot1 join"]:
    if msg.from_ in admin:
        X = cl.getGroup(msg.to)
        X.preventJoinByTicket = False
        cl.updateGroup(X)
        invsend = 0
        Ti = cl.reissueGroupTicket(msg.to)
        ki.acceptGroupInvitationByTicket(msg.to,Ti)
        G = kk.getGroup(msg.to)
        G.preventJoinByTicket = True
        ki.updateGroup(G)
    else:
        cl.sendText(msg.to,"Sape lu!")
elif msg.text in ["bot2 join"]:
    if msg.from_ in admin:
        X = cl.getGroup(msg.to)
        X.preventJoinByTicket = False
        cl.updateGroup(X)
        invsend = 0
        Ti = cl.reissueGroupTicket(msg.to)
        kk.acceptGroupInvitationByTicket(msg.to,Ti)
        G = ki.getGroup(msg.to)
        G.preventJoinByTicket = True
        kk.updateGroup(G)
    else:
        cl.sendText(msg.to,"Sape lu!")
elif msg.text in ["bot3 join"]:
    if msg.from_ in admin:
        G = cl.getGroup(msg.to)
        ginfo = cl.getGroup(msg.to)
        G.preventJoinByTicket = False
        cl.updateGroup(G)
        invsend = 0
        Ticket = cl.reissueGroupTicket(msg.to)
        kc.acceptGroupInvitationByTicket(msg.to,Ticket)
        G.preventJoinByTicket = True
        kc.updateGroup(G)
    else:
        cl.sendText(msg.to,"Sape lu!")
elif msg.text in ["bot4 join"]:
    if msg.from_ in admin:
        G = cl.getGroup(msg.to)
        ginfo = cl.getGroup(msg.to)
        G.preventJoinByTicket = False
        cl.updateGroup(G)
        invsend = 0
        Ticket = cl.reissueGroupTicket(msg.to)
        kr.acceptGroupInvitationByTicket(msg.to,Ticket)
        G.preventJoinByTicket = True
        kr.updateGroup(G)
    else:
        cl.sendText(msg.to,"Sape lu!")
elif msg.text in ["GJ"]:
    # Same as above for the `km` account.
    if msg.from_ in admin:
        G = cl.getGroup(msg.to)
        ginfo = cl.getGroup(msg.to)
        G.preventJoinByTicket = False
        cl.updateGroup(G)
        invsend = 0
        Ticket = cl.reissueGroupTicket(msg.to)
        km.acceptGroupInvitationByTicket(msg.to,Ticket)
        G.preventJoinByTicket = True
        km.updateGroup(G)
    else:
        cl.sendText(msg.to,"Sape lu!")
elif msg.text in ["timeline"]:
    # Post the id of the most recent timeline post.
    try:
        url = cl.activity(limit=5)
        cl.sendText(msg.to,url['result']['posts'][0]['postInfo']['postId'])
    except Exception as E:
        print E
elif msg.text in ["Minggat bot"]:
    # All helper bots leave this group (only when Leave mode is armed).
    if wait["Leave"] == True:
        ki.leaveGroup(msg.to)
        kk.leaveGroup(msg.to)
        kc.leaveGroup(msg.to)
        kr.leaveGroup(msg.to)
    else:
        cl.sendText(msg.to,"Leave urung On")
elif msg.text in ["@bye","@Bye","@minggat"]:
    # Main account leaves and disarms Leave mode.
    if wait["Leave"] == True:
        cl.leaveGroup(msg.to)
        wait["Leave"] = False
    else:
        cl.sendText(msg.to,"Ngomong dulu Admin Ku")
elif msg.text in ["Absen"]:
    # Roll call: every account answers in turn.
    cl.sendText(msg.to,"Absen Grak!!")
    ki.sendText(msg.to," Paimin Hadiir \(ˆ▿ˆ)/")
    kk.sendText(msg.to," Prapto Hadiir \(ˆ▿ˆ)/")
    kc.sendText(msg.to," Parji Hadiir \(ˆ▿ˆ)/")
    kr.sendText(msg.to," Mbok Sundari Hadiir \(ˆ▿ˆ)/")
    cl.sendText(msg.to,"Semua Hadirr grakk...!")
elif "Tagme: " in msg.text:
    # Set the auto-reply text used when this account is tagged.
    c = msg.text.replace("Tagme: ","")
    if c in [""," ","\n",None]:
        acil.sendText(msg.to,"Is a string that can not be changed✔")
    else:
        wait["tagme"] = c
        acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif msg.text.lower() in ["respon"]:
    # Each account replies with its configured response name.
    cl.sendText(msg.to,responsename)
    ki.sendText(msg.to,responsename2)
    kk.sendText(msg.to,responsename3)
    kc.sendText(msg.to,responsename4)
    kr.sendText(msg.to,responsename5)
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
print("Speed")
elapsed_time = time.time() - start
cl.sendText(msg.to, "Progress...")
random.choice(KAC).sendText(msg.to, "%sseconds" % (elapsed_time))
elif msg.text in ["Speed test"]:
start = time.time()
cl.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
random.choice(KAC).sendText(msg.to, "%sseconds" % (elapsed_time))
elif "Nk: " in msg.text:
    # Creator-only "ninja kick": bot4 (kr) joins via ticket, kicks every
    # member whose display name contains the argument, then leaves.
    if msg.from_ in Creator:
        X = cl.getGroup(msg.to)
        X.preventJoinByTicket = False
        cl.updateGroup(X)
        invsend = 0
        Ti = cl.reissueGroupTicket(msg.to)
        kr.acceptGroupInvitationByTicket(msg.to,Ti)
        G = kk.getGroup(msg.to)
        G.preventJoinByTicket = True
        kk.updateGroup(G)
        nk0 = msg.text.replace("Nk: ","")
        nk1 = nk0.lstrip()
        nk2 = nk1.replace("@","")
        nk3 = nk2.rstrip()
        _name = nk3
        targets = []
        for s in X.members:
            if _name in s.displayName:
                targets.append(s.mid)
        if targets == []:
            sendMessage(msg.to,"user does not exist")
            pass
        else:
            for target in targets:
                if target not in admin:
                    kr.kickoutFromGroup(msg.to,[target])
                    kr.leaveGroup(msg.to)
                    ki.sendText(msg.to,"Succes BosQ")
                    kk.sendText(msg.to,"Pakyu~")
                else:
                    cl.sendText(msg.to,"Admin Detected")
    else:
        cl.sendText(msg.to,"Lu sape!")
# --- Blacklist management: wait["blacklist"] maps mid -> True, persisted
# to st2__b.json after each change.
elif msg.text in ["Ban"]:
    # Arm "blacklist next shared contact" mode.
    if msg.from_ in admin:
        wait["wblacklist"] = True
        ki.sendText(msg.to,"send contact")
elif msg.text in ["Unban"]:
    if msg.from_ in admin:
        wait["dblacklist"] = True
        ki.sendText(msg.to,"send contact")
elif "Ban @" in msg.text:
    # Blacklist by exact display-name match among current group members.
    if msg.from_ in admin:
        if msg.toType == 2:
            print "@Ban by mention"
            _name = msg.text.replace("Ban @","")
            _nametarget = _name.rstrip(' ')
            gs = ki.getGroup(msg.to)
            gs = kk.getGroup(msg.to)
            gs = kc.getGroup(msg.to)
            targets = []
            for g in gs.members:
                if _nametarget == g.displayName:
                    targets.append(g.mid)
            if targets == []:
                kc.sendText(msg.to,"Not found")
            else:
                for target in targets:
                    if target not in admin:
                        try:
                            wait["blacklist"][target] = True
                            f=codecs.open('st2__b.json','w','utf-8')
                            json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
                            ki.sendText(msg.to,"Succes BosQ")
                        except:
                            ki.sendText(msg.to,"Error")
                    else:
                        cl.sendText(msg.to,"Admin Detected~")
elif msg.text in ["Banlist","Ban list"]:
    # List the display names of everyone currently blacklisted.
    if msg.from_ in admin:
        if wait["blacklist"] == {}:
            random.choice(KAC).sendText(msg.to,"Tidak Ada")
        else:
            mc = ""
            for mi_d in wait["blacklist"]:
                mc += "->" +cl.getContact(mi_d).displayName + "\n"
            random.choice(KAC).sendText(msg.to,"===[Blacklist User]===\n"+mc)
elif "Unban @" in msg.text:
    # Remove a display-name match from the blacklist and persist.
    if msg.toType == 2:
        print "@Unban by mention"
        if msg.from_ in admin:
            _name = msg.text.replace("Unban @","")
            _nametarget = _name.rstrip(' ')
            gs = ki.getGroup(msg.to)
            gs = kk.getGroup(msg.to)
            gs = kc.getGroup(msg.to)
            targets = []
            for g in gs.members:
                if _nametarget == g.displayName:
                    targets.append(g.mid)
            if targets == []:
                kk.sendText(msg.to,"Not found")
            else:
                for target in targets:
                    try:
                        del wait["blacklist"][target]
                        f=codecs.open('st2__b.json','w','utf-8')
                        json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
                        ki.sendText(msg.to,"Succes BosQ")
                    except:
                        ki.sendText(msg.to,"Succes BosQ")
elif msg.text.lower() == 'clear ban':
    # Wipe the in-memory blacklist (not persisted here).
    if msg.from_ in admin:
        wait["blacklist"] = {}
        cl.sendText(msg.to,"ヽ( ^ω^)ノ└ ❉Unbanned All Success❉ ┐")
elif msg.text in ["Kill ban"]:
    # Kick every blacklisted user present in this group.
    if msg.from_ in admin:
        if msg.toType == 2:
            group = cl.getGroup(msg.to)
            gMembMids = [contact.mid for contact in group.members]
            matched_list = []
            for tag in wait["blacklist"]:
                matched_list+=filter(lambda str: str == tag, gMembMids)
            if matched_list == []:
                ki.sendText(msg.to,"There was no blacklist user")
                return
            for jj in matched_list:
                random.choice(KAC).kickoutFromGroup(msg.to,[jj])
            ki.sendText(msg.to,"Blacklist emang pantas tuk di usir")
    else:
        cl.sendText(msg.to, "Khusus creator")
elif msg.text in ["Kill"]:
    # Kick blacklisted members present in the group, rotating among
    # bots 1-3 for each kick.
    if msg.toType == 2:
        if msg.from_ in admin:
            group = ki.getGroup(msg.to)
            gMembMids = [contact.mid for contact in group.members]
            matched_list = []
            for tag in wait["blacklist"]:
                matched_list+=filter(lambda str: str == tag, gMembMids)
            if matched_list == []:
                kk.sendText(msg.to,"...")
                kc.sendText(msg.to,"...")
                return
            for jj in matched_list:
                try:
                    klist=[ki,kk,kc]
                    kicker=random.choice(klist)
                    kicker.kickoutFromGroup(msg.to,[jj])
                    print (msg.to,[jj])
                except:
                    pass
elif "Kickall" == msg.text:
    # Creator-only: kick every non-admin member, rotating bots 1-3.
    if msg.from_ in Creator:
        if msg.toType == 2:
            print "Kick all member"
            _name = msg.text.replace("Kickall","")  # always "", so every name matches
            gs = ki.getGroup(msg.to)
            gs = kk.getGroup(msg.to)
            gs = kc.getGroup(msg.to)
            ki.sendText(msg.to,"Sampai jumpaa~")
            kc.sendText(msg.to,"Dadaaah~")
            targets = []
            for g in gs.members:
                if _name in g.displayName:
                    targets.append(g.mid)
            if targets == []:
                ki.sendText(msg.to,"Not found.")
            else:
                for target in targets:
                    if target not in admin:
                        try:
                            klist=[ki,kk,kc]
                            kicker=random.choice(klist)
                            kicker.kickoutFromGroup(msg.to,[target])
                            print (msg.to,[g.mid])
                        except Exception as e:
                            cl.sendText(msg.to,str(e))
                # NOTE(review): immediately re-invites everyone just kicked.
                cl.inviteIntoGroup(msg.to, targets)
elif msg.text in ["Bot restart","Reboot"]:
    # Creator-only: restart the whole process.
    if msg.from_ in Creator:
        cl.sendText(msg.to, "Bot Has Been Restarted...")
        restart_program()
        print "@Restart"
    else:
        cl.sendText(msg.to, "No Access")
elif msg.text in ["Turn off"]:
    # Creator-only: exit the process (SystemExit is swallowed by the
    # bare except, so this relies on the outer handler to terminate).
    if msg.from_ in Creator:
        try:
            import sys
            sys.exit()
        except:
            pass
elif 'Crash' in msg.text:
    # Send a contact message with a malformed mid (crash payload).
    # if msg.from_ in Creator:
    msg.contentType = 13
    msg.contentMetadata = {'mid': "zhu,'"}
    cl.sendMessage(msg)
# --- Profile-clone branches: each account clones the profile of the
# member whose display name exactly matches the @-argument.
elif "Mbok copy @" in msg.text:
    print "[COPY] Ok"
    _name = msg.text.replace("Mbok copy @","")
    _nametarget = _name.rstrip(' ')
    gs = cl.getGroup(msg.to)
    targets = []
    for g in gs.members:
        if _nametarget == g.displayName:
            targets.append(g.mid)
    if targets == []:
        cl.sendText(msg.to, "Not Found...")
    else:
        for target in targets:
            try:
                cl.CloneContactProfile(target)
                cl.sendText(msg.to, "Copied (^_^)")
            except Exception as e:
                print e
elif "bot1 copy @" in msg.text:
    print "[COPY] Ok"
    _name = msg.text.replace("bot1 copy @","")
    _nametarget = _name.rstrip(' ')
    gs = ki.getGroup(msg.to)
    targets = []
    for g in gs.members:
        if _nametarget == g.displayName:
            targets.append(g.mid)
    if targets == []:
        ki.sendText(msg.to, "Not Found...")
    else:
        for target in targets:
            try:
                ki.CloneContactProfile(target)
                ki.sendText(msg.to, "Copied (^_^)")
            except Exception as e:
                print e
elif "bot2 copy @" in msg.text:
    print "[COPY] Ok"
    _name = msg.text.replace("bot2 copy @","")
    _nametarget = _name.rstrip(' ')
    gs = kk.getGroup(msg.to)
    targets = []
    for g in gs.members:
        if _nametarget == g.displayName:
            targets.append(g.mid)
    if targets == []:
        kk.sendText(msg.to, "Not Found...")
    else:
        for target in targets:
            try:
                kk.CloneContactProfile(target)
                kk.sendText(msg.to, "Copied (^_^)")
            except Exception as e:
                print e
elif "bot3 copy @" in msg.text:
    print "[COPY] Ok"
    _name = msg.text.replace("bot3 copy @","")
    _nametarget = _name.rstrip(' ')
    gs = kc.getGroup(msg.to)
    targets = []
    for g in gs.members:
        if _nametarget == g.displayName:
            targets.append(g.mid)
    if targets == []:
        kc.sendText(msg.to, "Not Found...")
    else:
        for target in targets:
            try:
                kc.CloneContactProfile(target)
                kc.sendText(msg.to, "Copied (^_^)")
            except Exception as e:
                print e
elif "bot4 copy @" in msg.text:
    print "[COPY] Ok"
    _name = msg.text.replace("bot4 copy @","")
    _nametarget = _name.rstrip(' ')
    gs = kr.getGroup(msg.to)
    targets = []
    for g in gs.members:
        if _nametarget == g.displayName:
            targets.append(g.mid)
    if targets == []:
        kr.sendText(msg.to, "Not Found...")
    else:
        for target in targets:
            try:
                kr.CloneContactProfile(target)
                kr.sendText(msg.to, "Copied (^_^)")
            except Exception as e:
                print e
elif msg.text in ["Backup all"]:
    # Restore every account's saved profile (picture + fields) from the
    # backup1..backup5 snapshots.
    try:
        ki.updateDisplayPicture(backup2.pictureStatus)
        ki.updateProfile(backup2)
        kk.updateDisplayPicture(backup3.pictureStatus)
        kk.updateProfile(backup3)
        kc.updateDisplayPicture(backup4.pictureStatus)
        kc.updateProfile(backup4)
        kr.updateDisplayPicture(backup5.pictureStatus)
        kr.updateProfile(backup5)
        cl.updateDisplayPicture(backup1.pictureStatus)
        cl.updateProfile(backup1)
        cl.sendText(msg.to, "All Done (^_^)")
    except Exception as e:
        cl.sendText(msg.to, str(e))
elif 'music ' in msg.text.lower():
    # Search a third-party joox proxy API and send title/duration/link
    # plus the audio itself.
    try:
        songname = msg.text.lower().replace('music ','')
        params = {'songname': songname}
        r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
        data = r.text
        data = json.loads(data)
        for song in data:
            hasil = 'This is Your Music\n'
            hasil += 'Judul : ' + song[0]
            hasil += '\nDurasi : ' + song[1]
            hasil += '\nLink Download : ' + song[4]
            cl.sendText(msg.to, hasil)
            cl.sendText(msg.to, "Please Wait for audio...")
            cl.sendAudioWithURL(msg.to, song[4])
    except Exception as njer:
        cl.sendText(msg.to, str(njer))
elif '/lirik ' in msg.text.lower():
    # Fetch and post song lyrics from another joox proxy.
    try:
        songname = msg.text.lower().replace('/lirik ','')
        params = {'songname': songname}
        r = requests.get('http://api.secold.com/joox/cari/' + urllib.urlencode(params))
        data = r.text
        data = json.loads(data)
        for song in data:
            hasil = 'Lyric Lagu ('
            hasil += song[0]
            hasil += ')\n\n'
            hasil += song[5]
            cl.sendText(msg.to, hasil)
    except Exception as wak:
        cl.sendText(msg.to, str(wak))
elif "/musrik " in msg.text:
    # Combined music + lyrics: sends the audio, then metadata and lyrics.
    # NOTE(review): no try/except here, unlike the two branches above.
    songname = msg.text.replace("/musrik ","")
    params = {"songname": songname}
    r = requests.get('http://api.ntcorp.us/joox/search?' + urllib.urlencode(params))
    data = r.text
    data = json.loads(data)
    for song in data:
        abc = song[3].replace('https://','http://')
        hasil = 'Lyric Lagu ('
        hasil += song[0]
        hasil += ')\n\n'
        hasil += song[5]
        cl.sendText(msg.to, "Lagu " + song[0] + "\nSedang Di Prosses... Tunggu Sebentar ^_^ ")
        cl.sendAudioWithURL(msg.to,abc)
        cl.sendText(msg.to, "Title : " + song[0] + "\nLength : " + song[1] + "\nLink download : " + song[4] +"\n\n" + hasil)
        cl.sendText(msg.to, "Selamat Mendengarkan Lagu " + song[0])
elif "Fancytext: " in msg.text:
    # Send the text with the "blinking" effect helper.
    txt = msg.text.replace("Fancytext: ", "")
    cl.kedapkedip(msg.to,txt)
    print "[Command] Kedapkedip"
# --- cover/pp branches: post a member's timeline cover image or profile
# picture, resolved by exact display-name match. Lower/uppercase pairs
# are duplicated verbatim.
elif "cover @" in msg.text:
    if msg.toType == 2:
        cover = msg.text.replace("cover @","")
        _nametarget = cover.rstrip(' ')
        gs = cl.getGroup(msg.to)
        targets = []
        for g in gs.members:
            if _nametarget == g.displayName:
                targets.append(g.mid)
        if targets == []:
            cl.sendText(msg.to,"Not found")
        else:
            for target in targets:
                try:
                    h = cl.channel.getHome(target)
                    objId = h["result"]["homeInfo"]["objectId"]
                    cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
                except Exception as error:
                    print error
                    cl.sendText(msg.to,"Upload image failed.")
elif "Cover @" in msg.text:
    if msg.toType == 2:
        cover = msg.text.replace("Cover @","")
        _nametarget = cover.rstrip(' ')
        gs = cl.getGroup(msg.to)
        targets = []
        for g in gs.members:
            if _nametarget == g.displayName:
                targets.append(g.mid)
        if targets == []:
            cl.sendText(msg.to,"Not found")
        else:
            for target in targets:
                try:
                    h = cl.channel.getHome(target)
                    objId = h["result"]["homeInfo"]["objectId"]
                    cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
                except Exception as error:
                    print error
                    cl.sendText(msg.to,"Upload image failed.")
elif "pp @" in msg.text:
    if msg.toType == 2:
        cover = msg.text.replace("pp @","")
        _nametarget = cover.rstrip(' ')
        gs = cl.getGroup(msg.to)
        targets = []
        for g in gs.members:
            if _nametarget == g.displayName:
                targets.append(g.mid)
        if targets == []:
            cl.sendText(msg.to,"Not found")
        else:
            for target in targets:
                try:
                    h = cl.getContact(target)
                    cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
                except Exception as error:
                    print error
                    cl.sendText(msg.to,"Upload image failed.")
elif "Pp @" in msg.text:
    if msg.toType == 2:
        cover = msg.text.replace("Pp @","")
        _nametarget = cover.rstrip(' ')
        gs = cl.getGroup(msg.to)
        targets = []
        for g in gs.members:
            if _nametarget == g.displayName:
                targets.append(g.mid)
        if targets == []:
            cl.sendText(msg.to,"Not found")
        else:
            for target in targets:
                try:
                    h = cl.getContact(target)
                    cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
                except Exception as error:
                    print error
                    cl.sendText(msg.to,"Upload image failed.")
elif msg.text.lower() in ["pap owner","pap creator"]:
    # Post a (randomly chosen) owner picture from the hard-coded link list.
    link = ["http://dl.profile.line-cdn.net/0hu_h9u_1uKnBVQAaS3f5VJ2kFJB0ibiw4LXNhRHJIIRN-ID4kbHQ1EicTdEQveGVxPnIyFHBBcUYv"]
    pilih = random.choice(link)
    cl.sendImageWithURL(msg.to,pilih)
elif "Spam: " in msg.text:
    # Send the text 10 times to this chat, rotating sender accounts.
    bctxt = msg.text.replace("Spam: ", "")
    t = 10
    while(t):
        random.choice(KAC).sendText(msg.to, (bctxt))
        t-=1
elif "Scbc " in msg.text:
bctxt = msg.text.replace("Scbc ", "")
orang = cl.getAllContactIds()
t = 20
for manusia in orang:
while(t):
cl.sendText(manusia, (bctxt))
t-=1
elif "Cbc " in msg.text:
    # Broadcast the text once to every contact.
    broadcasttxt = msg.text.replace("Cbc ", "")
    orang = cl.getAllContactIds()
    for manusia in orang:
        cl.sendText(manusia, (broadcasttxt))
elif '/ig ' in msg.text.lower():
    # Scrape an Instagram profile's public page and post a summary plus
    # the full-size avatar. Relies on og:description word positions
    # ("<followers> Followers, <following> Following, <posts> Posts ... name username").
    try:
        instagram = msg.text.lower().replace("/ig ","")
        html = requests.get('https://www.instagram.com/' + instagram + '/?')
        soup = BeautifulSoup(html.text, 'html.parser')
        data = soup.find_all('meta', attrs={'property':'og:description'})
        text = data[0].get('content').split()
        data1 = soup.find_all('meta', attrs={'property':'og:image'})
        text1 = data1[0].get('content').split()
        zhu = text1[0].replace("s150x150/","")  # strip thumbnail size for full res
        user = "Name: " + text[-2] + "\n"
        user1 = "Username: " + text[-1] + "\n"
        followers = "Followers: " + text[0] + "\n"
        following = "Following: " + text[2] + "\n"
        post = "Post: " + text[4] + "\n"
        link = "Link: " + "https://www.instagram.com/" + instagram
        detail = "========INSTAGRAM INFO ========\n"
        details = "\n========INSTAGRAM INFO ========"
        cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
        cl.sendImageWithURL(msg.to, zhu)
    except Exception as njer:
        cl.sendText(msg.to, str(njer))
elif "Checkig " in msg.text:
    # Crawl an Instagram profile's media pages via window._sharedData and
    # repost every image/video into the chat.
    separate = msg.text.split(" ")
    user = msg.text.replace(separate[0] + " ","")
    if user.startswith("@"):
        user = user.replace("@","")
    profile = "https://www.instagram.com/" + user
    with requests.session() as x:
        x.headers['user-agent'] = 'Mozilla/5.0'
        end_cursor = ''
        for count in range(1, 999):
            print('PAGE: ', count)
            r = x.get(profile, params={'max_id': end_cursor})
            data = re.search(r'window._sharedData = (\{.+?});</script>', r.text).group(1)
            j = json.loads(data)
            for node in j['entry_data']['ProfilePage'][0]['user']['media']['nodes']:
                if node['is_video']:
                    page = 'https://www.instagram.com/p/' + node['code']
                    r = x.get(page)
                    url = re.search(r'"video_url": "([^"]+)"', r.text).group(1)
                    print(url)
                    cl.sendVideoWithURL(msg.to,url)
                else:
                    print (node['display_src'])
                    cl.sendImageWithURL(msg.to,node['display_src'])
            end_cursor = re.search(r'"end_cursor": "([^"]+)"', r.text).group(1)
elif 'Youtube ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtube ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to,"Could not find it")
elif 'Youtubevideo: ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtubevideo: ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class': 'yt-uix-tile-link'})
cl.sendVideoWithURL(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to, "Could not find it")
elif "Sayin " in msg.text:
say = msg.text.replace("Sayin ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Sayen " in msg.text:
say = msg.text.replace("Sayen ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Sayjpn " in msg.text:
say = msg.text.replace("Sayjpn ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Sayjer " in msg.text:
say = msg.text.replace("Sayjer ","")
lang = 'de'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Sayko " in msg.text:
say = msg.text.replace("Sayko ","")
lang = 'ko'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say welcome" in msg.text:
gs = cl.getGroup(msg.to)
say = msg.text.replace("Say welcome","Selamat Datang Di "+ gs.name)
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower() in ["hi","hai","halo","hallo"]:
beb = "Hi Sayang 😘 " +cl.getContact(msg.from_).displayName + " starry heart"
kr.sendText(msg.to,beb)
elif msg.text.lower() in ["siang","Siang"]:
beb = "siang 😘 " +cl.getContact(msg.from_).displayName + " starry heart"
kr.sendText(msg.to,beb)
elif msg.text.lower() in ["pagi","Pagi"]:
beb = "PAgi 😘 " +cl.getContact(msg.from_).displayName + " starry heart"
cl.sendText(msg.to,beb)
elif msg.text.lower() in ["sore","Sore"]:
beb = "Sore 😘 " +cl.getContact(msg.from_).displayName + " starry heart"
kc.sendText(msg.to,beb)
elif msg.text.lower() in ["malam","Malam"]:
beb = "Malam 😘 " +cl.getContact(msg.from_).displayName + " starry heart"
cl.sendText(msg.to,beb)
elif msg.text.lower() in ["bot","Bot","Mbok","mbok"]:
beb = "Hadirr 😘 " +cl.getContact(msg.from_).displayName + " starry heart"
cl.sendText(msg.to,beb)
elif msg.text.lower() in ["Assalamualaikum","assalamualaikum"]:
beb = "Wa'alaikumsalam.wr.wb " +cl.getContact(msg.from_).displayName + "😊😊"
kr.sendText(msg.to,beb)
elif msg.text.lower() in ["Salken","salken","Salam kenal","salam kenal"]:
beb = "Salam Kenal " +cl.getContact(msg.from_).displayName + "😊namaku sri😊"
cl.sendText(msg.to,beb)
beb = "Salken " +cl.getContact(msg.from_).displayName + "😊aku paimin😊"
ki.sendText(msg.to,beb)
beb = "Salken " +cl.getContact(msg.from_).displayName + "😊aku prapto😊"
kk.sendText(msg.to,beb)
beb = "Salken " +cl.getContact(msg.from_).displayName + "😊saya parji😊"
kc.sendText(msg.to,beb)
beb = "Salam Kenal " +cl.getContact(msg.from_).displayName + "😊aku sundari😊"
kr.sendText(msg.to,beb)
elif msg.text.lower() in ["Pie?","pie?","Piye?","piye?","Pie","pie","Piye","piye"]:
beb = "Apane " +cl.getContact(msg.from_).displayName + " ????"
cl.sendText(msg.to,beb)
elif msg.text.lower() in ["anu","Anu"]:
beb = "Anune geneoo " +cl.getContact(msg.from_).displayName + " ????"
cl.sendText(msg.to,beb)
elif msg.text.lower() in ["Kangen","kangen","kangeen"]:
beb = "Aku juga kangen km " +cl.getContact(msg.from_).displayName + " 😘"
cl.sendText(msg.to,beb)
elif msg.text.lower() in ["Apa kabar","apakabar","apa kabar","Apakabar"]:
beb = "Saya baik " +cl.getContact(msg.from_).displayName + " "
cl.sendText(msg.to,beb)
elif "playstore " in msg.text.lower():
tob = msg.text.lower().replace("playstore ","")
cl.sendText(msg.to,"Sedang Mencari...")
cl.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLink : https://play.google.com/store/search?q=" + tob)
cl.sendText(msg.to,"Tuh Linknya Kak (^_^)")
elif "Smule " in msg.text:
a = msg.text.replace("Smule ","")
b = urllib.quote(a)
kr.sendText(msg.to,"goleki id smule...")
kr.sendText(msg.to, "Ketemu..\nJenenge : "+b+"\nId smule: http://smule.com/" +b)
elif "Wikipedia " in msg.text:
try:
wiki = msg.text.lower().replace("Wikipedia ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Youinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Mid @" in msg.text:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
random.choice(KAC).sendText(msg.to, g.mid)
else:
pass
elif "/bio " in msg.text:
string = msg.text.replace("/bio ","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
ki.updateProfile(profile)
kk.updateProfile(profile)
kc.updateProfile(profile)
kr.updateProfile(profile)
cl.sendText(msg.to,"All Done")
elif "Cnmbok " in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("Cnmbok ","")
if len(string.decode('utf-8')) <= 5000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Done")
elif "Cnbot1 " in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("Cnbot1 ","")
if len(string.decode('utf-8')) <= 5000:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"Done")
elif "Cnbot2 " in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("Cnbot2 ","")
if len(string.decode('utf-8')) <= 5000:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendText(msg.to,"Done")
elif "Cnbot3 " in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("Cnbot3 ","")
if len(string.decode('utf-8')) <= 5000:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendText(msg.to,"Done")
elif "Cnbot4 " in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("Cnbot4 ","")
if len(string.decode('utf-8')) <= 5000:
profile = cl.getProfile()
profile.displayName = string
kr.updateProfile(profile)
kr.sendText(msg.to,"Done")
elif "Ulti " in msg.text:
if msg.from_ in Creator:
ulti0 = msg.text.replace("Ulti ","")
ulti1 = ulti0.rstrip()
ulti2 = ulti1.replace("@","")
ulti3 = ulti2.rstrip()
_name = ulti3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
km.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets ==[]:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
km.kickoutFromGroup(msg.to,[target])
km.leaveGroup(msg.to)
print (msg.to,[g.mid])
except:
km.sendText(msg.t,"Ter ELIMINASI....")
km.sendText(msg.to,"WOLES brooo....!!!")
km.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
gs.preventJoinByTicket(gs)
cl.updateGroup(gs)
elif msg.text.lower() in ["mymid","myid"]:
middd = "Name : " +cl.getContact(msg.from_).displayName + "\nMid : " +msg.from_
kr.sendText(msg.to,middd)
elif msg.text.lower() in ["me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif "/apakah " in msg.text:
apk = msg.text.replace("/apakah ","")
rnd = ["Ya","Tidak","Bisa Jadi","Mungkin"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "/hari " in msg.text:
apk = msg.text.replace("/hari ","")
rnd = ["Senin","Selasa","Rabu","Kamis","Jumat","Sabtu","Minggu"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "/berapa " in msg.text:
apk = msg.text.replace("/berapa ","")
rnd = ['10%','20%','30%','40%','50%','60%','70%','80%','90%','100%','0%']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "berapakah " in msg.text:
apk = msg.text.replace("berapakah ","")
rnd = ['1','2','3','4','5','6','7','8','9','10','Tidak Ada']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "/kapan " in msg.text:
apk = msg.text.replace("/kapan ","")
rnd = ["kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi","Tidak Akan Pernah"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
wait["Simi"] = True
cl.sendText(msg.to," Simisimi Di Aktifkan")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
wait["Simi"] = False
cl.sendText(msg.to,"Simisimi Di Nonaktifkan")
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Youtubesearch: " in msg.text:
query = msg.text.replace("Youtube ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html.parser')
hasil = ""
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
cl.sendText(msg.to,hasil)
print '[Command] Youtube Search'
elif "Tr-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-jw " in msg.text:
isi = msg.text.replace("Tr-jw ","")
translator = Translator()
hasil = translator.translate(isi, dest='jw')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-su " in msg.text:
isi = msg.text.replace("Tr-su ","")
translator = Translator()
hasil = translator.translate(isi, dest='su')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Trjp " in msg.text:
isi = msg.text.replace("Trjp ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Trjer " in msg.text:
isi = msg.text.replace("Trjer ","")
translator = Translator()
hasil = translator.translate(isi, dest='de')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Inggris----\n" + "" + result)
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Inggris----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Thailand----\n" + "" + result)
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Thailand----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif msg.text in ["Friendlist"]:
contactlist = cl.getAllContactIds()
kontak = cl.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = cl.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═�����═══════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
cl.sendText(msg.to, msgs)
elif msg.text in ["Spam","spam","SPAM"]:
# if msg.from_ in admin:
beb = "" +cl.getContact(msg.from_).displayName + " belum mandi"
cl.sendText(msg.to,beb)
ki.sendText(msg.to,"Tak tun tuang")
kk.sendText(msg.to,"Tak tun tuang")
kr.sendText(msg.to,"Tapi masih cakep juga")
ki.sendText(msg.to,"Tak tun tuang")
kk.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"apalagi kalau sudah mandi")
ki.sendText(msg.to,"Tak tun tuang")
kr.sendText(msg.to,"Pastinya cakep sekali")
cl.sendText(msg.to,"yiha")
kr.sendText(msg.to,"Kalau orang lain melihatnya")
kk.sendText(msg.to,"Tak tun tuang")
ki.sendText(msg.to,"Tak tuntuang")
cl.sendText(msg.to,"Badaknya taba bana")
ki.sendText(msg.to,"Tak tun tuang")
kk.sendText(msg.to,"Tak tuntuang")
kr.sendText(msg.to,"Tapi kalau langsuang diidu")
ki.sendText(msg.to,"Tak tun tuang")
kk.sendText(msg.to,"Astagfirullah baunya")
cl.sendText(msg.to,"Males lanjutin ah")
kr.sendText(msg.to,"Sepi bat")
kk.sendText(msg.to,"Iya sepi udah udah")
beb = "Cuma si " +cl.getContact(msg.from_).displayName + " yang denger kita nyanyi"
cl.sendText(msg.to,beb)
ki.sendText(msg.to,"Nah")
kk.sendText(msg.to,"Mending gua makan dulu")
cl.sendText(msg.to,"Siyap")
ki.sendText(msg.to,"Okeh")
beb = "Katanya si " +cl.getContact(msg.from_).displayName + " Jomblo yaa"
kk.sendText(msg.to,beb)
cl.sendText(msg.to,"Iya emang")
ki.sendText(msg.to,"Denger denger si dia lagi nyari pacar")
kr.sendText(msg.to,"Udah ah gosip mulu doain aja biar cepet dapet")
elif "Getvid @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getgroup image" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Urlgroup image" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendText(msg.to,path)
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "info" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif "Sayko " in msg.text:
say = msg.text.replace("Sayko ","")
lang = 'ko'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
kk.sendAudio(msg.to,"hasil.mp3")
elif "Sayjp " in msg.text:
say = msg.text.replace("Sayjp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
kr.sendAudio(msg.to,"hasil.mp3")
elif "Sayar " in msg.text:
say = msg.text.replace("Sayar ","")
lang = 'ar'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
kr.sendAudio(msg.to,"hasil.mp3")
elif "Hayy " in msg.text:
say = msg.text.replace("Hayy ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot Sudah Berjalan Selama :\n"+waktu(eltime)
cl.sendText(msg.to,van)
elif "Checkdate " in msg.text:
tanggal = msg.text.replace("Checkdate ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"========== I N F O R M A S I ==========\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n========== I N F O R M A S I ==========")
elif msg.text in ["Kalender","Time","Waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
elif "SearchID: " in msg.text:
userid = msg.text.replace("SearchID: ","")
contact = cl.findContactsByUserid(userid)
msg.contentType = 13
msg.contentMetadata = {'mid': contact.mid}
cl.sendMessage(msg)
elif "Searchid: " in msg.text:
userid = msg.text.replace("Searchid: ","")
contact = cl.findContactsByUserid(userid)
msg.contentType = 13
msg.contentMetadata = {'mid': contact.mid}
cl.sendMessage(msg)
elif "removechat" in msg.text.lower():
if msg.from_ in admin:
try:
cl.removeAllMessages(op.param2)
ki.removeAllMessages(op.param2)
kk.removeAllMessages(op.param2)
kc.removeAllMessages(op.param2)
kr.removeAllMessages(op.param2)
print "[Command] Remove Chat"
cl.sendText(msg.to,"Done")
except Exception as error:
print error
cl.sendText(msg.to,"Error")
elif "Invitemeto: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Invitemeto: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
ki.findAndAddContactsByMid(msg.from_)
kk.findAndAddContactsByMid(msg.from_)
kc.findAndAddContactsByMid(msg.from_)
kr.findAndAddContactsByMid(msg.from_)
random.choice(KAC).inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin Saya Tidak Di Dalaam Grup Itu")
elif msg.text in ["Glist"]:
cl.sendText(msg.to, "Tunggu Sebentar. . .")
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "╠➩" + "%s\n" % (cl.getGroup(i).name +" ~> ["+str(len(cl.getGroup(i).members))+"]")
cl.sendText(msg.to,"╔════════════════════════���\n║ ☆☞ LIST GROUPS☜���\n���══���══════════════════════\n" + h + "╠═════════════════════════" + "\n║ Total Groups =" +" ["+str(len(gid))+"]\n╚═════════════════════════")
elif msg.text in ["Glistmid"]:
gruplist = kr.getGroupIdsJoined()
kontak = kr.getGroups(gruplist)
num=1
msgs="═════════List GrupMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak)
kr.sendText(msg.to, msgs)
elif "Google: " in msg.text:
a = msg.text.replace("Google: ","")
b = urllib.quote(a)
cl.sendText(msg.to,"Sedang Mencari...")
cl.sendText(msg.to, "https://www.google.com/" + b)
cl.sendText(msg.to,"Itu Dia Linknya. . .")
elif "Details group: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Details group: ","")
if gid in [""," "]:
cl.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = cl.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
cl.sendText(msg.to,h)
except Exception as error:
cl.sendText(msg.to,(error))
elif "Cancel invite: " in msg.text:
if msg.from_ in admin:
gids = msg.text.replace("Cancel invite: ","")
gid = cl.getGroup(gids)
for i in gid:
if i is not None:
try:
cl.rejectGroupInvitation(i)
except:
cl.sendText(msg.to,"Error!")
break
else:
break
if gid is not None:
cl.sendText(msg.to,"Berhasil tolak undangan dari grup " + gid.name)
else:
cl.sendText(msg.to,"Grup tidak ditemukan")
elif msg.text in ["Mbok acc"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = cl.getGroup(i)
_list += gids.name
cl.acceptGroupInvitation(i)
else:
break
if gid is not None:
cl.sendText(msg.to,"Wes Trimo Kabeh undangan grup nak:\n" + _list)
else:
cl.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif msg.text in ["bot1 acc invite"]:
if msg.from_ in admin:
gid = ki.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = ki.getGroup(i)
_list += gids.name
ki.acceptGroupInvitation(i)
else:
break
if gid is not None:
ki.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
ki.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif msg.text in ["bot2 acc invite"]:
if msg.from_ in admin:
gid = kk.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = kk.getGroup(i)
_list += gids.name
kk.acceptGroupInvitation(i)
else:
break
if gid is not None:
kk.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
kk.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif msg.text in ["bot3 acc invite"]:
if msg.from_ in admin:
gid = kc.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = kc.getGroup(i)
_list += gids.name
kc.acceptGroupInvitation(i)
else:
break
if gid is not None:
kc.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
kc.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif msg.text in ["bot4 acc invite"]:
if msg.from_ in admin:
gid = kr.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = kr.getGroup(i)
_list += gids.name
kr.acceptGroupInvitation(i)
else:
break
if gid is not None:
kr.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
kr.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif "Gif gore" in msg.text:
gif = ("https://media.giphy.com/media/l2JHVsQiOZrNMGzYs/giphy.gif","https://media.giphy.com/media/OgltQ2hbilzJS/200w.gif")
gore = random.choice(gif)
cl.sendGifWithURL(msg.to,gore)
if op.type == 59:
print op
except Exception as error:
print error
# Main polling loop: repeatedly fetch pending operations from the LINE
# service and hand each one to the bot() dispatcher defined above.
while True:
    try:
        # Long-poll for up to 5 operations starting at the current revision.
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        # Stream ended unexpectedly; most likely the stored revision is stale/invalid.
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Advance the revision cursor so this operation is not fetched again.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
|
test_threading.py | import epics
import threading
import pvnames
def test_basic_thread():
    """A plain threading.Thread must adopt the initial CA context to use PVs."""
    values = []

    def worker():
        # Plain threads do not inherit the CA context automatically,
        # so the worker has to opt in explicitly.
        epics.ca.use_initial_context()
        values.append(epics.get_pv(pvnames.double_pv).get())

    epics.ca.use_initial_context()
    runner = threading.Thread(target=worker)
    runner.start()
    runner.join()
    assert values and values[0] is not None
def test_basic_cathread():
    """CAThread manages the CA context itself; the worker needs no setup call."""
    values = []

    def worker():
        values.append(epics.get_pv(pvnames.double_pv).get())

    epics.ca.use_initial_context()
    runner = epics.ca.CAThread(target=worker)
    runner.start()
    runner.join()
    assert values and values[0] is not None
def test_attach_context():
    """A thread may create its own CA context, then attach the main one."""
    values = []

    def worker():
        # First operate in a context created inside this thread.
        epics.ca.create_context()
        own_pv = epics.get_pv(pvnames.double_pv2)
        assert own_pv.wait_for_connection()
        values.append(own_pv.get())
        epics.ca.detach_context()
        # Now switch to the main thread's context captured below in `ctx`.
        epics.ca.attach_context(ctx)
        shared_pv = epics.get_pv(pvnames.double_pv)
        assert shared_pv.wait_for_connection()
        values.append(shared_pv.get())

    epics.ca.use_initial_context()
    ctx = epics.ca.current_context()
    runner = threading.Thread(target=worker)
    runner.start()
    runner.join()
    assert len(values) == 2 and values[0] is not None
    print(values)
def test_pv_from_main():
    """A PV created in the main thread is usable from a CAThread worker."""
    values = []

    def worker():
        # `pv` is a closure over the object built in the main thread below.
        values.append(pv.get())

    epics.ca.use_initial_context()
    pv = epics.get_pv(pvnames.double_pv2)
    runner = epics.ca.CAThread(target=worker)
    runner.start()
    runner.join()
    assert values and values[0] is not None
|
shell.py | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from ambari_commons import subprocess32
import os
import signal
import threading
from contextlib import contextmanager
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
# Module-wide root logger.
logger = logging.getLogger()
# Thread-local storage slot; no use is visible in this part of the module.
threadLocal = threading.local()
# default timeout for async invoked processes
TIMEOUT_SECONDS = 300
# Module-level registry of temporary files produced while running commands.
tempFiles = []


def noteTempFile(filename):
    """Record *filename* so interested callers can clean it up later."""
    tempFiles.append(filename)


def getTempFiles():
    """Return the shared list of recorded temporary file names."""
    return tempFiles
class _dict_to_object:
def __init__(self, entries):
self.__dict__.update(entries)
def __getitem__(self, item):
return self.__dict__[item]
# windows specific code
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def kill_process_with_children(parent_pid):
    """Forcefully terminate *parent_pid* and its child process tree (Windows)."""
    taskkill_cmd = ["taskkill", "/F", "/T", "/PID", "{0}".format(parent_pid)]
    shellRunnerWindows().run(taskkill_cmd)
class shellRunner(object):
    """Base interface for OS-specific shell command runners."""

    def run(self, script, user=None):
        """Execute *script* (optionally as *user*); base implementation is a no-op."""
        pass

    def runPowershell(self, file=None, script_block=None, args=[]):
        """PowerShell execution hook; only meaningful on Windows subclasses."""
        raise NotImplementedError()
def launch_subprocess(command):
    """
    Start *command* asynchronously with stdout/stderr captured.

    :param command: Command to execute
    :type command: list[str]|str
    :return: Popen object
    """
    # An argument list runs directly; anything else is handed to the shell.
    use_shell = not isinstance(command, (list, tuple))
    return subprocess32.Popen(command,
                              stdout=subprocess32.PIPE,
                              stderr=subprocess32.PIPE,
                              shell=use_shell,
                              close_fds=True)
def watchdog_func(event, cmd, exec_timeout):
    """Kill *cmd* (and its child tree) if it is still running after the timeout.

    :type event: threading.Event  -- set it to cancel the watchdog early
    :type cmd: Popen
    :type exec_timeout: int

    Usage example::

        event = threading.Event()
        cmd = Popen(...)
        thread = threading.Thread(target=watchdog_func, args=(event, cmd, execution_timeout,))
        thread.start()
        ... cmd.communicate() or any other processing ...
        event.set()
        thread.join()
        ... result code ...
    """
    # Returns early (without killing) when the caller sets the event in time.
    event.wait(exec_timeout)
    if cmd.returncode is not None:
        return
    logger.error("Task timed out and will be killed")
    kill_process_with_children(cmd.pid)
def subprocess_with_timeout(command, execution_timeout=None):
    """Run *command* with a time limit; past the limit the process is killed.

    :param command: command to execute
    :param execution_timeout: limit in seconds; defaults to TIMEOUT_SECONDS
    :type command: list[str]|str
    :type execution_timeout: int
    :rtype: dict with keys "out", "err", "retCode"
    """
    if execution_timeout is None:
        execution_timeout = TIMEOUT_SECONDS
    stop_event = threading.Event()
    proc = launch_subprocess(command)
    logger.debug("Launching watchdog thread")
    stop_event.clear()
    watchdog = threading.Thread(target=watchdog_func,
                                args=(stop_event, proc, execution_timeout,))
    watchdog.start()
    # communicate() blocks until the process exits (or the watchdog kills it).
    stdout_data, stderr_data = proc.communicate()
    result = {
        "out": stdout_data,
        "err": stderr_data,
        "retCode": proc.returncode,
    }
    # Cancel the watchdog and wait for it to finish before returning.
    stop_event.set()
    watchdog.join()
    return result
@contextmanager
def process_executor(command, timeout=None, error_callback=None):
    """
    Context manager for command execution with a watchdog kill timer.

    :type command list|str
    :type timeout None|int
    :type error_callback func
    :return stdout stream of the launched process

    Usage example:

    Option 1. Basic
      with process_executor(["ls", "-la"]) as stdout:
        for line in stdout:
          print line

    Option 2. Extended
      def error_handler(command, error_log, exit_code):
        print "Command '{}' failed".format(command)
        print "Exit Code: {} StdOut: {} \n".format(exit_code, "\n".join(error_log))

      with process_executor(["ls", "-la"], timeout=10, error_callback=error_handler) as stdout:
        for line in stdout:
          print line
    """
    # NOTE: timeout=0 falls through to the default as well, since 0 is falsy.
    if not timeout:
        timeout = TIMEOUT_SECONDS
    event = threading.Event()
    cmd = launch_subprocess(command)
    # Watchdog kills the process tree if the body takes longer than timeout.
    thread = threading.Thread(target=watchdog_func, args=(event, cmd, timeout,))
    thread.start()
    # Hand the live stdout pipe to the with-block body.
    yield cmd.stdout
    # Body finished: check state, cancel the watchdog, then clean up.
    exit_code = cmd.poll()
    event.set()
    thread.join()
    if exit_code is None:
        # Process still running after the body finished reading: kill it.
        kill_process_with_children(cmd.pid)
    if error_callback and exit_code and exit_code > 0:
        error_callback(command, cmd.stderr.readlines(), exit_code)
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class shellRunnerWindows(shellRunner):
    """Windows implementation: runs commands and PowerShell via subprocess32."""

    # Run any command
    def run(self, script, user=None):
        """Run *script* (list or string); returns dict with exitCode/output/error.

        The *user* argument is accepted for interface compatibility but
        ignored on Windows.
        """
        logger.warn("user argument ignored on windows")
        code = 0
        if isinstance(script, list):
            cmd = " ".join(script)
        else:
            cmd = script
        p = subprocess32.Popen(cmd, stdout=subprocess32.PIPE,
                               stderr=subprocess32.PIPE, shell=False)
        out, err = p.communicate()
        code = p.wait()
        logger.debug("Exitcode for %s is %d" % (cmd, code))
        return {'exitCode': code, 'output': out, 'error': err}

    def runPowershell(self, file=None, script_block=None, args=[]):
        """Run a PowerShell script file or inline script block.

        Exactly one of *file* / *script_block* should be given; *args* are
        appended to the command line. Returns a _dict_to_object with
        exitCode/output/error.
        """
        logger.warn("user argument ignored on windows")
        code = 0
        cmd = None
        if file:
            cmd = ['powershell', '-WindowStyle', 'Hidden', '-File', file] + args
        elif script_block:
            cmd = ['powershell', '-WindowStyle', 'Hidden', '-Command', script_block] + args
        p = subprocess32.Popen(cmd, stdout=subprocess32.PIPE,
                               stderr=subprocess32.PIPE, shell=False)
        out, err = p.communicate()
        code = p.wait()
        logger.debug("Exitcode for %s is %d" % (cmd, code))
        return _dict_to_object({'exitCode': code, 'output': out, 'error': err})
def get_all_children(base_pid):
    """Collect *base_pid* plus every descendant PID by walking /proc.

    :param base_pid: starting PID to scan for children
    :return: list of (pid, binary name, command line incl. binary) tuples
    :type base_pid: int
    :rtype: list[(int, str, str)]
    """
    children_path = "/proc/{0}/task/{0}/children"
    comm_path = "/proc/{0}/comm"
    cmdline_path = "/proc/{0}/cmdline"

    def _first_line(path):
        # Shared reader for comm/cmdline; missing /proc entries yield "".
        try:
            with open(path, "r") as f:
                return f.readline().strip()
        except IOError:
            return ""

    def _child_pids(pid):
        try:
            with open(children_path.format(pid), "r") as f:
                return [int(tok) for tok in f.readline().strip().split(" ")]
        except (IOError, ValueError):
            # No children file, or an empty/garbled line.
            return []

    collected = []
    pending = [int(base_pid)]
    # Breadth-first walk of the process tree.
    while pending:
        pid = pending.pop(0)
        children = _child_pids(pid)
        collected.append((pid,
                          _first_line(comm_path.format(pid)),
                          _first_line(cmdline_path.format(pid))))
        pending.extend(children)
    return collected
def is_pid_exists(pid):
    """Return True if a /proc entry for *pid* is present (regardless of state).

    :type pid: int
    :rtype: bool
    """
    try:
        return os.path.exists("/proc/{0}".format(pid))
    except (OSError, IOError):
        logger.debug("Failed to check PID existence")
        return False
def get_existing_pids(pids):
    """
    Check if processes with the given pids still exist (not counting real state).
    Optimized to check the whole PID list with a single /proc scan.

    :param pids: list of PIDs to filter
    :return: PIDs from *pids* that still exist, in the original order
    :type pids: list[int]
    :rtype: list[int]
    """
    try:
        # Build a set for O(1) membership tests; the original scanned a list
        # per candidate PID, which is O(len(pids) * len(/proc)).
        live_pids = {int(entry) for entry in os.listdir("/proc") if entry.isdigit()}
    except (OSError, IOError):
        logger.debug("Failed to check PIDs existence")
        return []
    # Preserve input order (and duplicates), matching the original behaviour.
    return [pid for pid in pids if pid in live_pids]
def wait_for_process_list_kill(pids, timeout=5, check_step_time=0.1):
    """
    Process tree waiter: block until all *pids* disappear or *timeout* passes.

    :type pids list[int]
    :type timeout int|float
    :type check_step_time int|float
    :param pids list of PIDs to watch
    :param timeout how long wait till giving up, seconds. Set 0 for nowait or None for infinite time
    :param check_step_time how often scan for existing PIDs, seconds
    """
    from threading import Thread, Event
    import time

    stop_waiting = Event()

    def _wait_loop():
        # Poll until cancelled or every watched PID has vanished.
        while not stop_waiting.is_set() and get_existing_pids(pids):
            time.sleep(check_step_time)

    if timeout == 0:  # no need for loop if no timeout is set
        return

    # Run the poll loop in a helper thread so join(timeout=...) can bound it;
    # timeout=None then means "wait forever".
    th = Thread(target=_wait_loop)
    stop_waiting.clear()
    th.start()
    th.join(timeout=timeout)
    # Signal the poller to stop (if it is still running) and reap it.
    stop_waiting.set()
    th.join()
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def kill_process_with_children(base_pid):
    """
    Process tree killer (Linux): SIGTERM then SIGKILL the whole tree.

    Package managers (apt/yum/zypper...) and the current process itself are
    exempted so an in-flight package operation is never interrupted.

    :type base_pid int
    """
    from resource_management.core import sudo  # to avoid circular dependency

    # Never kill these binaries even if they appear in the tree.
    exception_list = ["apt-get", "apt", "yum", "zypper", "zypp"]
    # Escalation order: polite SIGTERM first, SIGKILL second.
    signals_to_post = {
        "SIGTERM": signal.SIGTERM,
        "SIGKILL": signal.SIGKILL
    }
    full_child_pids = get_all_children(base_pid)
    all_child_pids = [item[0] for item in full_child_pids if item[1].lower() not in exception_list and item[0] != os.getpid()]
    error_log = []
    for sig_name, sig in signals_to_post.items():
        # we need to kill processes from the bottom of the tree
        pids_to_kill = sorted(get_existing_pids(all_child_pids), reverse=True)
        for pid in pids_to_kill:
            try:
                sudo.kill(pid, sig)
            except OSError as e:
                # Collect failures; reported at the end if PIDs survive.
                error_log.append((sig_name, pid, repr(e)))
        if pids_to_kill:
            # Give the processes a bounded grace period to exit.
            wait_for_process_list_kill(pids_to_kill)
            still_existing_pids = get_existing_pids(pids_to_kill)
            if still_existing_pids:
                logger.warn("These PIDs {0} did not respond to {1} signal. Detailed commands list:\n {2}".format(
                    ", ".join([str(i) for i in still_existing_pids]),
                    sig_name,
                    "\n".join([i[2] for i in full_child_pids if i[0] in still_existing_pids])
                ))
    if get_existing_pids(all_child_pids) and error_log:  # we're unable to kill all requested PIDs
        logger.warn("Process termination error log:\n")
        for error_item in error_log:
            logger.warn("PID: {0}, Process: {1}, Exception message: {2}".format(*error_item))
def _changeUid():
    """Switch the process to the UID stored in threadLocal.uid.

    Used as a Popen preexec_fn by shellRunnerLinux; failure to switch is
    logged and otherwise ignored so the command still runs.
    """
    try:
        os.setuid(threadLocal.uid)
    except Exception:
        logger.warn("can not switch user for running command.")
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class shellRunnerLinux(shellRunner):
    """Linux implementation: runs commands through a login bash, optionally as another user."""

    # Run any command
    def run(self, script, user=None):
        """Run *script* (list or string) via ``bash --login -c``.

        When *user* is given, the subprocess drops to that user's UID via
        the _changeUid preexec hook; otherwise the current UID is kept.
        Returns a dict with exitCode/output/error.
        """
        import pwd

        try:
            if user is not None:
                # Resolve the user name to a numeric UID.
                user = pwd.getpwnam(user)[2]
            else:
                user = os.getuid()
            # Stash the UID where the preexec hook (runs in the child) reads it.
            threadLocal.uid = user
        except Exception:
            logger.warn("can not switch user for RUN_COMMAND.")

        cmd = script
        if isinstance(script, list):
            cmd = " ".join(script)

        # Login shell so the command sees a normal user environment.
        cmd_list = ["/bin/bash", "--login", "--noprofile", "-c", cmd]
        p = subprocess32.Popen(cmd_list, preexec_fn=_changeUid, stdout=subprocess32.PIPE,
                               stderr=subprocess32.PIPE, shell=False, close_fds=True)
        out, err = p.communicate()
        code = p.wait()
        logger.debug("Exitcode for %s is %d" % (cmd, code))
        return {'exitCode': code, 'output': out, 'error': err}
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def changeUid():
    """Public UID-switch hook; no-op on Windows (no setuid there)."""
    #No Windows implementation
    pass
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def changeUid():
    """Public UID-switch hook; delegates to the private _changeUid on Linux."""
    _changeUid()
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node (Python 2, httplib)."""

    # Request id counter. NOTE(review): `self.OBJID += 1` creates an
    # instance attribute shadowing this class attribute, so ids are
    # per-instance, not global — confirm that is intended.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header reused for every request.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Persistent connection; 30 second timeout, no strict mode.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return its result, its error object, or None."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            # Server-side error: hand the error object back to the caller.
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Return the node's current block count."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Fetch new work (data=None) or submit a solved block (data given)."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value (Python 2 long mask)."""
    return x & 0xffffffffL
def bytereverse(x):
    """Swap the byte order of a 32-bit word (endianness flip)."""
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
                    (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of *in_buf* in place (length must be a multiple of 4)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in *in_buf* (length % 4 == 0)."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    words.reverse()
    return ''.join(words)
class Miner:
    """One mining worker: scans nonces over getwork data and submits solutions."""

    def __init__(self, id):
        self.id = id
        # Upper bound of the nonce scan; re-tuned each iteration from the
        # measured hash rate and the configured scantime.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for a share; return (hashes_done, nonce_bin or None)."""
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and send it upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Replace the nonce field (hex chars 152-160) of the original data.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One getwork round: fetch work, scan nonces, retune, maybe submit."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Scale the next scan so it takes roughly settings['scantime'] seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the RPC server and iterate forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner's loop forever."""
    worker = Miner(id)
    worker.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse "key = value" lines from the config file into `settings`.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for the optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 4847
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Normalise the numeric settings (config values arrive as strings).
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Spawn one mining process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        # Wait for all workers; Ctrl-C exits cleanly.
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
Bein.py | #!/usr/bin/env python3
print("Inicializando...", end=' \r')
import time
# from ev3dev.ev3 import *
print("ev3dev.ev3", end=' \r')
from ev3dev2.motor import OUTPUT_A, OUTPUT_B, OUTPUT_C, MoveTank, MoveSteering, SpeedPercent, LargeMotor
print("motores importados", end=' \r')
from ev3dev2.sensor.lego import ColorSensor, UltrasonicSensor
# BUG FIX: INPUT_1 is used below for the rear colour sensor but was never
# imported, which crashed the script with NameError at startup.
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
print("Sensores importados", end=' \r')
from threading import Thread
from math import sqrt
import pickle
print("threading, math e pickle importados", end=' \r')
time.sleep(1)
print("Importacoes concluidas!", end=' \r')
# GLOBAL STATE ---------------------------------------------------------------
rodas = MoveTank(OUTPUT_A, OUTPUT_B)        # drive motors, tank pair
curva = MoveSteering(OUTPUT_A, OUTPUT_B)    # same motors, steering interface
Mochila = LargeMotor(OUTPUT_C)              # "backpack" arm that grabs the figure
quads = []              # reference squares discovered so far
orientacao = 0          # heading taken out of the last square (degrees)
# memoria_cor= {}
memoria_cor = {}        # colour name -> orientation learned for that square
plaza = False           # True once the plaza (goal area) has been found
cor_atual = ""          # colour of the current reference square
tentativa = 0           # failed-direction attempts from the current square
c = ""                  # latest colour classification (updated by cor_th)
mochila = False         # True while the figure is being carried
velocidade = 15         # base speed (percent)
velocidadeFrente = 30   # forward-search speed (percent)
cores = pickle.load(open("Cores.p", "rb"))  # calibrated colour table
Sensor_direita = ColorSensor(INPUT_2)
Sensor_esquerda = ColorSensor(INPUT_4)
Sensor_Tras = ColorSensor(INPUT_1)
Sensor_direita.mode = Sensor_direita.MODE_RGB_RAW
Sensor_esquerda.mode = Sensor_esquerda.MODE_RGB_RAW
Sensor_Tras.mode = Sensor_Tras.MODE_RGB_RAW
Sensor_sonic = UltrasonicSensor(INPUT_3)
Sensor_sonic.mode = Sensor_sonic.MODE_US_DIST_CM
print("Declarando tudo!", end=' \r')
# LOCOMOTION FUNCTIONS
def naocaiaRe():
    """Edge-avoidance while reversing ("nao caia" = don't fall).

    A channel sum below 40 presumably means the sensor left the board
    (very dark reading) — TODO confirm threshold; steer the opposite
    wheel harder to pull back on.
    """
    global d
    atualD = d[0]+d[1]+d[2]
    atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
    if atualE< 40:
        rodas.on(-SpeedPercent(velocidade-5), -SpeedPercent(velocidade))
    if atualD<40:
        rodas.on(-SpeedPercent(velocidade), -SpeedPercent(velocidade-5))
def retorno():  # return routine
    """Drive back until the current reference colour is seen again, then try the next direction."""
    global tentativa,c,cor_atual,velocidade
    while c!=cor_atual:
        naocaiaRe()
        rodas.on(SpeedPercent(velocidade),SpeedPercent(velocidade))
        if c!= 'White': Confirmar_cor(c)
    # timing for stopping in the middle of the square
    rodas.on_for_seconds(SpeedPercent(velocidade), SpeedPercent(velocidade), 8/SpeedPercent(velocidade))
    # 8 is the time factor for driving back to the middle of the square; increasing it moves further back
    rodas.off()
    tentativa+=1  # record that one attempt failed
    procurar_proximo()  # turn according to the orientations that are still possible
    alinha(0.01,245,15)  # move slightly forward so the robot does not re-detect the same reference point as a new one
def naocaia_alinhar():
    """Edge recovery used during alignment: wiggle back onto the board when a side sensor goes dark."""
    global d
    atualD = d[0]+d[1]+d[2]
    atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
    if(atualE<40):
        # Left sensor off the board: pivot right, re-centre, back up.
        rodas.on_for_rotations(20,0,0.30)
        rodas.on_for_rotations(0,20,0.35)
        rodas.on_for_rotations(-30,-30,0.25)
    if(atualD<40):
        # Right sensor off the board: mirror of the above.
        rodas.on_for_rotations(0,20,0.30)
        rodas.on_for_rotations(20,0,0.35)
        rodas.on_for_rotations(-30,-30,0.25)
def alinha(Kp,target,margem):
    """Proportional alignment of both colour sensors to a brightness target.

    :param Kp: base proportional gain (each wheel adds its own offset)
    :param target: desired channel-sum reading for both sensors
    :param margem: tolerance band inside which the error counts as zero

    First nudges forward/backward to the white/colour boundary, then runs a
    P-loop on each side until both errors are within *margem* (with a 10 s
    bail-out), and finally backs up onto White.
    """
    global d
    erroE=1
    erroD=1
    if c == 'White':
        # On white: creep forward until the left sensor reading saturates.
        atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
        while c=='White' and atualE<280 :
            rodas.on(15,15)
            atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
        rodas.off()
    else:
        # On a colour: back up until white (or a bright reading) appears.
        atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
        while c!='White' and atualE<280:
            rodas.on(-15,-15)
            atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
        rodas.off()
    over=time.time()  # start of the 10-second alignment deadline
    while(erroE != 0 or erroD != 0) :
        naocaia_alinhar();
        atualD = d[0]+d[1]+d[2]
        erroD=atualD - target
        if abs(erroD)<margem:
            erroD=0
        outputD = erroD* (Kp+0.01)
        atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
        erroE=atualE - target
        if abs(erroE)<margem:
            erroE=0
        outputE = erroE* (Kp+0.008)
        # Clamp motor outputs to +/-40 percent.
        if outputE>40:
            outputE = 40
        elif outputE<-40:
            outputE=-40
        if outputD>40:
            outputD = 40
        if time.time()-over>10:
            # Deadline hit: give up and pretend we are aligned.
            rodas.off()
            erroE=0
            erroD=0
        if erroE == 0 and erroD == 0:
            rodas.off()
        else:
            rodas.on(outputE,outputD)
    # Finish by reversing back onto White.
    while c!='White':
        rodas.on(-20,-20)
    time.sleep(0.1)
    rodas.off()
def andar_frente():  # re-tune all timing factors here for the final robot and track
    """Drive forward looking for the next reference square.

    Loops until either Black is seen (dead end: drive back via ``retorno``
    and return) or a new non-white/non-black colour is confirmed (register
    it, pick the next direction via ``procurar_proximo`` and return).
    While on White, movement is delegated to ``procurar_passageiro``.
    """
    global cor_atual,tentativa,quads,c,plaza,memoria_cor
    while 1:
        # print(c)
        if(c=='Black'):
            rodas.off()
            retorno()
            return
        elif c!='White' and c!='Black':  # true for a new colour other than black and white
            #print(Corfimar_cor(c))
            if(Confirmar_cor(c)):
                verificar_plaza()
                if(len(quads)>0 and plaza==False):memoria_cor[cor_atual]=orientacao
                if(plaza==False):quads.append(c)
                cor_atual=c
                # BUG FIX: original condition used "or", which is always
                # true; "and" expresses the intended "neither white nor black".
                if cor_atual!='White' and cor_atual!='Black':
                    print('ACHEI: ',cor_atual)
                tentativa=0
                rodas.off()
                procurar_proximo()
                alinha(0.01,245,15)
                return
        while c=='White':
            # Walk across the white area hunting for the passenger when the
            # backpack is empty; otherwise it just keeps moving forward.
            procurar_passageiro()
def virar(graus):  # turn relative to the current heading
    """Turn *graus* degrees in place (negative = left, positive = right, 0 = no-op).

    0.683 is the rotation factor per 90 degrees; on Red a larger factor
    (0.770) is used. Re-tune these if the turning speed changes.
    """
    if graus<0:
        if c == 'Red':
            rodas.on_for_rotations(-40,40,abs(graus)*(0.770/90))
        else:
            rodas.on_for_rotations(-40,40,abs(graus)*(0.683/90))
    elif(graus==0): pass
    else:
        if c == 'Red':
            rodas.on_for_rotations(40,-40,abs(graus)*(0.770/90))
        else:
            rodas.on_for_rotations(40,-40,abs(graus)*(0.683/90))  # FROM HELL
def procurar_proximo():  # turn according to what was learned, or the lack of it
    """Choose the next direction out of the current square.

    Unknown square: try right (attempt 0), then straight (1), then left (2).
    Known square: turn straight to the memorised orientation.
    """
    global tentativa,cor_atual,orientacao
    if (cor_atual not in memoria_cor.keys()):
        if (tentativa == 0):
            virar(90)
            orientacao = 90
        if (tentativa == 1):
            virar(-90)
            orientacao = 0
        if (tentativa == 2):
            virar(-90)
            orientacao = -90
    else:virar(memoria_cor[cor_atual])
# This function must cover 4 cases (whatever appears in the list is what it
# should try, because it is not yet known):
# 1: Right->Front->Left (knows nothing)
# 2: Right->Left (knows front/0)
# 3: Front->Left (knows right/90)
# 4: Left (knows right/90 and front/0)
# END OF LOCOMOTION FUNCTIONS
#FUNÇÕES DE COR
def media(leitura1, leitura2):
    """Average two 3-channel (RGB) readings channel-wise; returns a 3-tuple."""
    return tuple((leitura1[canal] + leitura2[canal]) / 2 for canal in range(3))
def cor_mais_proxima(leitura):
    """Return the name of the calibrated colour closest to *leitura*.

    Uses the Euclidean distance in RGB space between the sensor reading and
    each calibrated colour in the global ``cores`` dict.

    Improvements over the original: tracks the best key in the same pass
    (the original rescanned ``cores.items()`` on every improvement, O(n^2))
    and no longer shadows the builtin ``min``.
    """
    global cores
    cor = None
    menor_dist = 1000  # sentinel; calibrated colours are always closer than this
    for nome, valor in cores.items():
        dist = sqrt(((leitura[0]-valor[0])**2) +
                    ((leitura[1]-valor[1])**2)+((leitura[2]-valor[2])**2))
        if(dist < menor_dist):  # closer than anything seen so far
            menor_dist = dist
            cor = nome
    return cor
def diferente_de(*cor):
    """Return 1 when the current colour reading ``c`` is none of *cor*, else 0."""
    global c
    return 0 if c in cor else 1
def cor_th():
    """Sensor-polling daemon: continuously refresh the shared colour state.

    Runs forever in a background thread, keeping ``c`` (classified colour
    name) and ``d`` (raw right-sensor RGB) up to date for everyone else.
    """
    global c,d
    while(1):
        c=cor_mais_proxima(Sensor_direita.rgb)
        d=Sensor_direita.rgb
def Confirmar_cor(cor_vista):
    """Debounce a colour detection: re-check after 200 ms.

    Returns True when the reading is stable; otherwise runs edge recovery
    if the right sensor looks dark and returns False.
    """
    global c
    time.sleep(0.2)
    if(c==cor_vista):
        # NOTE(review): assigns a local, not the global cor_atual — the
        # callers set the global themselves, so this line looks vestigial.
        cor_atual=c
        return True
    else:
        atualD = d[0]+d[1]+d[2]
        if atualD<80:
            naocaia()
        return False
#FIM DAS FUNÇÕES DE COR
#FUNÇÕES DO PLAZA
def verificar_plaza():
    """Detect and handle the plaza (drop-off area).

    Reverses for a fixed time while counting colour changes; two or more
    changes mean the striped plaza entrance was crossed. In that case the
    robot backs to Black, delivers the figure, retraces its path and turns
    around (``Volta``).
    """
    global c, mochila, quad, cor_atual, plaza,velocidade
    if(1):
        if c!='Black':
            mudanca = 0
            cor_momento = c
            # 32.22 = time factor to reach the middle of the COLOURED square;
            # if the speed of 15 changes (e.g. ramp climbing), adjust it too.
            goiaba = Thread(target=rodas.on_for_seconds, args=(-15, -15, 32.22/15,))
            goiaba.start()
            while(goiaba.is_alive()):
                #print("Checando plaza: ",mudanca)
                if (cor_momento != c):
                    mudanca += 1
                    cor_momento = c
            if(mudanca >= 2):
                print("PLAZA")
                pickle.dump(quads,open('memoria.p','wb'))  # store the squares seen, for future debugging
                plaza=True  # plaza found
                quads.append(quad(cor_atual))  # record the last square before the plaza
            tempo=time.time()
            rodas.on(-30,-30)
            while(c!='Black'):
                rodas.on(-(SpeedPercent(velocidade)*1.35), -(SpeedPercent(velocidade)*1.35))
                if(diferente_de('Black','White')):
                    if(Confirmar_cor(c)):
                        rodas.off()
                        return
            if(plaza==True):
                # Drop-off sequence: push past the plaza stripes, release the
                # figure, then drive back the same amount of time.
                rodas.on(-25,-35)
                time.sleep(3)
                rodas.off()
                time.sleep(49.5/SpeedPercent(velocidade))  # time to get past the plaza stripes; no need to change
                par=mochila
                solte()  # leave the FIGURE
                mochila=False
                rodas.on_for_seconds((SpeedPercent(velocidade)*1.35), (SpeedPercent(velocidade)*1.35), time.time()-tempo)
                while(c=='White'):rodas.on(SpeedPercent(velocidade),SpeedPercent(velocidade))
                rodas.on_for_seconds(SpeedPercent(velocidade), SpeedPercent(velocidade), 8/SpeedPercent(velocidade))
                # 8 must be the same time factor used in retorno()
                rodas.off()
                if par==True:Mochila_sobe()
                virar(180)
                Volta()
            else:pass
            goiaba.join()
    rodas.off()
def Volta():
    """Retrace the learned path backwards after the plaza, looking for the passenger."""
    global quads,mochila,start_time,c,velocidade
    i=len(quads)-2  # index so the robot only goes up to the last square
    while(i>0 and mochila==False):  # remove the mochila condition to make it go all the way
        if c!='White':
            # Undo the memorised turn for this square, then realign.
            print(memoria_cor[c])
            virar((memoria_cor[c])*(-1))
            alinha(0.01,245,15)
        procurar_passageiro()
        time.sleep(35.22/SpeedPercent(velocidade))  # same time factor as verificar_plaza (entering a coloured square)
        rodas.off()
        if(mochila==True ):
            # Passenger picked up: turn around and head back to White.
            virar(90)
            virar(90)
            alinha(0.01,245,15)
            while(c!='White'):rodas.on(-SpeedPercent(velocidade),-SpeedPercent(velocidade))
            rodas.off()
            break
        i-=1
    # if the sensor detects something, return start_time and run the pick-up routine
    if(i==0):
        virar(90)
        virar(90)
        while(c!='White'):rodas.on(-SpeedPercent(velocidade),-SpeedPercent(velocidade))
        rodas.off()
    procurar_passageiro()
    verificar_plaza()
#FIM DAS FUNÇÕES DO PLAZA
def naocaia():
    """Edge-avoidance while driving forward: curve away when a side sensor goes dark (sum < 40)."""
    global d
    atualD = d[0]+d[1]+d[2]
    atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
    if atualE< 40:
        rodas.on(-SpeedPercent(velocidadeFrente+30), -SpeedPercent(velocidadeFrente-5))
    if atualD< 40:
        rodas.on(-SpeedPercent(velocidadeFrente-5), -SpeedPercent(velocidadeFrente+30))
# BACKPACK (FIGURE-GRABBING EQUIPMENT) FUNCTIONS
def procurar_passageiro():
    """Drive forward across White (with edge avoidance) hunting for the passenger.

    The ultrasonic trigger for actually grabbing the figure is currently
    commented out.
    """
    global mochila,c,velocidadeFrente
    while c == 'White':
        naocaia()
        rodas.on(-SpeedPercent(velocidadeFrente), -SpeedPercent(velocidadeFrente+0.5))
        # if Sensor_sonic.distance_centimeters<30 and mochila==0 :
        #     rodas.off()
        #     pega()
def Mochila_desce():
    """Lower the backpack arm fully (positive rotations lower; negative raise)."""
    Mochila.on_for_rotations(SpeedPercent(20), 0.53)  ## negative raises
def Mochila_solta():
    """Open the backpack arm a quarter turn to release the figure."""
    Mochila.on_for_rotations(SpeedPercent(20),0.25)
def Mochila_pega():
    """Close the backpack arm a quarter turn to grab the figure."""
    Mochila.on_for_rotations(SpeedPercent(-20), 0.25)
def Mochila_sobe():
    """Raise the backpack arm fully (mirror of Mochila_desce)."""
    Mochila.on_for_rotations(SpeedPercent(-20), 0.53)
def solte():
    """Stop the wheels and release the figure if one is being carried."""
    global mochila
    rodas.off()
    if(mochila==True):
        Mochila_solta()
def pega():
    """Pick up the figure detected by the ultrasonic sensor.

    Approach distance is proportional to the measured distance
    (dist * 0.05 seconds of driving); tune that factor so the robot
    actually reaches the figure.
    """
    global mochila
    dist = Sensor_sonic.distance_centimeters
    time.sleep(0.5)
    rodas.off()
    Mochila_desce()
    virar(90)
    time.sleep(1)
    rodas.on_for_seconds(-20,-20,dist*0.05)  # tune this value so the robot reaches the figure
    Mochila_pega()
    time.sleep(1)
    mochila=True
    rodas.on_for_seconds(20,20,dist*0.05)
    virar(-90)
    rodas.off()
#FIM DAS FUNÇÕES DE MOCHILA
#FUNÇÕES DE INFORMAÇÃO
class quad:  # record of a discovered reference point
    """Holds the data of one reference square (currently just its colour)."""

    def __init__(self, cor):
        # Colour name of the square; time/orientation fields were removed.
        self.cor = cor
        # self.tempo = tempo
        # self.orientacao=orientacao
#FIM DAS FUNÇÕES DE INFORMAÇÃO
print("Vamos comecar!", end=' \r')
if __name__=="__main__":
    start_time=0
    plaza = False
    # Background daemon keeps the shared colour state (c, d) fresh.
    ver_cor = Thread(target=cor_th)
    ver_cor.daemon=True
    ver_cor.start()
    time.sleep(0.5)
    Mochila_sobe()
    # Main behaviour loop: explore squares forever.
    while (1):
        andar_frente()
        #print(c)
        #procurar_passageiro()
        #virar(-90)
        #rodas.on_for_degrees(-40,-40,90)
        #time.sleep(2)
        #curva.on_for_degrees(50,40,40,660)
        #time.sleep(0.3)
    # if (tuts=0):  # if it sees Black, return to the reference point it came from
    #     retorno()
    # # if it sees a new reference point, update the memory for that colour and
    # # store info about the discovered square and the last one linked to it
    # if (tuts=1):
    #     print ('Achei: ',c)
    #     tentativa=0  # reset the attempt counter, meaning this is a new square
    #     if(plaza==False and len(quads)>0):
    #         memoria_cor[cor_atual]=orientacao
    #         quads.append(quad(cor_atual)
2_thread_wayA.py | # 创建 Thread 的实例,传给它一个函数
import threading
from time import sleep, ctime
loops = [4, 2]
def loop(nloop, nsec):
    """Announce the start of loop *nloop*, sleep *nsec* seconds, announce completion."""
    started_at = ctime()
    print('Start LOOP', nloop, 'at:', started_at)
    sleep(nsec)
    print('LOOP', nloop, 'DONE at:', ctime())
def main():
    """Spawn one thread per entry in *loops*, wait for them all, report timing."""
    print('程序开始,当前时间', ctime())
    workers = [threading.Thread(target=loop, args=(i, loops[i]))
               for i in range(len(loops))]
    for worker in workers:
        worker.start()
    for worker in workers:
        # Block until every worker has finished.
        worker.join()
    print('程序结束,当前时间', ctime())
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
python_set_emit_consume_large_string_columns.py | #!/usr/bin/env python2.7
# encoding: utf8
import os
import sys
import time
import tempfile
import subprocess
import threading
sys.path.append(os.path.realpath(__file__ + '/../../../lib'))
sys.path.append(os.path.realpath(__file__ + '/..'))
import udf
from abstract_performance_test import AbstractPerformanceTest
class SetEmitConsumeLargeStringColumnPythonPeformanceTest(AbstractPerformanceTest):
    """Performance test: a Python SET UDF consuming very wide VARCHAR rows."""

    def generate_data_linear(self, multiplier, base=10):
        """Create table BATCH from *base* generated rows, then copy it into T *multiplier* times.

        The rows are streamed into an exaplus IMPORT through a named FIFO
        so the multi-megabyte CSV never has to exist on disk.
        """
        # self.number_of_characters = 2000000
        self.number_of_characters = 1864129  # VARCHAR length of every column
        columns_definition = ",".join(["column%s VARCHAR(%s)"%(i,self.number_of_characters) for i in range(self.number_of_columns)])
        self.column_names = ",".join(["column%s"%(i) for i in range(self.number_of_columns)])
        create_table_sql = 'CREATE OR REPLACE TABLE BATCH (%s);'%columns_definition
        # One CSV row: every column filled with 'a' repeated to full width.
        column_values = ",".join(["a"*self.number_of_characters for i in range(self.number_of_columns)])
        tmpdir = tempfile.mkdtemp()
        fifo_filename = os.path.join(tmpdir, 'myfifo')
        import_table = '''IMPORT into BATCH from local CSV file '%s';'''%fifo_filename
        try:
            os.mkfifo(fifo_filename)
            cmd = '''%(exaplus)s -c %(conn)s -u %(user)s -P %(password)s -s %(schema)s
            -no-config -autocommit ON -L -pipe''' % {
                'exaplus': os.environ.get('EXAPLUS'),
                'conn': udf.opts.server,
                'user': self.user,
                'password': self.password,
                'schema': self.schema
            }
            env = os.environ.copy()
            env['LC_ALL'] = 'en_US.UTF-8'
            exaplus = subprocess.Popen(
                cmd.split(),
                env=env,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            # Feed the FIFO from a separate thread: a FIFO write blocks until
            # the IMPORT on the other end starts reading.
            write_trhead = threading.Thread(target=self.write_into_fifo, args=(fifo_filename, column_values, base))
            write_trhead.start()
            sql=create_table_sql+"\n"+import_table+"\n"+"commit;"
            out, _err = exaplus.communicate(sql.encode('utf8'))
            print(out)
            print(_err)
            write_trhead.join()
        finally:
            os.remove(fifo_filename)
            os.rmdir(tmpdir)
        # Fan the imported batch out into the actual test table T.
        create_table_sql = 'CREATE OR REPLACE TABLE T (%s);'%columns_definition
        self.query(create_table_sql)
        for i in range(multiplier):
            self.query('''INSERT INTO T select * from BATCH;''')
        self.query("commit")

    def write_into_fifo(self, fifo_filename, column_values, rows):
        """Write *rows* copies of the pre-built CSV line into the FIFO."""
        with open(fifo_filename,"w") as f:
            for i in range(rows):
                f.write(column_values)
                f.write("\n")

    def setUp(self):
        """Create the schema, the consuming UDF and the test data."""
        self.create_schema();
        self.number_of_columns = 9
        self.query(udf.fixindent('''
            CREATE PYTHON SET SCRIPT CONSUME_NEXT_COLUMNS(...) RETURNS INT AS
            def run(ctx):
                count = 0
                # while(ctx.next()):
                for i in range(%s):
                    stringVal = ctx[0]
                # ctx.emit(count)
                return count
            '''% self.number_of_columns))
        self.query("commit")
        self.generate_data_linear(10)

    def tearDown(self):
        # Drop everything created in setUp.
        self.cleanup(self.schema)

    def test_consume_next_columns(self):
        """Measure the UDF consuming every wide column of every row of T."""
        self.run_test(15, 3, 2.0, "SELECT CONSUME_NEXT_COLUMNS(%s) FROM T"%self.column_names)
        #self.run_test(2, 2.0, "SELECT %s FROM T"%self.column_names)
# Entry point: delegate to the shared UDF test runner.
if __name__ == '__main__':
    udf.main()
# vim: ts=4:sts=4:sw=4:et:fdm=indent
|
webserver.py | #!/usr/bin/env python
# A simple webserver for Naoko that serves up some interesting statistics.
# In integrated mode this is started with Naoko and uses her instance of NaokoDB, though not the thread.
from lib.external.bottle import route, run, default_app, SimpleTemplate, static_file
import logging
from settings import *
import time
import threading
import os.path
from collections import deque
def package(fn, *args, **kwargs):
    """Freeze *fn* with the given arguments into a zero-argument callback.

    The wrapped call's return value is deliberately discarded; the callback
    always returns None.
    """
    def _invoke():
        fn(*args, **kwargs)
    return _invoke
class NaokoWebServer(object):
    """Bottle-based stats page for Naoko, backed by a single-threaded DB worker."""

    # Shared NaokoDB handle, set by the dedicated DB thread (dbloop).
    dbclient = None

    def __init__(self, db_queue, db_start, host, port, protocol, room):
        self.logger = logging.getLogger("webserver")
        self.logger.setLevel(LOG_LEVEL)
        self.db_queue = db_queue
        self.db_start = db_start
        # Only one thread can access a connection object
        self.db_done = threading.Event()
        # Avoid querying the database twice in a row
        self.db_lock = threading.Lock()
        self.host = host
        self.port = port
        self.protocol = protocol
        self.room = room
        f = open(os.path.join("web","template.html"), 'r')
        self.template = SimpleTemplate(f)
        f.close()
        self.cache = None
        self.last_render = 0

    def render(self):
        """Serve the stats page, re-querying the database at most every 30 minutes.

        NOTE(review): double-checked pattern without try/finally — an
        exception in getData() would leave db_lock held; and self.rendered
        is unset until the first getData() completes. Confirm callers
        tolerate that.
        """
        if time.time() - self.last_render > 60 * 30:
            self.db_lock.acquire()
            if time.time() - self.last_render > 60 * 30:
                self.getData()
            self.db_lock.release()
        return self.rendered

    def static(self, path):
        """Serve files under web/static for the /static/<path> route."""
        return static_file(path, root=os.path.join("web", "static"))

    def getData(self):
        """Enqueue a refresh on the DB thread and block until it finishes."""
        self.logger.debug("Fetching new data from the database")
        self.db_done.clear()
        self.db_queue.append(self._getData)
        self.db_start.set()
        self.db_done.wait()
    # Takes 4-5 seconds to get everything
    # If performance is a problem possible solutions are:
    # 1: ajax calls + appears more responsive to the user, not just sitting on a blank page
    #    - more http requests will be extremely slow on the bottle.py http server, still puts heavy load on the sqlite database
    # 2: precalculation/running totals in the database + faster, no extra calls, extra load on the database in negligible, potentially serve new data with every request
    #    - complicates database more, requires extra tables and additional columns, initial database upgrade may take a very long time, much more difficult to change decisions later

    def _getData(self):
        """Run on the DB thread: query all stats, render the template, signal done."""
        averageUsers = map(lambda (x, y): [int(x), y], NaokoWebServer.dbclient.getAverageUsers())
        userVideoStats = NaokoWebServer.dbclient.getUserVideoStats()
        userChatStats = NaokoWebServer.dbclient.getUserChatStats()
        popularVideos = NaokoWebServer.dbclient.getPopularVideos()
        # Takes 20+ seconds alone on a 100mb database, unacceptable
        #messageStats = NaokoWebServer.dbclient.getMessageCounts()
        self.rendered = self.template.render(averageUsers=averageUsers, userChatStats=userChatStats, popularVideos=popularVideos, userVideoStats=userVideoStats, room=self.room)
        self.last_render = time.time()
        self.db_done.set()

    def start(self):
        """Register routes and run under FastCGI or the builtin HTTP server."""
        route('/static/<path:path>')(self.static)
        route("/")(self.render)
        if self.protocol == "fastcgi":
            from flup.server.fcgi import WSGIServer
            WSGIServer(default_app(), bindAddress=(self.host, int(self.port))).run()
        elif self.protocol == "http":
            run(host=self.host, port=int(self.port))
# Runs a dedicated thread for accessing the database
# Runs a dedicated thread for accessing the database
def dbloop(dbfile, db_queue, db_signal):
    """Own the NaokoDB connection and execute queued database actions.

    Sleeps on db_signal; each time it is set, drains every pending callable
    from db_queue in FIFO order.
    """
    from lib.database import NaokoDB
    NaokoWebServer.dbclient = NaokoDB(dbfile)
    while db_signal.wait():
        db_signal.clear()
        while db_queue:
            task = db_queue.popleft()
            task()
if __name__ == "__main__":
    # Standalone mode runs the webserver as a daemon
    # NOTE(review): ConfigParser is the Python 2 module name; this entry point
    # presumably predates a Python 3 port (configparser) -- confirm target version.
    import ConfigParser
    import sys, os
    from lib.daemon.manager import manageDaemon
    # Pull all connection settings from naoko.conf's [naoko] section.
    config = ConfigParser.RawConfigParser()
    config.read("naoko.conf")
    dbfile = config.get("naoko", "db_file")
    mode = config.get("naoko", "webserver_mode")
    host = config.get("naoko", "webserver_host")
    port = config.get("naoko", "webserver_port")
    protocol = config.get("naoko", "webserver_protocol")
    room = config.get("naoko", "room")
    # Refuse to start unless configured for standalone use with a real database file.
    assert mode == "standalone", "Web server not set to standalone mode"
    assert dbfile and dbfile != ":memory:", "No database file"
    def startServer():
        # Spin up the dedicated database thread, then run the web server in
        # this thread; both communicate through db_queue/db_signal.
        logging.basicConfig(format='%(name)-15s:%(levelname)-8s - %(message)s', stream=sys.__stderr__)
        db_queue = deque()
        db_signal = threading.Event()
        dbthread = threading.Thread(target=dbloop, args=[dbfile, db_queue, db_signal])
        dbthread.start()
        server = NaokoWebServer(db_queue, db_signal, host, port, protocol, room)
        server.start()
    # First CLI argument selects the daemon action (e.g. start/stop/debug).
    command = sys.argv[1] if len(sys.argv) > 1 else None
    if command == "debug":
        # Run in the foreground without daemonizing.
        startServer()
    else:
        manageDaemon(startServer, command, sys.argv[0], "/tmp/naokoweb.pid", os.path.abspath(os.getcwd()))
else:
    # Imported by Naoko: embedded mode reuses her database client and queue.
    import time
    def startServer(naoko):
        # Give Naoko time to set up everything
        while not hasattr(naoko, "dbclient"):
            # Sleep until the NaokoDB is ready
            time.sleep(1)
        # flup doesn't work from threads that aren't the main thread
        assert naoko.webserver_protocol == "http", "Embedded web server only supports http mode."
        naoko.logger.debug("Starting web server in embedded mode on %s:%s:%s." % (naoko.webserver_protocol, naoko.webserver_host, naoko.webserver_port))
        NaokoWebServer.dbclient = naoko.dbclient
        server = NaokoWebServer(naoko.sql_queue, naoko.sqlAction, naoko.webserver_host, naoko.webserver_port, naoko.webserver_protocol, naoko.room)
        server.start()
|
binpickingcontrollerclient.py | # -*- coding: utf-8 -*-
# Copyright (C) 2013-2015 MUJIN Inc.
# Mujin controller client for bin picking task
# mujin imports
from . import realtimerobotclient
# logging
import logging
log = logging.getLogger(__name__)
class BinpickingControllerClient(realtimerobotclient.RealtimeRobotControllerClient):
"""mujin controller client for bin picking task
"""
tasktype = 'binpicking'
def __init__(self, regionname=None, **kwargs):
"""logs into the mujin controller, initializes binpicking task, and sets up parameters
:param controllerurl: url of the mujin controller, e.g. http://controller14
:param controllerusername: username of the mujin controller, e.g. testuser
:param controllerpassword: password of the mujin controller
:param binpickingzmqport: port of the binpicking task's zmq server, e.g. 7110
:param binpickingheartbeatport: port of the binpicking task's zmq server's heartbeat publisher, e.g. 7111
:param binpickingheartbeattimeout: seconds until reinitializing binpicking task's zmq server if no heartbeat is received, e.g. 7
:param scenepk: pk of the bin picking task scene, e.g. irex2013.mujin.dae
:param robotname: name of the robot, e.g. VP-5243I
:param regionname: name of the bin, e.g. container1
:param toolname: name of the manipulator, e.g. 2BaseZ
:param envclearance: environment clearance in millimeters, e.g. 20
:param usewebapi: whether to use webapi for controller commands
:param robotaccelmult: optional multiplier for forcing the acceleration
"""
super(BinpickingControllerClient, self).__init__(tasktype=self.tasktype, **kwargs)
# bin picking task
self.regionname = regionname
#########################
# robot commands
#########################
def PickAndPlace(self, goaltype, goals, targetnamepattern=None, approachoffset=30, departoffsetdir=[0, 0, 50], destdepartoffsetdir=[0, 0, 30], deletetarget=0, debuglevel=4, movetodestination=1, freeinc=[0.08], worksteplength=None, densowavearmgroup=5, regionname=None, cameranames=None, envclearance=None, toolname=None, robotspeed=None, timeout=1000, **kwargs):
"""picks up an object with the targetnamepattern and places it down at one of the goals. First computes the entire plan from robot moving to a grasp and then moving to its destination, then runs it on the real robot. Task finishes once the real robot is at the destination.
:param desttargetname: The destination target name where the destination goal ikparams come from
:param destikparamnames: A list of lists of ikparam names for the destinations of the target. Only destikparamnames[0] is looked at and tells the system to place the part in any of the ikparams in destikparamnames[0]
:param targetnamepattern: regular expression describing the name of the object, no default will be provided, caller must set this. See https://docs.python.org/2/library/re.html
:param approachoffset: distance in millimeters to move straight to the grasp point, e.g. 30 mm
:param departoffsetdir: the direction and distance in mm to move the part in global frame (usually along negative gravity) after it is grasped, e.g. [0,0,50]
:param destdepartoffsetdir: the direction and distance in mm to move away from the object after it is placed, e.g. [0,0,30]. Depending on leaveoffsetintool parameter, this can in the global coordinate system or tool coordinate system.
:param leaveoffsetintool: If 1, destdepartoffsetdir is in the tool coordinate system. If 0, destdepartoffsetdir is in the global coordinate system. By default this is 0.
:param deletetarget: whether to delete target after pick and place is done
:param toolname: name of the manipulator
:param regionname: name of the region of the objects
:param cameranames: the names of the cameras to avoid occlusions with the robot, list of strings
:param envclearance: environment clearance in millimeters
Low level planning parameters:
:param debuglevel: sets debug level of the task
:param movetodestination: planning parameter
:param freeinc: planning parameter
:param worksteplength: planning parameter
:param densowavearmgroup: planning parameter
:param graspsetname: the name of the grasp set belong to the target objects to use for the target. Grasp sets are a list of ikparams
Manual Destination Specification (deprecated)
:param goaltype: type of the goal, e.g. translationdirection5d or transform6d
:param goals: flat list of goals, e.g. two 5d ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]
"""
if worksteplength is None:
worksteplength = 0.01
assert(targetnamepattern is not None)
if regionname is None:
regionname = self.regionname
taskparameters = {
'command': 'PickAndPlace',
'goaltype': goaltype,
'envclearance': envclearance,
'movetodestination': movetodestination,
'goals': goals,
'approachoffset': approachoffset,
'departoffsetdir': departoffsetdir,
'destdepartoffsetdir': destdepartoffsetdir,
'freeinc': freeinc,
'worksteplength': worksteplength,
'targetnamepattern': targetnamepattern,
'deletetarget': deletetarget,
'debuglevel': debuglevel,
}
if regionname is not None:
taskparameters['containername'] = regionname
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotspeed=robotspeed, toolname=toolname, timeout=timeout)
def StartPickAndPlaceThread(self, goaltype=None, goals=None, targetnamepattern=None, approachoffset=30, departoffsetdir=[0, 0, 50], destdepartoffsetdir=[0, 0, 30], deletetarget=0, debuglevel=4, movetodestination=1, worksteplength=None, regionname=None, envclearance=None, toolname=None, robotspeed=None, timeout=10, usewebapi=None, **kwargs):
"""Start a background loop to continuously pick up objects with the targetnamepattern and place them down at the goals. The loop will check new objects arriving in and move the robot as soon as it finds a feasible grasp. The thread can be quit with StopPickPlaceThread.
:param desttargetname: The destination target name where the destination goal ikparams come from
:param destikparamnames: A list of lists of ikparam names for the ordered destinations of the target. destikparamnames[0] is where the first picked up part goes, desttargetname[1] is where the second picked up target goes.
:param cycledests: When finished cycling through all destikparamnames, will delete all the targets and start from the first index again doing this for cycledests times. By default it is 1.
:param targetnamepattern: regular expression describing the name of the object, no default will be provided, caller must set this. See https://docs.python.org/2/library/re.html
:param approachoffset: distance in millimeters to move straight to the grasp point, e.g. 30 mm
:param departoffsetdir: the direction and distance in mm to move the part in global frame (usually along negative gravity) after it is grasped, e.g. [0,0,50]
:param destdepartoffsetdir: the direction and distance in mm to move away from the object after it is placed, e.g. [0,0,30]. Depending on leaveoffsetintool parameter, this can in the global coordinate system or tool coordinate system.
:param leaveoffsetintool: If 1, destdepartoffsetdir is in the tool coordinate system. If 0, destdepartoffsetdir is in the global coordinate system. By default this is 0.
:param deletetarget: whether to delete target after pick and place is done
:param toolname: name of the manipulator
:param regionname: name of the region of the objects
:param cameranames: the names of the cameras to avoid occlusions with the robot, list of strings
:param envclearance: environment clearance in millimeters
Low level planning parameters:
:param debuglevel: sets debug level of the task
:param movetodestination: planning parameter
:param worksteplength: planning parameter
:param densowavearmgroup: robot parameters
:param graspsetname: the name of the grasp set belong to the target objects to use for the target. Grasp sets are a list of ikparams
:param goaltype: type of the goal, e.g. translationdirection5d
:param goals: flat list of goals, e.g. two 5d ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]
:param useworkspaceplanner: If 1 is set, will try the workspace planner for moving the hand straight. If 2 is set, will try the RRT for moving straight. Can set 3 for trying both.
:param forceStartRobotValues: planning loop should always start from these values rather than reading from robot
:param initiallyDisableRobotBridge: if True, stops any communication with the robotbridge until robot bridge is enabled
"""
if worksteplength is None:
worksteplength = 0.01
assert(targetnamepattern is not None)
if regionname is None:
regionname = self.regionname
taskparameters = {
'command': 'StartPickAndPlaceThread',
'envclearance': envclearance,
'movetodestination': movetodestination,
'approachoffset': approachoffset,
'departoffsetdir': departoffsetdir,
'destdepartoffsetdir': destdepartoffsetdir,
'worksteplength': worksteplength,
'targetnamepattern': targetnamepattern,
'containername': regionname,
'deletetarget': deletetarget,
'debuglevel': debuglevel,
}
if goals is not None:
taskparameters['orderedgoals'] = goals
taskparameters['goaltype'] = goaltype
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotspeed=robotspeed, toolname=toolname, timeout=timeout, usewebapi=usewebapi)
def StopPickPlaceThread(self, resetExecutionState=True, resetStatusPickPlace=False, finishCode=None, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""stops the pick and place thread started with StartPickAndPlaceThread
:param resetExecutionState: if True, then reset the order state variables. By default True
:param resetStatusPickPlace: if True, then reset the statusPickPlace field of hte planning slave. By default False.
:param finishCode: optional finish code to end the cycle with (if it doesn't end with something else beforehand)
"""
taskparameters = {
'command': 'StopPickPlaceThread',
'resetExecutionState': resetExecutionState,
'resetStatusPickPlace': resetStatusPickPlace,
'finishCode': finishCode
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def GetPickPlaceStatus(self, timeout=10, **kwargs):
"""gets the status of the pick and place thread
:return: status (0: not running, 1: no error, 2: error) of the pick and place thread in a json dictionary, e.g. {'status': 2, 'error': 'an error happened'}
"""
taskparameters = {'command': 'GetPickPlaceStatus'}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def ComputeIK(self, toolname=None, timeout=10, **kwargs):
"""
:param toolname: tool name, string
:param limit: number of solutions to return, int
:param iktype: grasp (but basically the just the ikparam), string
:param quaternion: grasp (but basically the just the ikparam) quaternion in world cooordinates, float array
:param translation: grasp (but basically the just the ikparam) translation in world cooordinates in mm, float array
:param direction: grasp (but basically the just the ikparam) direction in world cooordinates, float array
:param angle: grasp (but basically the just the ikparam) angle in world cooordinates, float
:param freeincvalue: float, the discretization of the free joints of the robot when computing ik.
:param filteroptions: OpenRAVE IkFilterOptions bitmask. By default this is 1, which means all collisions are checked, int
:param preshape: If the tool has fingers after the end effector, specify their values. The gripper DOFs come from **gripper_dof_pks** field from the tool., float array
:return: A dictionary of:
- solutions: array of IK solutions (each of which is an array of DOF values), sorted by minimum travel distance and truncated to match the limit
"""
taskparameters = {'command': 'ComputeIK'}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, toolname=toolname, timeout=timeout)
def InitializePartsWithPhysics(self, timeout=10, **kwargs):
"""Start a physics simulation where the parts drop down into the bin. The method returns as soon as the physics is initialized, user has to wait for the "duration" or call StopPhysicsThread command.
:param targeturi: the target uri to initialize the scene with
:param numtargets: the number of targets to create
:param regionname: the container name to drop the targets into
:param duration: the duration in seconds to continue the physics until it is stopped.
:param basename: The basename to give to all the new target names. Numbers are suffixed at the end, like basename+'0134'. If not specified, will use a basename derived from the targeturi.
:param deleteprevious: if True, will delete all the previous targets in the scene. By default this is True.
:param forcegravity: if not None, the gravity with which the objects should fall with. If None, then uses the scene's gravity
"""
taskparameters = {'command': 'InitializePartsWithPhysics'}
taskparameters.update(kwargs)
if 'containername' not in taskparameters:
taskparameters['containername'] = self.regionname
return self.ExecuteCommand(taskparameters, timeout=timeout)
def StopPhysicsThread(self, timeout=10, **kwargs):
"""stops the physics simulation started with InitializePartsWithPhysics
"""
taskparameters = {'command': 'StopPhysicsThread'}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def JitterPartUntilValidGrasp(self, toolname=None, timeout=10, **kwargs):
"""Select a part that wasn't able to be grasped and jitter its location such that a grasp set is found for it that will take it to the destination.
:param toolname: name of the manipulator
:param targetname: The target to try to grasp.
:param graspsetname: the name of the grasp set belong to the target objects to use for the target. Grasp sets are a list of ikparams.
:param approachoffset: The approach distance for simulating full grasp.
:param departoffsetdir: The depart distance for simulating full grasp.
:param destdepartoffsetdir: the direction and distance in mm to move away from the object after it is placed, e.g. [0,0,30]. Depending on leaveoffsetintool parameter, this can in the global coordinate system or tool coordinate system.
:param leaveoffsetintool: If 1, destdepartoffsetdir is in the tool coordinate system. If 0, destdepartoffsetdir is in the global coordinate system. By default this is 0.
:param desttargetname: The destination target name where the destination goal ikparams come from. If no name is specified, then robot won't consider putting the target into the destination when it searches for grasps.
:param destikparamnames: A list of lists of ikparam names for the ordered destinations of the target. destikparamnames[0] is where the first picked up part goes, desttargetname[1] is where the second picked up target goes.
:param jitterdist: Amount to jitter the target object translation by
:param jitterangle: Amount to jitter the target object's orientation angle
:param jitteriters: Number of times to try jittering before giving up.
:return: If failed, an empty dictionary. If succeeded, a dictionary with the following keys:
- translation: the new translation of the target part
- quaternion: the new quaternion of the target part
- jointvalues: robot joint values that are grasping the part (fingers are at their preshape).
- graspname: the grasp name used for jointvalues. If empty, then no grasp was found.
- destikname: the name of the destination ikparam that was chosen with the grasp
- destjointvalues: robot joint values at one of the specified destinations (fingers are at their final positions).
- desttranslation: the new translation of the target part
- destquaternion: the new quaternion of the target part
"""
taskparameters = {'command': 'JitterPartUntilValidGrasp'}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, toolname=toolname, timeout=timeout)
####################
# scene commands
####################
def IsRobotOccludingBody(self, bodyname, cameraname, timeout=10, **kwargs):
"""returns if the robot is occluding body in the view of the specified camera
:param bodyname: name of the object
:param cameraname: name of the camera
:return: the occlusion state in a json dictionary, e.g. {'occluded': 0}
"""
taskparameters = {
'command': 'IsRobotOccludingBody',
'bodyname': bodyname,
'cameraname': cameraname,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetPickedPositions(self, unit='m', timeout=10, **kwargs):
"""returns the poses and the timestamps of the picked objects
:param unit: unit of the translation
:return: the positions and the timestamps of the picked objects in a json dictionary, info of each object has the format of quaternion (w,x,y,z) followed by x,y,z translation (in mm) followed by timestamp in milisecond e.g. {'positions': [[1,0,0,0,100,200,300,1389774818.8366449],[1,0,0,0,200,200,300,1389774828.8366449]]}
"""
taskparameters = {
'command': 'GetPickedPositions',
'unit': unit,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetPickAndPlaceLog(self, timeout=10, **kwargs):
"""Gets the recent pick-and-place log executed on the binpicking server. The internal server keeps the log around until the next Pick-and-place command is executed.
:param startindex: int, start of the trajectory to get. If negative, will start counting from the end. For example, -1 is the last element, -2 is the second to last element.
:param num: int, number of trajectories from startindex to return. If 0 will return all the trajectories starting from startindex
:return:
total: 10
messages: [
{
"message":"message1",
"type":"",
"level":0,
"data": {
"jointvalues":[0,0,0,0,0,0]
}
},
]
"""
taskparameters = {'command': 'GetPickAndPlaceLog',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def MoveRobotOutOfCameraOcclusion(self, regionname=None, robotspeed=None, toolname=None, timeout=10, **kwargs):
"""moves the robot out of camera occlusion and deletes targets if it was in occlusion.
:param toolname: name of the tool to move when avoiding
:param cameranames: the names of the cameras to avoid occlusions with the robot, list of strings
"""
if regionname is None:
regionname = self.regionname
taskparameters = {
'command': 'MoveRobotOutOfCameraOcclusion',
'containername': regionname,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotspeed=robotspeed, toolname=toolname, timeout=timeout)
def PausePickPlace(self, timeout=10, **kwargs):
taskparameters = {
'command': 'PausePickPlace',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def ResumePickPlace(self, timeout=10, **kwargs):
taskparameters = {
'command': 'ResumePickPlace',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def SendStateTrigger(self, stateTrigger, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""
:param stateTrigger: a string that represents a unique trigger
"""
taskparameters = {
'command': 'SendStateTrigger',
'stateTrigger': stateTrigger,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def ResetCachedRobotConfigurationState(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
taskparameters = {
'command': 'ResetCachedRobotConfigurationState',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def SetCycleLocationsProhibited(self, prohibited, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
taskparameters = {
'command': 'SetCycleLocationsProhibited',
'prohibited': bool(prohibited),
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def GetBinpickingState(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
taskparameters = {'command': 'GetBinpickingState'}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def SetStopPickPlaceAfterExecutionCycle(self, timeout=10, **kwargs):
taskparameters = {
'command': 'SetStopPickPlaceAfterExecutionCycle',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def PutPartsBack(self, trajectoryxml, numparts, toolname=None, grippervalues=None, usewebapi=False, timeout=100, **kwargs):
"""runs saved planningresult trajs
"""
taskparameters = {
'command': 'PutPartsBack',
'trajectory': trajectoryxml,
'numparts': numparts,
'toolname': toolname,
}
if grippervalues is not None:
taskparameters['grippervalues'] = grippervalues
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)
def GenerateGraspModelFromIkParams(self, graspsetname, targeturi, toolname, robotname=None, usewebapi=True,
timeout=10, **kwargs):
"""
Generate grasp model ik for given setup
:param graspsetname: str. Name of graspset like 'all5d'
:param targeturi: str. uri of target scene like '4902201402644.mujin.dae'
:param toolname: str. Name of manipulator of the robot like 'suction0'
:param robotname:
:param usewebapi:
:param timeout:
:return:
"""
taskparameters = {
'command': 'GenerateGraspModelFromIkParams',
'graspsetname': graspsetname,
'targeturi': targeturi,
'toolname': toolname
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, robotname=robotname, toolname=toolname, usewebapi=usewebapi,
timeout=timeout)
def CheckGraspModelIk(self, graspsetname, targeturi, toolname, ikparamnames=None, usewebapi=True, timeout=10, **kwargs):
"""
Check if grasp model is generated for given setup
:param graspsetname: str. Name of graspset like 'all5d'
:param targeturi: str. uri of target scene like 'mujin:4902201402644.mujin.dae'
:param toolname: str. Name of manipulator of the robot like 'suction0'
:param usewebapi:
:return:
"""
taskparameters = {
'command': 'CheckGraspModelIk',
'graspsetname': graspsetname,
'targeturi': targeturi,
'toolname': toolname,
'ikparamnames': ikparamnames,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)
def SetCurrentLayoutDataFromPLC(self, containername, containerLayoutSize, destObstacleName, ioVariableName, timeout=10, usewebapi=True, **kwargs):
"""
sets current layout from plc
"""
taskparameters = {
'command': 'SetCurrentLayoutDataFromPLC',
'containername': containername,
'containerLayoutSize': containerLayoutSize,
'ioVariableName': ioVariableName,
'destObstacleName': destObstacleName
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=False)
def SendCurrentLayoutData(self, containername, containerLayoutSize, ioVariableName, includeTargetsWithPrefix, timeout=10, usewebapi=True, **kwargs):
'''
requests for sending layoutdata to plc
'''
taskparameters = {
'command': 'SendCurrentLayoutData',
'containername': containername,
'containerLayoutSize': containerLayoutSize,
'ioVariableName': ioVariableName,
'includeTargetsWithPrefix': includeTargetsWithPrefix
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=False)
def ClearVisualization(self, timeout=10, usewebapi=True, fireandforget=False, **kwargs):
"""
clears visualization
"""
taskparameters = {'command': 'ClearVisualization'}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def GetPlanStatistics(self, timeout=1, usewebapi=True, fireandforget=False, **kwargs):
"""
get plan and execute statistics of the last pick and place
"""
taskparameters = {'command': 'GetPlanStatistics'}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def ResetCurrentLayoutData(self, usewebapi=False, fireandforget=True, **kwargs):
"""
resets current layout data
"""
taskparameters = {'command': 'ResetCurrentLayoutData'}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, fireandforget=fireandforget)
def SetCurrentLayoutDataSendOnObjectUpdateData(self, doUpdate, containername=None, containerLayoutSize=None, ioVariableName=None, usewebapi=False, fireandforget=True, **kwargs):
"""
Sets currentLayoutDataSendOnObjectUpdateData structure
:param doUpdate: if True then currentLayoutData will be send on every ObjectUpdate, else currentLayoutDataSendOnObjectUpdate structure is reset
"""
taskparameters = {
'command': 'SetCurrentLayoutDataSendOnObjectUpdateData',
'doUpdate': doUpdate,
}
if containername is not None:
taskparameters['containername'] = containername
if containerLayoutSize is not None:
taskparameters['containerLayoutSize'] = containerLayoutSize
if ioVariableName is not None:
taskparameters['ioVariableName'] = ioVariableName
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, fireandforget=fireandforget)
def StartPackFormationComputationThread(self, timeout=10, debuglevel=4, toolname=None, usewebapi=None, **kwargs):
"""Start a background loop to copmute packing formation.
"""
taskparameters = {
'command': 'StartPackFormationComputationThread',
'debuglevel': debuglevel,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, toolname=toolname, timeout=timeout, usewebapi=usewebapi)
def StopPackFormationComputationThread(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""stops the packing computation thread thread started with StartPackFormationComputationThread
"""
taskparameters = {
'command': 'StopPackFormationComputationThread',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def VisualizePackingState(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""stops the packing computation thread thread started with StartPackFormationComputationThread
"""
taskparameters = {
'command': 'VisualizePackingState',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def VisualizePackFormationResult(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""stops the packing computation thread thread started with StartPackFormationComputationThread
:param initializeCameraPosition: bool. reset camera position
"""
taskparameters = {
'command': 'VisualizePackFormationResult',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def GetPackFormationSolution(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""stops the packing computation thread thread started with StartPackFormationComputationThread
"""
taskparameters = {
'command': 'GetPackFormationSolution',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def SendPackFormationComputationResult(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""stops the packing computation thread thread started with StartPackFormationComputationThread
"""
taskparameters = {
'command': 'SendPackFormationComputationResult',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def GetLatestPackFormationResultList(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""
Gets latest pack formation computation result
"""
taskparameters = {
'command': 'GetLatestPackFormationResultList',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def ClearPackingStateVisualization(self, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""
Clear packing visualization
"""
taskparameters = {
'command': 'ClearPackingStateVisualization',
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
def ValidatePackFormationResultList(self, packFormationResultList, timeout=10, usewebapi=None, fireandforget=False, **kwargs):
"""
Validates pack formation result list and compute info (fillRatio, packageDimensions, packedItemsInfo, etc) about it .
kwargs should be packing parameters
:return dictionary {'validatedPackFormationResultList':[{'validationStatus', 'errorCode', 'errorDesc', (optional)'packFormationResult'}]}
"""
taskparameters = {
'command': 'ValidatePackFormationResultList',
'packFormationResultList': packFormationResultList
}
taskparameters.update(kwargs)
ret = self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
return ret
def ComputeSamePartPackResultBySimulation(self, timeout=100, usewebapi=None, **kwargs):
    """Computes a pack formation for a single part type by simulation.

    :param timeout: seconds to wait for the command to complete (long default:
        simulation can take a while)
    :param usewebapi: forwarded to ExecuteCommand
    :param kwargs: extra command parameters; they override the defaults above
    """
    taskparameters = dict({'command': 'ComputeSamePartPackResultBySimulation'}, **kwargs)
    return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi)
|
multiplexer.py | from .asyncsocket import *
import socket
import struct
from threading import Lock, Thread
import time
import logging
# Module logger; configure via the "qlibs.net.multiplexer" logging namespace.
logger = logging.getLogger("qlibs.net.multiplexer")
# Wire-format packet header: network byte order, three ints and a double.
# Fields as used below: (packet_type, player_id, step, time_value).
base_struct = struct.Struct("!iiid")
class PlayerJoinedEvent:
    """Signals that a new player has joined the session."""

    name = "playerjoined"

    def __init__(self, player_id):
        # Identifier assigned by the server to the joining player.
        self.player_id = player_id

    def __str__(self):
        return f"Player joined {self.player_id}"
class PlayerLeftEvent:
    """Signals that a player has disconnected from the session."""

    name = "playerleft"

    def __init__(self, player_id):
        # Identifier of the player that disconnected.
        self.player_id = player_id

    def __str__(self):
        return f"Player left {self.player_id}"
class PayloadEvent:
    """Carries an opaque binary payload sent by a player.

    :raises ValueError: if ``data`` is not a bytes object.
    """

    name = "payload"

    def __init__(self, player_id, data):
        # isinstance instead of `type(data) != bytes`: the exact-type check
        # rejected bytes subclasses for no benefit.
        if not isinstance(data, bytes):
            raise ValueError("Data is not bytes")
        self.player_id = player_id
        self.data = data

    def __repr__(self):
        return f"PayloadEvent({self.player_id}, {self.data})"
class ReadyEvent:
    """Marks the end of a lockstep round; carries the round's wall time."""

    name = "ready"

    def __init__(self, timedelta):
        # Seconds elapsed since the previous round completed.
        self.timedelta = timedelta
class ReconstructEvent:
    """Instructs a client to rebuild its engine from serialized state.

    :raises ValueError: if ``data`` is not a bytes object.
    """

    name = "reconstruct"

    def __init__(self, data):
        # isinstance instead of `type(data) != bytes`: accept any bytes
        # instance, including subclasses.
        if not isinstance(data, bytes):
            raise ValueError("Data is not bytes")
        self.data = data
class MultiplexerException(Exception):
    """Raised for multiplexer protocol errors (e.g. reconstruct without a constructor)."""
def convert_event(event):
    """Serialize an event into its wire form: a base_struct header,
    optionally followed by a raw payload.

    :raises ValueError: for events with an unrecognized ``name``.
    """
    kind = event.name
    if kind == "ready":
        return base_struct.pack(1, 0, 0, event.timedelta)
    if kind == "payload":
        return base_struct.pack(2, event.player_id, 0, 0) + event.data
    if kind == "playerjoined":
        return base_struct.pack(3, event.player_id, 0, 0)
    if kind == "playerleft":
        return base_struct.pack(4, event.player_id, 0, 0)
    if kind == "reconstruct":
        return base_struct.pack(5, 0, 0, 0) + event.data
    raise ValueError("Unknown event %s" % event)
class MultiplexServer:
    """
    Server for multiplexer

    Accepts TCP connections, assigns each a player id, collects events
    from clients, and rebroadcasts the queued events to everyone once
    all connected players have reported ready (lockstep rounds).
    """
    def __init__(self, host="0.0.0.0", port=55126, engine_packer=None):
        # engine_packer: optional callable returning serialized engine state
        # (bytes) used to bring late joiners up to date; a falsy return keeps
        # the previously captured state (see _on_connect).
        sock = socket.socket()
        sock.bind((host, port))
        sock.listen()
        # NOTE(review): TCP_NODELAY is set on the *listening* socket after
        # listen(); whether accepted sockets inherit it is platform-dependent
        # — confirm.
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.socket_selector = ServerSelector(sock, self._on_connect, self._on_read)
        self.events = []              # events accumulated since the last broadcast
        self.passed_events = []       # history replayed to newly joined clients
        self.current_player_id = -1   # last id handed out; first player gets 0
        self.fd_to_id = dict()        # socket fileno -> player id
        self.step = 0
        self.ready_players = set()    # player ids that sent "ready" this round
        self.players = 0              # number of currently connected players
        self.run_thread = True        # serve_forever loop flag
        self.last_ready = time.monotonic()
        self.engine_packer = engine_packer
        self.state = None             # latest serialized engine state, if any
        #if self.engine_packer is not None:
        #    self.state = self.engine_packer()
        self.time_between_selection = 0.001  # polling interval in serve_forever (secs)
        self.last_pack = time.monotonic()
        self.pack_delay = 2           # minimum seconds between engine_packer calls

    def _on_connect(self, sock, addr):
        """Register a new connection: assign an id, send hello and state sync."""
        logger.info("Connection from %s", addr)
        self.current_player_id += 1
        # Map the raw fd before wrapping; assumes PacketSocket exposes the
        # same fileno (used as the lookup key again in _on_read) — TODO confirm.
        self.fd_to_id[sock.fileno()] = self.current_player_id
        sock = PacketSocket(sock, bytes_packet_reciever)
        data = base_struct.pack(0, self.current_player_id, self.step, 0) #Hello packet
        sock.send(bytes_packet_sender(data))
        if self.engine_packer is not None:
            # Refresh the cached state at most once per pack_delay seconds; a
            # fresh snapshot makes the replay history redundant, so clear it.
            if time.monotonic() - self.last_pack > self.pack_delay:
                self.state = self.engine_packer() or self.state
                self.passed_events.clear()
            if self.state is not None:
                self.last_pack = time.monotonic()
                pl = convert_event(ReconstructEvent(self.state))
                logger.debug("Sending reconstruct packet len %s", len(pl))
                sock.send(bytes_packet_sender(pl))
        self.events.append(PlayerJoinedEvent(self.current_player_id))
        # Replay events broadcast since the last state snapshot.
        for event in self.passed_events:
            sock.send(bytes_packet_sender(convert_event(event)))
        self.players += 1
        logger.info("Done, currently %s online", self.players)
        return sock

    def _on_read(self, sock):
        """Consume incoming packets from one client and handle disconnects."""
        player_id = self.fd_to_id[sock.fileno()]
        packets = sock.recv()
        for packet in packets:
            # Each packet is a base_struct header optionally followed by payload.
            aux_data, payload = packet[:base_struct.size], packet[base_struct.size:]
            aux_data = base_struct.unpack(aux_data)
            if aux_data[0] == 1: #Ready
                self.ready_players.add(player_id)
                self._check_all_ready()
            elif aux_data[0] == 2: #Data
                self.events.append(PayloadEvent(player_id, payload))
        if sock.closed:
            # Drop the disconnected player and re-check readiness: the leaver
            # may have been the only one holding up the round.
            self.socket_selector.unregister(sock)
            self.events.append(PlayerLeftEvent(player_id))
            self.players -= 1
            logger.info("Player left")
            self.ready_players.discard(player_id)
            self._check_all_ready()
        return

    def _check_all_ready(self):
        # Broadcast when every connected player has reported ready.
        # NOTE(review): with 0 players both counts are 0 and this fires on an
        # empty server — confirm that is intended.
        if len(self.ready_players) == self.players:
            self._all_ready()
            self.ready_players.clear()

    def _all_ready(self):
        """Broadcast all queued events to every client and start a new round."""
        curr = time.monotonic()
        #logger.debug("All ready in %.2f ms", (curr-self.last_ready)*1000)
        self.events.append(ReadyEvent(curr-self.last_ready))
        self.last_ready = curr
        # Serialize the whole batch once, then fan out to all sockets.
        eventdata = b"".join(map(bytes_packet_sender, map(convert_event, self.events)))
        for event in self.events:
            self.passed_events.append(event) #TODO: Send events when they are recieved
            # packet = bytes_packet_sender(convert_event(event))
        for sock in self.socket_selector.socket_iterator:
            sock.send(eventdata)
        self.events.clear()

    def serve_forever(self):
        """Block, polling the selector until stop_thread() is called."""
        while self.run_thread:
            self.socket_selector.select()
            time.sleep(self.time_between_selection)

    def serve_in_thread(self):
        """Run serve_forever in a background daemon thread."""
        self._thread = Thread(target=self.serve_forever, daemon=True, name="multiplexer server")
        self._thread.start()

    def stop_thread(self):
        """Ask the serving loop to exit after its current iteration."""
        self.run_thread = False
class MultiplexClient:
    """
    Client for multiplexer

    Connects to a MultiplexServer, queues local payloads, announces
    readiness at a fixed cadence, and applies server-confirmed event
    batches to ``engine`` in lockstep.
    """

    def __init__(self, engine, engine_constructor=None, host="localhost", port=55126):
        #Engine should be a class with step method, accepting float(deltatime) and list of events
        # engine_constructor: optional callable(bytes) -> engine, used when the
        # server sends a reconstruct packet (type 5).
        sock = socket.socket()
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        sock.connect((host, port))
        self.pending_packets = []    # outgoing packets queued until the next step
        self.socket = PacketSocket(sock, bytes_packet_reciever)
        self.engine = engine
        self.engine_constructor = engine_constructor
        self.packets = list()        # events received for the upcoming engine step
        self.player_id = None        # assigned by the server's hello packet (type 0)
        self.socket_lock = Lock()    # guards socket sends and pending_packets
        self.ready_to_step = True
        self.last_step = 0
        self.last_confirmed_step = 0
        self.min_step_time = 0.5     # minimum seconds between "ready" announcements
        self._recv_packets()

    def _recv_packets(self):
        """Drain the socket and dispatch each packet by its type field."""
        packets = self.socket.recv()
        for packet in packets:
            aux_data, payload = packet[:base_struct.size], packet[base_struct.size:]
            # (Removed a try/except struct.error that only re-raised — a no-op.)
            aux_data = base_struct.unpack(aux_data)
            if aux_data[0] == 0:
                # Hello packet: the server tells us our player id.
                self.player_id = aux_data[1]
            elif aux_data[0] == 1: #Next step
                self.last_confirmed_step = time.monotonic()
                # aux_data[3] is the round's time delta measured by the server.
                self.engine.step(aux_data[3], self.packets)
                self.packets.clear()
                self.ready_to_step = True
            elif aux_data[0] == 2:
                self.packets.append(PayloadEvent(aux_data[1], payload))
            elif aux_data[0] == 3:
                self.packets.append(PlayerJoinedEvent(aux_data[1]))
            elif aux_data[0] == 4:
                self.packets.append(PlayerLeftEvent(aux_data[1]))
            elif aux_data[0] == 5:
                if self.engine_constructor is None:
                    raise MultiplexerException("Server requested engine reconstruction but engine_constructor is None")
                # Use the module logger, consistent with the rest of this file
                # (original called logging.debug on the root logger).
                logger.debug("Reconstructing engine")
                self.engine = self.engine_constructor(payload)

    def step(self):
        """Announce readiness (rate-limited) and process incoming packets."""
        if self.ready_to_step and time.monotonic() - self.last_step > self.min_step_time:
            #logging.debug("Waiting for lock...")
            with self.socket_lock:
                #logging.debug("Performing step...")
                self.last_step = time.monotonic()
                self.pending_packets.append(bytes_packet_sender(base_struct.pack(1, 0, 0, 0)))
                data = b"".join(self.pending_packets)
                self.pending_packets.clear()
                self.socket.send(data)
                #logging.debug("Done!")
        with self.socket_lock:
            # NOTE(review): send() with no argument — presumably flushes
            # buffered output on PacketSocket; confirm against asyncsocket.
            self.socket.send()
            self._recv_packets()

    def send_payload(self, data):
        """Queue a payload packet for delivery together with the next step."""
        with self.socket_lock:
            self.pending_packets.append(bytes_packet_sender(base_struct.pack(2, 0, 0, 0)+data))

    def _eternal_runner(self):
        # Loop step() at roughly min_step_time cadence until stopped or the
        # socket resets.
        while self._shall_continue:
            self.step()
            time.sleep(max(0, min(0.01, self.last_step + self.min_step_time - time.monotonic())))
            #time.sleep(0.1)
            if self.socket.reset:
                logger.warning("Socket is reset, stopping client")
                self._shall_continue = False

    def thread_runner(self):
        """Start the client loop in a daemon thread."""
        self._thread = Thread(target=self._eternal_runner, name="multiplex-client", daemon=True)
        self._shall_continue = True
        self._thread.start()

    def run_in_thread(self):
        """Alias for thread_runner()."""
        self.thread_runner()

    def stop_thread(self):
        """Stop the background loop and wait for it to finish."""
        self._shall_continue = False
        self._thread.join()
|
session_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import sys
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
# Enable the C-API code path in the ops module for this whole test file.
ops._USE_C_API = True

# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class SessionTest(test_util.TensorFlowTestCase):
@test_util.disable_c_api # Operation._set_device doesn't work with C API
def testUseExistingGraph(self):
  """Session(graph=g) runs ops from an explicitly supplied graph."""
  with ops.Graph().as_default() as g, ops.device('/cpu:0'):
    a = constant_op.constant(6.0, shape=[1, 1])
    b = constant_op.constant(7.0, shape=[1, 1])
    c = math_ops.matmul(a, b, name='matmul')
  with session.Session(graph=g):
    result = c.eval()
    self.assertAllEqual(result, [[42.0]])
@test_util.disable_c_api # Operation._set_device doesn't work with C API
def testUseDefaultGraph(self):
  """Session() picks up the current default graph."""
  with ops.Graph().as_default(), ops.device('/cpu:0'):
    a = constant_op.constant(6.0, shape=[1, 1])
    b = constant_op.constant(7.0, shape=[1, 1])
    c = math_ops.matmul(a, b, name='matmul')
    with session.Session():
      result = c.eval()
      self.assertAllEqual(result, [[42.0]])
def testCreate(self):
  """Basic eval with and without a feed dict."""
  with session.Session():
    inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
    copy = array_ops.identity(inp)
    # Test with feed.
    # TODO(mrry): Investigate why order='F' didn't work.
    arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
    copy_val = copy.eval({'W1:0': arr})
    self.assertAllEqual(arr, copy_val)
    # Test without feed.
    copy_val = copy.eval()
    self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
                                   dtype=np.float32), copy_val)
def testManyCPUs(self):
  """Session accepts a ConfigProto requesting multiple CPU devices."""
  # TODO(keveman): Implement ListDevices and test for the number of
  # devices returned by ListDevices.
  with session.Session(
      config=config_pb2.ConfigProto(device_count={'CPU': 2})):
    inp = constant_op.constant(10.0, name='W1')
    self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
  """Session accepts use_per_session_threads in its ConfigProto."""
  # TODO(keveman): Implement ListDevices and test for the number of
  # devices returned by ListDevices.
  with session.Session(
      config=config_pb2.ConfigProto(use_per_session_threads=True)):
    inp = constant_op.constant(10.0, name='W1')
    self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
  """Sessions run with one (default) and two inter-op thread pools."""
  config = config_pb2.ConfigProto()
  # First pool: default settings.
  pool = config.session_inter_op_thread_pool.add()
  with session.Session(config=config) as s:
    inp = constant_op.constant(10.0, name='W1')
    results = s.run([inp])
    self.assertAllEqual([10.0], results)
  # Second pool: restricted to a single thread.
  pool = config.session_inter_op_thread_pool.add()
  pool.num_threads = 1
  with session.Session(config=config) as s:
    inp = constant_op.constant(20.0, name='W2')
    results = s.run([inp])
    self.assertAllEqual([20.0], results)
def testErrorsReported(self):
  """Fetching an unknown tensor name raises ValueError."""
  with session.Session() as s:
    constant_op.constant(10.0, name='W1')
    with self.assertRaises(ValueError):
      s.run('foo:0')
def testErrorPayload(self):
  """Errors raised during eval carry the originating op."""
  with session.Session():
    a = array_ops.placeholder(dtypes.float32)
    with self.assertRaisesOpError(lambda e: e.op == a.op):
      a.eval()
@test_util.disable_c_api # Partial runs don't work with C API
def testErrorCodeWithNoNodeDef(self):
  """Errors without an op/node_def still report an error_code."""
  with session.Session() as s:
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)

    def exc_predicate(e):
      return (e.op is None and e.node_def is None and
              e.error_code == error_codes_pb2.INVALID_ARGUMENT)
    with self.assertRaisesOpError(exc_predicate):
      # Run with a bogus handle.
      s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
@test_util.disable_c_api # No shape registration for 'ConstructionFails'
def testOpConstructionErrorPayload(self):
  """Errors from running a badly-constructed op carry the op and code."""
  with session.Session():
    failing_op = ops.get_default_graph().create_op(
        'ConstructionFails', [], [], name='f')

    def exc_predicate(e):
      return (e.op == failing_op
              and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
    with self.assertRaisesOpError(exc_predicate):
      failing_op.run()
def testErrorBasedOn(self):
  """Errors expose the chain of _original_op ancestors."""
  with session.Session() as sess:
    a = constant_op.constant(0.0, shape=[2, 3])
    # NOTE(mrry): The original_op is nonsense, but used here to test that the
    # errors are reported correctly.
    # pylint: disable=protected-access
    with sess.graph._original_op(a.op):
      b = array_ops.identity(a, name='id')
      with sess.graph._original_op(b.op):
        c = array_ops.placeholder(dtypes.float32)
        # pylint: enable=protected-access

    def exc_predicate(e):
      return (e.op == c.op
              and e.op._original_op == b.op
              and e.op._original_op._original_op == a.op)
    with self.assertRaisesOpError(exc_predicate):
      c.eval()
def testFetchNone(self):
  """Fetching None anywhere in the fetch structure raises TypeError."""
  with session.Session() as s:
    a = constant_op.constant(1.0)
    with self.assertRaises(TypeError):
      s.run(None)
    with self.assertRaises(TypeError):
      s.run([None])
    with self.assertRaises(TypeError):
      s.run({'b': None})
    with self.assertRaises(TypeError):
      s.run({'a': a, 'b': None})
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testFetchSingleton(self):
  """A single tensor/op fetch returns a scalar result (None for ops)."""
  with session.Session() as sess:
    a = constant_op.constant(42.0)
    res = sess.run(a)
    self.assertEqual(42.0, res)
    res = sess.run(a.op)  # An op, not a tensor.
    self.assertEqual(None, res)
    tensor_runner = sess.make_callable(a)
    res = tensor_runner()
    self.assertEqual(42.0, res)
    op_runner = sess.make_callable(a.op)
    res = op_runner()
    self.assertEqual(None, res)
def testFetchSingletonByName(self):
  """A tensor can be fetched by its string name."""
  with session.Session() as sess:
    a = constant_op.constant(42.0)
    res = sess.run(a.name)
    self.assertEqual(42.0, res)
    res = sess.run(a.op)  # An op, not a tensor.
    self.assertEqual(None, res)
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testFetchList(self):
  """List fetches preserve order and mix tensors, names, and ops."""
  with session.Session() as sess:
    a = constant_op.constant(42.0)
    b = control_flow_ops.no_op()  # An op, not a tensor.
    c = constant_op.constant(44.0)
    v = variables.Variable([54.0])
    assign = v.assign([63.0])
    res = sess.run([a, b, c, a.name, assign.op])
    self.assertTrue(isinstance(res, list))
    self.assertEqual([42.0, None, 44.0, 42.0, None], res)
    list_runner = sess.make_callable([a, b, c, a.name, assign.op])
    res = list_runner()
    self.assertTrue(isinstance(res, list))
    self.assertEqual([42.0, None, 44.0, 42.0, None], res)
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testFetchTuple(self):
  """Tuple fetches return a tuple with the same structure."""
  with session.Session() as sess:
    a = constant_op.constant(42.0)
    b = control_flow_ops.no_op()  # An op, not a tensor.
    c = constant_op.constant(44.0)
    res = sess.run((a, b, c, a.name))
    self.assertTrue(isinstance(res, tuple))
    self.assertEqual((42.0, None, 44.0, 42.0), res)
    tuple_runner = sess.make_callable((a, b, c, a.name))
    res = tuple_runner()
    self.assertTrue(isinstance(res, tuple))
    self.assertEqual((42.0, None, 44.0, 42.0), res)
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testFetchNamedTuple(self):
  """Namedtuple fetches return the same namedtuple type with run results."""
  # pylint: disable=invalid-name
  ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
  # pylint: enable=invalid-name
  with session.Session() as sess:
    a = constant_op.constant(42.0)
    b = control_flow_ops.no_op()  # An op, not a tensor.
    c = constant_op.constant(44.0)
    res = sess.run(ABC(a, b, c))
    self.assertTrue(isinstance(res, ABC))
    self.assertEqual(42.0, res.a)
    self.assertEqual(None, res.b)
    self.assertEqual(44.0, res.c)
    namedtuple_runner = sess.make_callable(ABC(a, b, c))
    res = namedtuple_runner()
    self.assertTrue(isinstance(res, ABC))
    self.assertEqual(42.0, res.a)
    self.assertEqual(None, res.b)
    self.assertEqual(44.0, res.c)
def testFetchDict(self):
  """Dict fetches return a dict keyed like the request."""
  with session.Session() as sess:
    a = constant_op.constant(42.0)
    b = control_flow_ops.no_op()  # An op, not a tensor.
    c = constant_op.constant(44.0)
    res = sess.run({'a': a, 'b': b, 'c': c})
    self.assertTrue(isinstance(res, dict))
    self.assertEqual(42.0, res['a'])
    self.assertEqual(None, res['b'])
    self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
  """OrderedDict fetches preserve the request's key order."""
  with session.Session() as sess:
    a = constant_op.constant(42.0)
    b = control_flow_ops.no_op()  # An op, not a tensor.
    c = constant_op.constant(44.0)
    res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
    self.assertTrue(isinstance(res, collections.OrderedDict))
    self.assertEqual([3, 2, 1], list(res.keys()))
    self.assertEqual(42.0, res[3])
    self.assertEqual(None, res[2])
    self.assertEqual(44.0, res[1])
def testFetchNestingEmptyOneLevel(self):
  """Empty containers in a fetch structure come back as empty containers."""
  with session.Session() as sess:
    a_val = 11.0
    a = constant_op.constant(a_val)

    res = sess.run([[], tuple(), {}])
    self.assertTrue(isinstance(res, list))
    # assertEquals is a deprecated unittest alias; use assertEqual,
    # consistent with the rest of this file.
    self.assertEqual(3, len(res))
    self.assertTrue(isinstance(res[0], list))
    self.assertEqual(0, len(res[0]))
    self.assertTrue(isinstance(res[1], tuple))
    self.assertEqual(0, len(res[1]))
    self.assertTrue(isinstance(res[2], dict))
    self.assertEqual(0, len(res[2]))

    res = sess.run([[], tuple(), {}, a])
    self.assertTrue(isinstance(res, list))
    self.assertEqual(4, len(res))
    self.assertTrue(isinstance(res[0], list))
    self.assertEqual(0, len(res[0]))
    self.assertTrue(isinstance(res[1], tuple))
    self.assertEqual(0, len(res[1]))
    self.assertTrue(isinstance(res[2], dict))
    self.assertEqual(0, len(res[2]))
    self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
  """One level of nesting: lists/tuples/namedtuples/dicts of fetches keep their structure."""
  with session.Session() as sess:
    # pylint: disable=invalid-name
    ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
    DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
    # pylint: enable=invalid-name
    a_val = 42.0
    b_val = None
    c_val = 44.0
    a = constant_op.constant(a_val)
    b = control_flow_ops.no_op()  # An op, not a tensor.
    c = constant_op.constant(c_val)
    # List of lists, tuples, namedtuple, and dict
    res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
                    {'a': a.name, 'c': c, 'b': b}])
    self.assertTrue(isinstance(res, list))
    self.assertEqual(4, len(res))
    self.assertTrue(isinstance(res[0], list))
    self.assertEqual(3, len(res[0]))
    self.assertEqual(a_val, res[0][0])
    self.assertEqual(b_val, res[0][1])
    self.assertEqual(c_val, res[0][2])
    self.assertTrue(isinstance(res[1], tuple))
    self.assertEqual(3, len(res[1]))
    self.assertEqual(a_val, res[1][0])
    self.assertEqual(b_val, res[1][1])
    self.assertEqual(c_val, res[1][2])
    self.assertTrue(isinstance(res[2], ABC))
    self.assertEqual(a_val, res[2].a)
    self.assertEqual(b_val, res[2].b)
    self.assertEqual(c_val, res[2].c)
    self.assertTrue(isinstance(res[3], dict))
    self.assertEqual(3, len(res[3]))
    self.assertEqual(a_val, res[3]['a'])
    self.assertEqual(b_val, res[3]['b'])
    self.assertEqual(c_val, res[3]['c'])
    # Tuple of lists, tuples, namedtuple, and dict
    res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
                    {'a': a, 'c': c, 'b': b}))
    self.assertTrue(isinstance(res, tuple))
    self.assertEqual(4, len(res))
    self.assertTrue(isinstance(res[0], list))
    self.assertEqual(3, len(res[0]))
    self.assertEqual(a_val, res[0][0])
    self.assertEqual(b_val, res[0][1])
    self.assertEqual(c_val, res[0][2])
    self.assertTrue(isinstance(res[1], tuple))
    self.assertEqual(3, len(res[1]))
    self.assertEqual(a_val, res[1][0])
    self.assertEqual(b_val, res[1][1])
    self.assertEqual(c_val, res[1][2])
    self.assertTrue(isinstance(res[2], ABC))
    self.assertEqual(a_val, res[2].a)
    self.assertEqual(b_val, res[2].b)
    self.assertEqual(c_val, res[2].c)
    self.assertTrue(isinstance(res[3], dict))
    self.assertEqual(3, len(res[3]))
    self.assertEqual(a_val, res[3]['a'])
    self.assertEqual(b_val, res[3]['b'])
    self.assertEqual(c_val, res[3]['c'])
    # Namedtuple of lists, tuples, namedtuples, and dict
    res = sess.run(DEFG(d=[a, b, c],
                        e=(a, b, c),
                        f=ABC(a=a.name, b=b, c=c),
                        g={'a': a, 'c': c, 'b': b}))
    self.assertTrue(isinstance(res, DEFG))
    self.assertTrue(isinstance(res.d, list))
    self.assertEqual(3, len(res.d))
    self.assertEqual(a_val, res.d[0])
    self.assertEqual(b_val, res.d[1])
    self.assertEqual(c_val, res.d[2])
    self.assertTrue(isinstance(res.e, tuple))
    self.assertEqual(3, len(res.e))
    self.assertEqual(a_val, res.e[0])
    self.assertEqual(b_val, res.e[1])
    self.assertEqual(c_val, res.e[2])
    self.assertTrue(isinstance(res.f, ABC))
    self.assertEqual(a_val, res.f.a)
    self.assertEqual(b_val, res.f.b)
    self.assertEqual(c_val, res.f.c)
    self.assertTrue(isinstance(res.g, dict))
    self.assertEqual(3, len(res.g))
    self.assertEqual(a_val, res.g['a'])
    self.assertEqual(b_val, res.g['b'])
    self.assertEqual(c_val, res.g['c'])
    # Dict of lists, tuples, namedtuples, and dict
    res = sess.run({'d': [a, b, c],
                    'e': (a, b, c),
                    'f': ABC(a=a, b=b, c=c),
                    'g': {'a': a.name, 'c': c, 'b': b}})
    self.assertTrue(isinstance(res, dict))
    self.assertEqual(4, len(res))
    self.assertTrue(isinstance(res['d'], list))
    self.assertEqual(3, len(res['d']))
    self.assertEqual(a_val, res['d'][0])
    self.assertEqual(b_val, res['d'][1])
    self.assertEqual(c_val, res['d'][2])
    self.assertTrue(isinstance(res['e'], tuple))
    self.assertEqual(3, len(res['e']))
    self.assertEqual(a_val, res['e'][0])
    self.assertEqual(b_val, res['e'][1])
    self.assertEqual(c_val, res['e'][2])
    self.assertTrue(isinstance(res['f'], ABC))
    self.assertEqual(a_val, res['f'].a)
    self.assertEqual(b_val, res['f'].b)
    self.assertEqual(c_val, res['f'].c)
    self.assertTrue(isinstance(res['g'], dict))
    self.assertEqual(3, len(res['g']))
    self.assertEqual(a_val, res['g']['a'])
    self.assertEqual(b_val, res['g']['b'])
    self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
  """Tensors can be fetched singly, via eval, in lists, dicts, and nested lists."""
  with session.Session() as s:
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    c = math_ops.matmul(a, b)
    results_with_list = s.run([c])
    self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
    results_with_single = s.run(c)
    self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
    results_with_get = c.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
    a_val, b_val = s.run([a, b])  # Test multiple fetches.
    self.assertAllEqual([[1.0, 1.0]], a_val)
    self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
    results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
    self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
    self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                        results_with_dict['b'])
    self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
    self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
    # Test nested structures
    results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
    self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
    self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                        results_with_nested_list[0][0][1])
    self.assertAllEqual(results_with_nested_list[0][0][0],
                        results_with_nested_list[1])
    self.assertAllEqual(results_with_nested_list[1],
                        results_with_nested_list[2][0])
    self.assertAllEqual(results_with_nested_list[0][0][1],
                        results_with_nested_list[0][1])
    self.assertAllEqual(results_with_nested_list[0][1],
                        results_with_nested_list[2][1])
def testFetchScalar(self):
  """Scalar fetches preserve the numpy scalar type across fetch structures."""
  with session.Session() as s:
    for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
      x = scalar(7)
      y = scalar(8)
      tf_x = constant_op.constant(x, shape=[])
      tf_y = constant_op.constant(y)
      tf_xy = math_ops.add(tf_x, tf_y)
      # Single fetch
      xy = s.run(tf_xy)
      self.assertEqual(scalar, type(xy))
      self.assertEqual(x + y, xy)
      # List fetch
      xy, = s.run([tf_xy])
      self.assertEqual(scalar, type(xy))
      self.assertEqual(x + y, xy)
      # Dict fetch
      xy = s.run({'xy': tf_xy})['xy']
      self.assertEqual(scalar, type(xy))
      self.assertEqual(x + y, xy)
      # Nested list fetch
      xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
      self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
      self.assertEqual(scalar, type(xy[0][0][0]))
      self.assertEqual(scalar, type(xy[1]))
      self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
  """Running a variable initializer op, then fetching the variable, works."""
  with session.Session() as s:
    a = constant_op.constant(1.0, shape=[1, 2])
    v = variables.Variable(a, name='testFetchOperationObject_v')
    s.run(v.initializer)
    v_val = s.run(v)
    self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
  """SparseTensor fetches work in every structure, as tuples or SparseTensorValues."""
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    shape = np.array([7, 9, 2]).astype(np.int64)
    sp = sparse_tensor.SparseTensor(
        constant_op.constant(indices),
        constant_op.constant(values),
        constant_op.constant(shape))
    # Single fetch, use as tuple
    sp_out = s.run(sp)
    indices_out, values_out, shape_out = sp_out
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Single fetch, use as SparseTensorValue
    sp_out = s.run(sp)
    self.assertAllEqual(sp_out.indices, indices)
    self.assertAllEqual(sp_out.values, values)
    self.assertAllEqual(sp_out.dense_shape, shape)
    # Tuple fetch, use as tuple
    indices_out, values_out, shape_out = s.run(sp)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # List fetch, use as tuple
    (indices_out, values_out, shape_out), = s.run([sp])
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # List fetch, use as SparseTensorValue
    sp_out, = s.run([sp])
    self.assertAllEqual(sp_out.indices, indices)
    self.assertAllEqual(sp_out.values, values)
    self.assertAllEqual(sp_out.dense_shape, shape)
    # Dict fetch (single value), use as tuple
    indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Dict fetch (list value), use as tuple
    (indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Dict fetch, use as SparseTensorValue
    sp_out = s.run({'sp': sp})['sp']
    self.assertAllEqual(sp_out.indices, indices)
    self.assertAllEqual(sp_out.values, values)
    self.assertAllEqual(sp_out.dense_shape, shape)
    # Nested list fetch use as tuple
    sp_out = s.run([[[sp]], sp])
    indices_out, values_out, shape_out = sp_out[0][0][0]
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    indices_out, values_out, shape_out = sp_out[1]
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Nested list fetch, use as SparseTensorValue
    sp_out = s.run([[[sp]], sp])
    self.assertAllEqual(sp_out[0][0][0].indices, indices)
    self.assertAllEqual(sp_out[0][0][0].values, values)
    self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
    self.assertAllEqual(sp_out[1].indices, indices)
    self.assertAllEqual(sp_out[1].values, values)
    self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
  """A SparseTensor built from placeholders can be fed as a tuple or SparseTensorValue."""
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    shape = np.array([7, 9, 2]).astype(np.int64)
    sp = sparse_tensor.SparseTensor(
        array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
        array_ops.placeholder(dtype=np.float32, shape=(2,)),
        array_ops.placeholder(dtype=np.int64, shape=(3,)),)
    sp_indices = array_ops.identity(sp.indices)
    sp_values = array_ops.identity(sp.values)
    sp_shape = array_ops.identity(sp.dense_shape)
    sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
    # Feed with tuple
    indices_out, values_out, shape_out = s.run(
        [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Feed with tuple, fetch sp directly
    sp_out = s.run(sp, {sp: (indices, values, shape)})
    self.assertAllEqual(sp_out.indices, indices)
    self.assertAllEqual(sp_out.values, values)
    self.assertAllEqual(sp_out.dense_shape, shape)
    # Feed with SparseTensorValue
    indices_out, values_out, shape_out = s.run(
        [sp_indices, sp_values, sp_shape],
        {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Feed with SparseTensorValue, fetch SparseTensorValue
    sp2_out = s.run(
        sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(sp2_out.indices, indices)
    self.assertAllEqual(sp2_out.values, values)
    self.assertAllEqual(sp2_out.dense_shape, shape)
    # Feed SparseTensorValue and fetch sp directly.
    sp_out = s.run(
        sp, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(sp_out.indices, indices)
    self.assertAllEqual(sp_out.values, values)
    self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
  """sparse_placeholder accepts tuple and SparseTensorValue feeds."""
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    shape = np.array([7, 9, 2]).astype(np.int64)
    sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
    sp_indices = array_ops.identity(sp.indices)
    sp_values = array_ops.identity(sp.values)
    sp_shape = array_ops.identity(sp.dense_shape)
    sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
    # Feed with tuple
    indices_out, values_out, shape_out = s.run(
        [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Feed with SparseTensorValue
    indices_out, values_out, shape_out = s.run(
        [sp_indices, sp_values, sp_shape],
        {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Feed with SparseTensorValue, fetch SparseTensorValue
    sp2_out = s.run(
        sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(sp2_out.indices, indices)
    self.assertAllEqual(sp2_out.values, values)
    self.assertAllEqual(sp2_out.dense_shape, shape)
  def testFeedSparsePlaceholderPartialShape(self):
    """Same round-trip as above, with a partially-known dense shape."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      # First dimension is unknown; feeding must still supply a full shape.
      sp = array_ops.sparse_placeholder(
          shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.dense_shape)
      sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape],
          {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue, fetch SparseTensorValue
      sp2_out = s.run(
          sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(sp2_out.indices, indices)
      self.assertAllEqual(sp2_out.values, values)
      self.assertAllEqual(sp2_out.dense_shape, shape)
  def testFeedSparsePlaceholderConstantShape(self):
    """With a fully-known shape, only (indices, values) need to be fed."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = array_ops.sparse_placeholder(dtype=np.float32,
                                        shape=shape,
                                        name='placeholder1')
      # The dense shape is baked into the graph as a constant.
      self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
      self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.dense_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
  def testFetchIndexedSlices(self):
    """Fetches an IndexedSlices as a tuple and as an IndexedSlicesValue."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      dense_shape = np.array([7, 9, 2]).astype(np.int64)
      ind = ops.IndexedSlices(
          constant_op.constant(values), constant_op.constant(indices),
          constant_op.constant(dense_shape))
      # Single fetch, use as tuple
      ind_out = s.run(ind)
      values_out, indices_out, dense_shape_out = ind_out
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Single fetch, use as IndexedSlicesValue
      ind_out = s.run(ind)
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
      # Tuple fetch, use as tuple
      values_out, indices_out, dense_shape_out = s.run(ind)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as tuple
      (values_out, indices_out, dense_shape_out), = s.run([ind])
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as IndexedSlicesValue
      ind_out, = s.run([ind])
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
  def testFeedIndexedSlices(self):
    """Feeds an IndexedSlices as a tuple and as an IndexedSlicesValue."""
    with session.Session() as s:
      values = np.array([1.0, 2.0]).astype(np.float32)
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      dense_shape = np.array([7, 9, 2]).astype(np.int64)
      ind = ops.IndexedSlices(
          array_ops.placeholder(dtype=np.float32,
                                shape=(2,)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(2, 3)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(3,)),)
      ind_values = array_ops.identity(ind.values)
      ind_indices = array_ops.identity(ind.indices)
      ind_dense_shape = array_ops.identity(ind.dense_shape)
      ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
      # Feed with tuple
      values_out, indices_out, dense_shape_out = s.run(
          [ind_values, ind_indices, ind_dense_shape],
          {ind: (values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Feed with IndexedSlicesValue
      values_out, indices_out, dense_shape_out = s.run(
          [ind_values, ind_indices, ind_dense_shape],
          {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                          dense_shape)})
      self.assertAllEqual(ind2_out.values, values)
      self.assertAllEqual(ind2_out.indices, indices)
      self.assertAllEqual(ind2_out.dense_shape, dense_shape)
  def testFetchIndexedSlicesWithoutDenseShape(self):
    """Fetching an IndexedSlices with no dense_shape yields None for it."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      dense_shape = None
      ind = ops.IndexedSlices(
          constant_op.constant(values), constant_op.constant(indices), None)
      # Single fetch, use as tuple
      ind_out = s.run(ind)
      values_out, indices_out, dense_shape_out = ind_out
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Single fetch, use as IndexedSlicesValue
      ind_out = s.run(ind)
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
      # Tuple fetch, use as tuple
      values_out, indices_out, dense_shape_out = s.run(ind)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as tuple
      (values_out, indices_out, dense_shape_out), = s.run([ind])
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as IndexedSlicesValue
      ind_out, = s.run([ind])
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
  def testFeedIndexedSlicesWithoutDenseShape(self):
    """Feeds an IndexedSlices whose dense_shape component is None."""
    with session.Session() as s:
      values = np.array([1.0, 2.0]).astype(np.float32)
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      dense_shape = None
      ind = ops.IndexedSlices(
          array_ops.placeholder(dtype=np.float32,
                                shape=(2,)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(2, 3)),
          None)
      ind_values = array_ops.identity(ind.values)
      ind_indices = array_ops.identity(ind.indices)
      ind2 = ops.IndexedSlices(ind_values, ind_indices)
      # Feed with tuple
      values_out, indices_out = s.run(
          [ind_values, ind_indices], {ind: (values, indices)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue
      values_out, indices_out = s.run(
          [ind_values, ind_indices],
          {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                          dense_shape)})
      self.assertAllEqual(ind2_out.values, values)
      self.assertAllEqual(ind2_out.indices, indices)
      self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
  def testExtendWithStatefulOperations(self):
    """Extending the graph with a pending assign must not run the assign."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      # Extend will happen here.
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      # v is unchanged until the assign op itself is run.
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
  @test_util.disable_c_api  # Operation._set_device doesn't work with C API
  def testExtendWithGroupBy(self):
    """A group() op created after a run is usable via a graph Extend."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      p = variables.Variable(a, name='testExtendWithGroupBy_p')
      a_val = a.eval()  # Force an Extend after this op.
      self.assertAllEqual([[1.0, 1.0]], a_val)
      b = constant_op.constant(2.0, shape=[1, 2])
      q = variables.Variable(b, name='testExtendWithGroupBy_q')
      # Extend will happen here.
      init = control_flow_ops.group(p.initializer, q.initializer)
      s.run(init)
      p_val, q_val = s.run([p, q])
      self.assertAllEqual([[1.0, 1.0]], p_val)
      self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
  def testOperationRunMethod(self):
    """Operation.run works with the default session, with and without feeds."""
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 2], name='b')
      v = variables.Variable(a, a.dtype)
      assign_a_to_v = state_ops.assign(v, a)
      assign_a_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[1.0, 1.0]], v_val)
      assign_b_to_v = state_ops.assign(v, b)
      assign_b_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[2.0, 2.0]], v_val)
      # Feeding b by name replaces its value for this one run.
      assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
      v_val = v.eval()
      self.assertAllEqual([[3.0, 3.0]], v_val)
  def testDefaultGraph(self):
    """Entering a Session installs its graph as the default graph."""
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      # New ops land in the session's graph.
      self.assertEqual(ops.get_default_graph(), a.graph)
      self.assertEqual(ops.get_default_graph(), b.graph)
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testDefaultGraph_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      # v is unchanged until the assign op is actually run.
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
  def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
    """Worker for testDefaultGraphWithThreads; builds and runs its own graph.

    Args:
      constructed_event: Event this thread sets after building its graph.
      continue_event: Event this thread waits on before running the graph.
      i: Thread index, used to give the variable a unique name.
    """
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='var_%d' % i)
      # Block here until all threads have constructed their graph.
      constructed_event.set()
      continue_event.wait()
      assign_c_to_v = state_ops.assign(v, c)
      v.initializer.run()
      assign_c_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
  def testRunFeedDict(self):
    """Feeds by tensor, by name, by plain list, and by nested tuple keys."""
    with session.Session() as s:
      x = array_ops.zeros([2])
      y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))
      y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))
      # Plain Python lists are converted to the placeholder dtype.
      y = s.run(2 * x, feed_dict={x: [1, 1]})
      assert (y == 2 * np.ones(2)).all()
      # Test nested tuple keys
      z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
           (array_ops.zeros([2]),))
      result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
      values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
      result_value = s.run(result, feed_dict={z: values})
      self.assertAllEqual(result_value[0], 2 * np.ones(2))
      self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
      self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
  def testUseAfterCloseConcurrent(self):
    """Closing a session while another thread runs it raises in that thread."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)
      def update_thread():
        # Loops until the main thread closes the session, at which point the
        # expected RuntimeError terminates the loop and satisfies the assert.
        with self.assertRaisesWithPredicateMatch(
            RuntimeError,
            lambda e: 'Attempted to use a closed Session.' in str(e)):
          while True:
            sess.run(c)
      t = threading.Thread(target=update_thread)
      t.start()
      time.sleep(0.1)
      sess.close()
      t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
  @test_util.disable_c_api  # Operation._set_device doesn't work with C API
  def testNotEntered(self):
    """A session that was never entered is not the default session."""
    # pylint: disable=protected-access
    self.assertEqual(ops._default_session_stack.get_default(), None)
    # pylint: enable=protected-access
    with ops.device('/cpu:0'):
      sess = session.Session()
      c_1 = constant_op.constant(5.0)
      with sess.graph.as_default():
        c_2 = constant_op.constant(5.0)
      self.assertEqual(c_1.graph, c_2.graph)
      # Explicit sess.run works; implicit eval does not, since the session
      # was never installed as default.
      self.assertEqual(sess.run(c_2), 5.0)
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: 'No default session is registered.' in str(e)):
        c_2.eval()
@test_util.disable_c_api # Operation._set_device doesn't work with C API
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
  @test_util.disable_c_api  # Operation._set_device doesn't work with C API
  def testInteractivePlacePrunedGraph(self):
    """An InteractiveSession only places the ops it actually runs."""
    sess = session.InteractiveSession()
    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid. If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/gpu:0'):
      a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(1.0, shape=[1, 2])
    # Only run the valid op, this should work.
    b.eval()
    # Running the unplaceable op itself must fail.
    with self.assertRaises(errors.InvalidArgumentError):
      a.eval()
    sess.close()
  @test_util.disable_c_api  # Operation._set_device doesn't work with C API
  def testDefaultSessionPlacePrunedGraph(self):
    """A non-interactive Session places the whole graph, so a bad op fails."""
    sess = session.Session()
    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid. If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/gpu:0'):
      _ = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(1.0, shape=[1, 2])
    with self.assertRaises(errors.InvalidArgumentError):
      # Even though we don't run the bad op, we place the entire
      # graph, which should fail with a non-interactive session.
      sess.run(b)
    sess.close()
@test_util.disable_c_api # Operation._set_device doesn't work with C API
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={feed_t: np_array}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={feed_t: np_array}))
# Also check that we can get both back.
out_v, feed_v = sess.run([out_t, feed_t],
feed_dict={feed_t: np_array})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
  def testFeedPrecisionLossError(self):
    """Feeding an int64 too large for an int32 tensor raises TypeError."""
    with session.Session() as sess:
      largest_int64 = np.iinfo(np.int64).max
      feed_int_implicit_int32 = constant_op.constant(1)
      feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
      out_t = constant_op.constant(1.0)
      with self.assertRaisesRegexp(TypeError,
                                   'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
      with self.assertRaisesRegexp(TypeError,
                                   'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
  def testStringFetch(self):
    """Fetches string constants of several shapes, including empty ones."""
    with session.Session():
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        size = 1
        for s in shape:
          size *= s
        # NOTE(review): np.object is a deprecated alias of object in newer
        # NumPy releases; kept as-is to match the file's vintage.
        c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
                          dtype=np.object).reshape(shape) if size > 0 else []
        c = constant_op.constant(c_list)
        self.assertAllEqual(c.eval(), c_list)
  def testStringFeed(self):
    """Feeds and fetches string arrays of several shapes."""
    with session.Session() as sess:
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        size = 1
        for s in shape:
          size *= s
        c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
                          dtype=np.object).reshape(shape)
        feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
        c = array_ops.identity(feed_t)
        self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
        # The feed itself can be fetched back unchanged.
        self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}),
                            c_list)
        c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
        self.assertAllEqual(c_v, c_list)
        self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
  def testStringFeedWithUnicode(self):
    """Unicode feeds round-trip; fetched values come back UTF-8 encoded."""
    with session.Session():
      c_list = [u'\n\x01\x00', u'\n\x00\x01',
                u'\u26a3 unicode', u'\U0001f60e deal with it']
      feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
      c = array_ops.identity(feed_t)
      out = c.eval(feed_dict={feed_t: c_list})
      for i in range(len(c_list)):
        self.assertEqual(c_list[i], out[i].decode('utf-8'))
      # Same check when feeding an object-dtype numpy array of unicode.
      out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
      for i in range(len(c_list)):
        self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
  def testIncorrectGraph(self):
    """Fetching a tensor from a different graph raises ValueError."""
    with ops.Graph().as_default() as g_1:
      c_1 = constant_op.constant(1.0, name='c')
    with ops.Graph().as_default() as g_2:
      c_2 = constant_op.constant(2.0, name='c')
    # Same op name in both graphs; identity is decided by graph membership.
    self.assertEqual('c', c_1.op.name)
    self.assertEqual('c', c_2.op.name)
    with session.Session(graph=g_1) as sess_1:
      self.assertEqual(1.0, sess_1.run(c_1))
      with self.assertRaises(ValueError):
        sess_1.run(c_2)
      with self.assertRaises(ValueError):
        sess_1.run(c_2.op)
    with session.Session(graph=g_2) as sess_2:
      with self.assertRaises(ValueError):
        sess_2.run(c_1)
      with self.assertRaises(ValueError):
        sess_2.run(c_1.op)
      self.assertEqual(2.0, sess_2.run(c_2))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
@test_util.disable_c_api # Operation._set_device doesn't work with C API
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
@test_util.disable_c_api # Operation._set_device doesn't work with C API
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(constant_op.constant(1.0), options=None,
run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
  def testFeedShapeCompatibility(self):
    """Shape-incompatible feeds and unfeedable tensors raise ValueError."""
    with session.Session() as sess:
      some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
      new_shape = constant_op.constant([2, 2])
      reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
      # A feed value whose shape disagrees with the placeholder is rejected.
      with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
        sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
      # The shape constant has been captured by reshape and may not be fed.
      with self.assertRaisesRegexp(ValueError, 'may not be fed'):
        sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
@test_util.disable_c_api # Operation._set_device doesn't work with C API
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
@test_util.disable_c_api # Operation._set_device doesn't work with C API
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
  @test_util.disable_c_api  # Operation._set_device doesn't work with C API
  def testBuildCostModel(self):
    """With build_cost_model=100, a cost graph appears every 100th step."""
    run_options = config_pb2.RunOptions()
    config = config_pb2.ConfigProto(
        allow_soft_placement=True,
        graph_options=config_pb2.GraphOptions(build_cost_model=100))
    with session.Session(config=config) as sess:
      with ops.device('/gpu:0'):
        a = array_ops.placeholder(dtypes.float32, shape=[])
        b = math_ops.add(a, a)
        c = array_ops.identity(b)
        d = math_ops.multiply(c, c)
      for step in xrange(120):
        run_metadata = config_pb2.RunMetadata()
        sess.run(d, feed_dict={a: 1.0},
                 options=run_options, run_metadata=run_metadata)
        # Step 99 is the 100th run, so only it should carry a cost graph.
        if step == 99:
          self.assertTrue(run_metadata.HasField('cost_graph'))
        else:
          self.assertFalse(run_metadata.HasField('cost_graph'))
  def testNonInteractiveSessionNesting(self):
    """Default-session contexts must be exited in LIFO order."""
    sess1 = session.Session()
    sess1_controller = sess1.as_default()
    sess1_controller.__enter__()
    sess2 = session.Session()
    sess2_controller = sess2.as_default()
    sess2_controller.__enter__()
    # Exiting the outer context while the inner is still active is an error.
    with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
      sess1_controller.__exit__(None, None, None)
    # Clean up the stack so later tests are unaffected.
    ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
  def testTimeoutWithShortOperations(self):
    """Operations that finish well within the timeout run normally."""
    num_epochs = 5
    q = data_flow_ops.FIFOQueue(
        capacity=50, dtypes=[dtypes.int32], shapes=[()])
    enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
    # Use a 10-second timeout, which should be longer than any
    # non-blocking enqueue_many op.
    config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
    with session.Session(config=config) as sess:
      for _ in range(num_epochs):
        sess.run(enqueue_op)
      self.assertEqual(sess.run(q.size()), num_epochs * 2)
  @test_util.disable_c_api  # Partial runs don't work with C API
  def testRegisterFetchAndFeedConversionFunctions(self):
    """User types can be registered as fetchable/feedable; re-register fails."""
    class SquaredTensor(object):
      def __init__(self, tensor):
        self.sq = math_ops.square(tensor)
    # fetch_fn maps the wrapper to its tensor and back; feed_fns map feeds.
    fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
    feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
    feed_fn2 = lambda feed: [feed.sq]
    session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
        feed_fn1, feed_fn2)
    # Registering the same type a second time is an error.
    with self.assertRaises(ValueError):
      session.register_session_run_conversion_functions(SquaredTensor,
          fetch_fn, feed_fn1, feed_fn2)
    with self.test_session() as sess:
      np1 = np.array([1.0, 1.5, 2.0, 2.5])
      np2 = np.array([3.0, 3.5, 4.0, 4.5])
      squared_tensor = SquaredTensor(np2)
      squared_eval = sess.run(squared_tensor)
      self.assertAllClose(np2 * np2, squared_eval)
      squared_eval = sess.run(squared_tensor, feed_dict={
        squared_tensor : np1 * np1})
      self.assertAllClose(np1 * np1, squared_eval)
      # Registered types also work with partial runs.
      partial_run = sess.partial_run_setup([squared_tensor], [])
      squared_eval = sess.partial_run(partial_run, squared_tensor)
      self.assertAllClose(np2 * np2, squared_eval)
  def testDefaultLogDevicePlacement(self):
    """log_device_placement in the server config is honored without a session config."""
    class CaptureStderr(str):
      """Class to capture stderr from C++ shared library."""
      def __enter__(self):
        # Sentinel byte that marks the end of the captured output.
        self._esc = compat.as_str('\b')
        self._output = compat.as_str('')
        self._stderr = sys.stderr
        self._fd = self._stderr.fileno()
        self._out_pipe, in_pipe = os.pipe()
        # Save the original io stream.
        self._dup_fd = os.dup(self._fd)
        # Replace the original io stream with in pipe.
        os.dup2(in_pipe, self._fd)
        return self
      def __exit__(self, *args):
        # Write the sentinel so read() knows where the capture ends.
        self._stderr.write(self._esc)
        self._stderr.flush()
        self.read()
        os.close(self._out_pipe)
        # Restore the original io stream.
        os.dup2(self._dup_fd, self._fd)
      def read(self):
        # Drain the pipe one byte at a time until the sentinel appears.
        while True:
          data = os.read(self._out_pipe, 1)
          if not data or compat.as_str(data) == self._esc:
            break
          self._output += compat.as_str(data)
      def __str__(self):
        return self._output
    # Passing the config to the server, but not the session should still result
    # in logging device placement.
    config = config_pb2.ConfigProto(log_device_placement=True)
    server = server_lib.Server.create_local_server(config=config)
    a = constant_op.constant(1)
    b = constant_op.constant(2)
    c = a + b
    with session.Session(server.target) as sess:
      with CaptureStderr() as log:
        sess.run(c)
      # Ensure that we did log device placement.
      self.assertTrue('/job:local/replica:0/task:0/cpu:0' in str(log), str(log))
def testLocalMasterSessionTimeout(self):
  # Test that the timeout passed in a config to the session works correctly:
  # a blocking op must abort with DeadlineExceededError.
  config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
  server = server_lib.Server.create_local_server()
  q = data_flow_ops.FIFOQueue(1, dtypes.float32)
  dequeued_t = q.dequeue()

  with session.Session(server.target, config=config) as sess:
    # Intentionally do not run any enqueue_ops so that dequeue will block
    # until operation_timeout_in_ms.
    with self.assertRaises(errors.DeadlineExceededError):
      sess.run(dequeued_t)
def testDefaultServerTimeout(self):
  # Test that the default server config timeout gets used when no Session
  # config is provided.
  config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
  server = server_lib.Server.create_local_server(config=config)
  q = data_flow_ops.FIFOQueue(1, dtypes.float32)
  dequeued_t = q.dequeue()

  with session.Session(server.target) as sess:
    # Intentionally do not run any enqueue_ops so that dequeue will block
    # until operation_timeout_in_ms.
    with self.assertRaises(errors.DeadlineExceededError):
      sess.run(dequeued_t)
def runTestBuildGraphError(self, sess):
  # Ensure that errors from building the graph get propagated.
  data = array_ops.placeholder(dtypes.float32, shape=[])
  # Two Enter ops with different frame names; mixing their outputs is an
  # invalid graph and must be reported at run time.
  enter_1 = control_flow_ops.enter(data, 'foo_1', False)
  enter_2 = control_flow_ops.enter(data, 'foo_2', False)
  res = math_ops.add(enter_1, enter_2)
  with self.assertRaisesOpError('has inputs from different frames'):
    sess.run(res, feed_dict={data: 1.0})
def testBuildGraphErrorDirect(self):
  # Graph-construction errors propagate through a direct (in-process) session.
  self.runTestBuildGraphError(session.Session())
def testBuildGraphErrorDist(self):
  # Graph-construction errors propagate through a distributed session too.
  server = server_lib.Server.create_local_server()
  self.runTestBuildGraphError(session.Session(server.target))
def testGraphOptimizer(self):
  # Run a tiny graph with constant folding enabled and use the cost model to
  # verify the optimizer rewired the graph.
  rewrite_options = rewriter_config_pb2.RewriterConfig(
      disable_model_pruning=False, constant_folding=True)
  graph_options = config_pb2.GraphOptions(
      rewrite_options=rewrite_options, build_cost_model=1)
  config = config_pb2.ConfigProto(graph_options=graph_options)

  with ops.Graph().as_default() as g:
    r1 = random_ops.random_normal(shape=[2, 3], name='R1')
    r2 = random_ops.random_normal(shape=[2, 3], name='R2')
    # stop_gradient/identity are removable pass-throughs the optimizer
    # should eliminate.
    copy1 = array_ops.stop_gradient(r1)
    copy2 = array_ops.identity(r2)
    result = copy1 + copy2

    with session.Session(graph=g, config=config) as sess:
      metadata = config_pb2.RunMetadata()
      sess.run(result, run_metadata=metadata)

  # Check that we optimized the graph by looking at the cost model: the add
  # node should have been reconnected directly to the R1 and R2 nodes.
  found_valid_nodes = 0
  for node in metadata.cost_graph.node:
    if node.name == 'R1':
      r1_cost_id = node.id
      found_valid_nodes += 1
    if node.name == 'R2':
      r2_cost_id = node.id
      found_valid_nodes += 1
    if node.name == 'add':
      # The add's two inputs may appear in either order.
      if node.input_info[0].preceding_node == r1_cost_id:
        self.assertEqual(node.input_info[1].preceding_node, r2_cost_id)
        found_valid_nodes += 1
      elif node.input_info[0].preceding_node == r2_cost_id:
        self.assertEqual(node.input_info[1].preceding_node, r1_cost_id)
        found_valid_nodes += 1
  self.assertEqual(3, found_valid_nodes)
def testDeviceAttributes(self):
  # _DeviceAttributes exposes name/device_type/memory_limit_bytes and a
  # readable repr.
  attrs = session._DeviceAttributes(
      '/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337)
  self.assertEqual(1337, attrs.memory_limit_bytes)
  self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
  self.assertEqual('TYPE', attrs.device_type)
  str_repr = '%s' % attrs
  self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def testDeviceAttributesCanonicalization(self):
  # A legacy-style device name ('/cpu:1') must be canonicalized to the
  # fully-qualified '/device:CPU:1' form.
  attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
                                    'TYPE', 1337)
  self.assertEqual(1337, attrs.memory_limit_bytes)
  self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
  self.assertEqual('TYPE', attrs.device_type)
  str_repr = '%s' % attrs
  self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
if __name__ == '__main__':
googletest.main()
|
bot.py | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017-2019 TwitchIO
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import importlib
import inspect
import itertools
import sys
import threading
import traceback
import uuid
from typing import Union, List, Tuple
from .core import Command, AutoCog
from .errors import *
from .stringparser import StringParser
from twitchio.client import Client
from twitchio.dataclasses import Context
from twitchio.errors import ClientError
from twitchio.webhook import TwitchWebhookServer
from twitchio.websocket import WebsocketConnection
class Bot(Client):
"""Twitch IRC Bot.
Parameters
------------
irc_token: str
The OAuth token to use for IRC.
client_id: str:
Your application ID, used for HTTP endpoints.
prefix: str
The bots prefix.
nick: str
The bots nick in lowercase.
loop: [Optional]
The asyncio event loop to use.
initial_channels: list
The initial channels for the bot to join on startup.
webhook_server: bool [Optional]
A bool indicating whether the built-in webhook server should be used.
local_host: str [Optional]
The local host the webhook server should run on.
external_host: str [Optional]
The external address the webhook_server should lsiten on.
port: int [Optional]
The port the webhook_server should be started on.
callback: str [Optional]
The pages as a string where the webhook_server should lsiten for events.
Notes
-------
.. note::
To enable the webhook server, the webhook_server parameter must be True.
A local_host, external_host and port must also be provided.
An optional parameter `callback` may be passed. This should be the page Twitch sends data to.
A long random string, such as hex, is advised e.g `2t389hth892t3h898hweiogtieo`
"""
def __init__(self, irc_token: str, api_token: str=None, *, client_id: str=None, prefix: Union[list, tuple, str],
             nick: str, loop: asyncio.BaseEventLoop=None, initial_channels: Union[list, tuple]=None,
             webhook_server: bool=False, local_host: str=None, external_host: str=None, callback: str=None,
             port: int=None, **attrs):
    # See the class docstring for parameter documentation.
    self.loop = loop or asyncio.get_event_loop()
    super().__init__(loop=self.loop, client_id=client_id, **attrs)

    self.nick = nick
    self.initial_channels = initial_channels

    # IRC websocket connection shared by the whole bot.
    self._ws = WebsocketConnection(bot=self, loop=self.loop, http=self.http, irc_token=irc_token,
                                   nick=nick, initial_channels=initial_channels, **attrs)

    self._webhook_server = None
    if webhook_server:
        self._webhook_server = TwitchWebhookServer(bot=self,
                                                   local=local_host,
                                                   external=external_host,
                                                   callback=callback,
                                                   port=port)
        # The webhook server runs its own event loop in a daemon thread so
        # it never blocks the bot's main loop.
        loop = asyncio.new_event_loop()
        thread = threading.Thread(target=self._webhook_server.run_server, args=(loop, ), daemon=True)
        thread.start()

    # Prefix may be a str/list/tuple or a (possibly async) callable; it is
    # normalised asynchronously into self.prefixes.
    self.loop.create_task(self._prefix_setter(prefix))

    self.extra_listeners = {}   # event name -> list of extra coroutines
    self.commands = {}          # command name -> Command
    self.modules = {}           # dotted module path -> module object
    self.cogs = {}              # cog class name -> cog instance
    self._aliases = {}          # alias -> canonical command name
    self._checks = []           # global check predicates
    self.prefixes = None        # set by _prefix_setter

    self._init_methods()
def _init_methods(self):
    # Register Command instances defined directly on the Bot subclass,
    # binding each to this bot instance.
    commands = inspect.getmembers(self)

    for _, obj in commands:
        if not isinstance(obj, Command):
            continue

        obj.instance = self

        try:
            self.add_command(obj)
        except CommandError:
            # Keep loading the remaining commands even if one fails.
            traceback.print_exc()
            continue
def add_command(self, command):
    """Register a :class:`.Command` and its aliases on the bot.

    Parameters
    ------------
    command: :class:`.Command`
        The command instance to register.

    Raises
    --------
    TypeError
        The object passed is not an instance of :class:`.Command`.
    CommandError
        A command or alias with that name already exists, or the
        callback is not a coroutine. On failure nothing is registered.
    """
    if not isinstance(command, Command):
        # Fixed typo in the original message ("passed my be").
        raise TypeError('Commands passed must be a subclass of Command.')
    elif command.name in self.commands:
        raise CommandError(f'Failed to load command <{command.name}>, a command with that name already exists')
    elif not inspect.iscoroutinefunction(command._callback):
        raise CommandError(f'Failed to load command <{command.name}>. Commands must be coroutines.')

    self.commands[command.name] = command

    if not command.aliases:
        return

    registered = []  # aliases added so far, so we can roll back on conflict
    for alias in command.aliases:
        # Also consult self._aliases: previously only command names were
        # checked, so duplicate aliases silently overwrote each other.
        if alias in self.commands or alias in self._aliases:
            # Roll back everything this call registered so a failed load
            # leaves the bot's state untouched.
            del self.commands[command.name]
            for added in registered:
                del self._aliases[added]
            raise CommandError(
                f'Failed to load command <{command.name}>, a command with that name/alias already exists.')

        self._aliases[alias] = command.name
        registered.append(alias)
def remove_command(self, command):
    """Unregister *command* and any aliases it installed."""
    for alias in command.aliases or ():
        self._aliases.pop(alias)

    # pop() with a default mirrors the original try/except KeyError guard:
    # removing an unknown command is a silent no-op.
    self.commands.pop(command.name, None)
def load_module(self, name: str):
    """Method which loads a module and its cogs.

    Parameters
    ------------
    name: str
        The name of the module to load in dot.path format.

    Raises
    --------
    ImportError
        The module defines neither an ``AutoCog`` subclass nor a
        module-level ``prepare`` hook.
    """
    if name in self.modules:
        return  # already loaded: no-op

    valid = False
    module = importlib.import_module(name)

    # Instantiate and prepare every AutoCog subclass found in the module.
    for _, member in inspect.getmembers(module):
        if inspect.isclass(member) and issubclass(member, AutoCog):
            member(self)._prepare(self)
            valid = True

    if hasattr(module, 'prepare'):
        module.prepare(self)
    elif not valid:
        # Nothing to set up: undo the import before raising.
        del module
        del sys.modules[name]
        raise ImportError(f'Module <{name}> is missing a prepare method')

    if name not in self.modules:
        self.modules[name] = module
def unload_module(self, name: str):
    """Method which unloads a module and its cogs/commands/events.

    Parameters
    ------------
    name: str
        The name of the module to unload in dot.path format.
    """
    module = self.modules.pop(name, None)
    if not module:
        return

    # Remove any cogs this module contributed.
    for cogname, _ in inspect.getmembers(module):
        if cogname in self.cogs:
            self.remove_cog(cogname)

    try:
        # ``breakdown`` is an optional teardown hook. load_module accepts
        # modules that only define AutoCog subclasses, so calling it
        # unconditionally (as before) raised AttributeError for them.
        if hasattr(module, 'breakdown'):
            module.breakdown(self)
    finally:
        # Always drop our references, even if breakdown raised.
        del module
        del sys.modules[name]
def add_cog(self, cog):
    """Method which loads a cog and registers its commands and events.

    Parameters
    ------------
    cog:
        An instance of the cog you wish to load.
    """
    for attr_name, attr in inspect.getmembers(cog):
        if isinstance(attr, Command):
            # Bind the command to its cog so callbacks receive it.
            attr.instance = cog
            self.add_command(attr)
        elif attr_name.startswith('event_'):
            self.add_listener(attr, attr_name)

    self.cogs[type(cog).__name__] = cog
def remove_cog(self, cogname: str):
    """Method which removes a cog and unregisters its commands and events.

    Parameters
    ------------
    cogname: str
        The name of the cog you wish to remove.
    """
    cog = self.cogs.pop(cogname, None)
    if not cog:
        return

    for name, member in inspect.getmembers(cog):
        if isinstance(member, Command):
            self.remove_command(member)
        elif name.startswith('event_'):
            # pop() so a listener that was never registered is a no-op
            # instead of a KeyError.
            self.extra_listeners.pop(name, None)
        elif name in self.extra_listeners:
            # Previously this deleted by member.__name__, which can differ
            # from the key the listener was registered under (``name``).
            self.extra_listeners.pop(name, None)

    try:
        # Name-mangled unload hook: ``__unload`` defined on the cog class.
        # Must use the *class* name — instances have no ``__name__``, so the
        # old ``cog.__name__`` raised AttributeError and the hook was
        # silently skipped.
        unload = getattr(cog, f'_{type(cog).__name__}__unload')
    except AttributeError:
        pass
    else:
        unload(self)

    del cog
def add_check(self, func):
"""Adds a global check to the bot.
Parameters
------------
func : callable
The function or coroutine to add as a global check to the bot.
"""
self._checks.append(func)
def remove_check(self, func):
"""Remove a global check from the bot.
Parameters
------------
func : callable
The function to remove as a global check from the bot.
"""
self._checks.remove(func)
def run(self):
    """A blocking call that initializes the IRC Bot event loop.

    This should be the last function to be called.

    .. warning::
        You do not need to use this function unless you are accessing the IRC Endpoints.

    .. warning::
        You do not use this function if you are using :meth:`.start`
    """
    loop = self.loop or asyncio.get_event_loop()

    # Connect first so listen starts with an established websocket.
    loop.run_until_complete(self._ws._connect())

    try:
        loop.run_until_complete(self._ws._listen())
    except KeyboardInterrupt:
        # Allow a clean shutdown on Ctrl-C.
        pass
    finally:
        # Always tear the websocket down, even on error.
        self._ws.teardown()
async def start(self):
    """|coro|

    An asynchronous call which starts the IRC Bot event loop.
    This should only be used when integrating Twitch Bots with Discord Bots.
    :meth:`.run` should be used instead.

    .. warning::
        Do not use this function if you are using :meth:`.run`
    """
    await self._ws._connect()

    try:
        await self._ws._listen()
    except KeyboardInterrupt:
        pass
    finally:
        # Always tear the websocket down, even on error.
        self._ws.teardown()
async def _prefix_setter(self, item):
    """Normalise the user-supplied prefix into ``self.prefixes`` (a list).

    Accepts a str, list, tuple, or a (possibly async) callable returning
    one of those; anything else raises :class:`ClientError`.
    """
    # Resolve callables (including coroutines) to their returned value.
    if inspect.iscoroutinefunction(item):
        item = await item()
    elif callable(item):
        item = item()

    if isinstance(item, str):
        self.prefixes = [item]
    elif isinstance(item, (list, tuple)):
        self.prefixes = item
    else:
        raise ClientError('Invalid prefix provided. A list, tuple, str or callable returning either should be used.')
async def _get_prefixes(self, message):
    """Resolve ``self.prefixes`` for *message* into a non-empty list.

    Raises :class:`ClientError` when resolution yields nothing usable.
    """
    source = self.prefixes
    resolved = source

    if callable(source):
        resolved = source(self, message.content)
        if inspect.isawaitable(resolved):
            resolved = await resolved

    if isinstance(resolved, (list, tuple)):
        # Drop empty/falsy entries.
        resolved = [p for p in resolved if p]
    if isinstance(resolved, str):
        resolved = [resolved]

    if not resolved:
        raise ClientError('Invalid prefix provided.')

    return resolved
async def get_prefix(self, message):
    """Return the prefix *message* starts with, or ``None`` if no
    registered prefix matches."""
    content = message.content

    for candidate in await self._get_prefixes(message):
        if content.startswith(candidate):
            return candidate

    return None
def get_channel(self, name: str):
    """Retrieve a :class:`.Channel` from cache, or ``None`` when absent.

    Parameters
    ------------
    name: str
        The channel name to retrieve from cache (case-insensitive).
    """
    entry = self._ws._channel_cache.get(name.lower())
    return entry['channel'] if entry else None
async def join_channels(self, channels: Union[List[str], Tuple[str]]):
"""|coro|
Join the specified channels.
Parameters
------------
channels: Union[List[str], Tuple[str]]
The channels in either a list or tuple form to join.
"""
await self._ws.join_channels(*channels)
async def part_channels(self, channels: Union[List[str], Tuple[str]]):
"""|coro|
Part the specified channels.
Parameters
------------
channels: Union[List[str], Tuple[str]]
The channels in either a list or tuple form to part.
"""
await self._ws.part_channels(*channels)
async def get_context(self, message, cls=None):
    """|coro|

    A function which creates context with the given message.
    A custom context class can be passed.

    Parameters
    ------------
    message: :class:`.Message`
        The message to create context from.
    cls: Optional[Type]
        The optional custom class to create Context.

    Returns
    ---------
    :class:`.Context`
        The context created.
    """
    prefix = await self.get_prefix(message)

    if not cls:
        cls = Context

    # prefix is None when the message does not start with a known prefix;
    # handle_commands uses that to skip non-command messages.
    ctx = cls(message=message, channel=message.channel, user=message.author, prefix=prefix)
    return ctx
async def _dispatch(self, event: str, *args, **kwargs):
    # Forward event dispatch to the underlying websocket connection.
    await self._ws._dispatch(event, *args, **kwargs)
async def _handle_checks(self, ctx, no_global_checks=False):
    """Evaluate the checks attached to ``ctx.command``.

    Global checks run before command-specific checks unless
    ``no_global_checks`` is True. Returns ``True`` when no checks exist,
    the failing predicate when a check returns a falsy value, and
    otherwise the result of the final check.
    """
    command = ctx.command

    if no_global_checks:
        checks = list(command._checks)
    else:
        checks = list(itertools.chain(self._checks, command._checks))

    if not checks:
        return True

    for predicate in checks:
        if inspect.iscoroutinefunction(predicate):
            outcome = await predicate(ctx)
        else:
            outcome = predicate(ctx)

        if not outcome:
            # Return the offending predicate so the caller can name it.
            return predicate

    return outcome
async def handle_commands(self, message, ctx=None):
    # Entry point for command processing: build context, verify the prefix,
    # tokenise the message, resolve aliases, run checks and hooks, and
    # finally invoke the command callback. Errors are routed to
    # event_error / event_command_error rather than raised to the caller
    # (except the CheckFailure on a falsy, non-callable check result).
    if ctx is None:
        try:
            ctx = await self.get_context(message)
        except Exception as e:
            return await self.event_error(e, message.raw_data)

    if not ctx.prefix:
        # Message does not start with a registered prefix: not a command.
        return

    content = message.content
    content = content[len(ctx.prefix)::].lstrip(' ')  # strip the prefix
    parsed = StringParser().process_string(content)   # index -> token dict
    message.clean_content = ' '.join(parsed.values())

    try:
        command = parsed.pop(0)
    except KeyError:
        # Nothing after the prefix.
        return

    try:
        command = self._aliases[command]  # alias -> canonical name
    except KeyError:
        pass

    try:
        if command in self.commands:
            command = self.commands[command]
        elif command:
            raise CommandNotFound(f'<{command}> was not found.')
        else:
            return
    except Exception as e:
        ctx.command = None
        return await self.event_command_error(ctx, e)

    ctx.command = command
    instance = ctx.command.instance  # the owning cog, or None

    try:
        result = await self._handle_checks(ctx, command.no_global_checks)
    except Exception as e:
        return await self.event_command_error(ctx, e)
    else:
        if callable(result):
            # _handle_checks returned the failing predicate.
            return await self.event_command_error(ctx, CheckFailure(f'The command <{command.name}> failed to invoke'
                                                                    f' due to checks:: {result.__name__}'))
        elif not result:
            raise CheckFailure(f'The command <{command.name}> failed to invoke due to checks.')

    try:
        ctx.args, ctx.kwargs = await command.parse_args(instance, parsed)

        await self.global_before_hook(ctx)

        if ctx.command._before_invoke:
            await ctx.command._before_invoke(instance, ctx)

        # Cog-bound commands receive their cog instance first.
        if instance:
            await ctx.command._callback(instance, ctx, *ctx.args, **ctx.kwargs)
        else:
            await ctx.command._callback(ctx, *ctx.args, **ctx.kwargs)
    except Exception as e:
        if ctx.command.on_error:
            await ctx.command.on_error(instance, ctx, e)

        await self.event_command_error(ctx, e)

    try:
        # Invoke our after command hooks...
        if command._after_invoke:
            await ctx.command._after_invoke(ctx)

        await self.global_after_hook(ctx)
    except Exception as e:
        await self.event_command_error(ctx, e)
async def global_before_hook(self, ctx):
"""|coro|
Method which is called before any command is about to be invoked.
This method is useful for setting up things before command invocation. E.g Database connections or
retrieving tokens for use in the command.
Parameters
------------
ctx:
The context used for command invocation.
Examples
----------
.. code:: py
async def global_before_hook(self, ctx):
# Make a database query for example to retrieve a specific token.
token = db_query()
ctx.token = token
async def my_command(self, ctx):
data = await self.create_clip(ctx.token, ...)
Note
------
The global_before_hook is called before any other command specific hooks.
"""
pass
async def global_after_hook(self, ctx):
"""|coro|
Method which is called after any command is invoked regardless if it failed or not.
This method is useful for cleaning up things after command invocation. E.g Database connections.
Parameters
------------
ctx:
The context used for command invocation.
Note
------
The global_after_hook is called after the command successfully invokes.
"""
pass
async def event_webhook(self, data):
"""|coro|
Event which is fired when a message from a Webhook subscription is received.
Parameters
------------
data: dict
The webhook data as JSON.
Warning
---------
This event is only applicable when using the built in webhook server.
"""
pass
async def event_raw_pubsub(self, data):
"""|coro|
Event which fires when a PubSub subscription event is received.
Parameters
------------
data:
The raw data received from the PubSub event.
Notes
-------
.. note::
No parsing is done on the JSON and thus the data will be raw.
A new event which parses the JSON will be released at a later date.
"""
pass
async def event_pubsub(self, data):
raise NotImplementedError
async def pubsub_subscribe(self, token: str, *topics):
"""|coro|
Method which sends a LISTEN event over PubSub. This subscribes you to the topics provided.
Parameters
------------
token: str [Required]
The oAuth token to use to subscribe.
\*topics: Union[str] [Required]
The topics to subscribe to.
Raises
--------
WSConnectionFailure
The PubSub websocket failed to connect.
ClientError
You reached the maximum amount of PubSub connections/Subscriptions.
Returns
---------
nonce: str
The nonce associated with this subscription. Useful for validating responses.
"""
nonce = uuid.uuid4().hex
connection = await self._ws._pubsub_pool.delegate(*topics)
await connection.subscribe(token, nonce, *topics)
return nonce
async def event_command_error(self, ctx, error):
"""|coro|
Event called when an error occurs during command invocation.
Parameters
------------
ctx: :class:`.Context`
The command context.
error: :class:`.Exception`
The exception raised while trying to invoke the command.
"""
print('Ignoring exception in command: {0}:'.format(error), file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
async def event_mode(self, channel, user, status):
"""|coro|
Event called when a MODE is received from Twitch.
Parameters
------------
channel: :class:`.Channel`
Channel object relevant to the MODE event.
user: :class:`.User`
User object containing relevant information to the MODE.
status: str
The JTV status received by Twitch. Could be either o+ or o-.
Indicates a moderation promotion/demotion to the :class:`.User`
"""
pass
async def event_userstate(self, user):
"""|coro|
Event called when a USERSTATE is received from Twitch.
Parameters
------------
user: :class:`.User`
User object containing relevant information to the USERSTATE.
"""
pass
async def event_raw_usernotice(self, channel, tags: dict):
"""|coro|
Event called when a USERNOTICE is received from Twitch.
Since USERNOTICE's can be fairly complex and vary, the following sub-events are available:
:meth:`event_usernotice_subscription` :
Called when a USERNOTICE Subscription or Re-subscription event is received.
.. seealso::
For more information on how to handle USERNOTICE's visit:
https://dev.twitch.tv/docs/irc/tags/#usernotice-twitch-tags
Parameters
------------
channel: :class:`.Channel`
Channel object relevant to the USERNOTICE event.
tags : dict
A dictionary with the relevant information associated with the USERNOTICE.
This could vary depending on the event.
"""
pass
async def event_usernotice_subscription(self, metadata):
"""|coro|
Event called when a USERNOTICE subscription or re-subscription event is received from Twitch.
Parameters
------------
metadata: :class:`twitchio.dataclasses.NoticeSubscription`
The object containing various metadata about the subscription event.
For ease of use, this contains a :class:`.User` and :class:`.Channel`.
"""
pass
async def event_part(self, user):
"""|coro|
Event called when a PART is received from Twitch.
Parameters
------------
user: :class:`.User`
User object containing relevant information to the PART.
"""
pass
async def event_join(self, user):
"""|coro|
Event called when a JOIN is received from Twitch.
Parameters
------------
user: :class:`.User`
User object containing relevant information to the JOIN.
"""
pass
async def event_message(self, message):
"""|coro|
Event called when a PRIVMSG is received from Twitch.
Parameters
------------
message: :class:`.Message`
Message object containing relevant information.
"""
await self.handle_commands(message)
async def event_error(self, error: Exception, data=None):
"""|coro|
Event called when an error occurs while processing data.
Parameters
------------
error: Exception
The exception raised.
data: str
The raw data received from Twitch. Depending on how this is called, this could be None.
Example
---------
.. code:: py
@bot.event
async def event_error(error, data):
traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
"""
traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
async def event_ready(self):
"""|coro|
Event called when the Bot has logged in and is ready.
Example
---------
.. code:: py
@bot.event
async def event_ready():
print(f'Logged into Twitch | {bot.nick}')
"""
pass
async def event_raw_data(self, data):
"""|coro|
Event called with the raw data received by Twitch.
Parameters
------------
data: str
The raw data received from Twitch.
Example
---------
.. code:: py
@bot.event
async def event_raw_data(data):
print(data)
"""
pass
def command(self, *, name: str=None, aliases: Union[list, tuple]=None, cls=Command):
    """Decorator which registers a coroutine as a command on the bot.

    Parameters
    ------------
    name: str [Optional]
        The name of the command. Defaults to the decorated function's name.
    aliases: Union[list, tuple] [Optional]
        The command aliases. This must be a list or tuple.
    cls: class [Optional]
        The command class to instantiate. Must be similar to :class:`.Command`.

    Raises
    --------
    TypeError
        ``cls`` is not a class.
    """
    if not inspect.isclass(cls):
        raise TypeError(f'cls must be of type <class> not <{type(cls)}>')

    def decorator(func):
        command = cls(name=name or func.__name__, func=func, aliases=aliases, instance=None)
        self.add_command(command)
        return command

    return decorator
def event(self, func):
    """Decorator which adds an event listener to the bot.

    The coroutine's name determines the event it handles, and it replaces
    the default handler of the same name on this instance.

    Raises
    --------
    TypeError
        The decorated object is not a coroutine.

    Example
    ---------
    .. code:: py

        @bot.event
        async def event_raw_data(data):
            print(data)

        @bot.event
        async def event_message(message):
            print(message.content)
            await bot.handle_commands(message)
    """
    if not inspect.iscoroutinefunction(func):
        raise TypeError('Events must be coroutines.')

    # Shadow the default handler on this instance with the user's coroutine.
    setattr(self, func.__name__, func)
    return func
def check(self, func):
"""A decorator that adds a global check to the bot.
This decorator allows regular functions or coroutines to be added to the bot.
Global checks are ran before any other command specific checks.
As with all other checks, the check(predicate), must contain a sole parametere of Context.
Parameters
------------
func : callable
A regular function or coroutine to add as a global check.
Examples
----------
.. code::
@bot.check
async def my_global_check(self, ctx):
return ctx.author.is_mod
"""
self._checks.append(func)
return func
def add_listener(self, func, name: str=None):
    """Register a coroutine as an extra listener for an event.

    Parameters
    ------------
    func: coro [Required]
        The coroutine to assign as a listener.
    name: str [Optional]
        The event to register, e.g. ``"event_message"``. Defaults to the
        coroutine's own name.

    Raises
    --------
    TypeError
        ``func`` is not a coroutine.
    """
    if not inspect.iscoroutinefunction(func):
        raise TypeError('Events must be coroutines.')

    key = name or func.__name__
    # setdefault collapses the original add-or-append branching.
    self.extra_listeners.setdefault(key, []).append(func)
def listen(self, event: str=None):
"""Decorator which adds a coroutine as a listener to an event.
This can be used in place of :meth:`.event` or when more than one of the same event is required.
Parameters
------------
event: str [Optional]
The event to listen to in the form of a string. E.g "event_message".
Example
----------
.. code:: py
@bot.event()
async def event_message(message):
print(message.content)
@bot.listen("event_message")
async def extra_message(message):
print(message.content)
"""
def wrapper(func):
self.add_listener(func, event)
return func
return wrapper
async def modify_webhook_subscription(self, *, callback=None, mode, topic, lease_seconds=0, secret=None):
"""|coro|
Creates a webhook subscription.
Parameters
----------
callback: Optional[str]
The URL which will be called to verify the subscripton and on callback.
If there's a webhook server running on the bot the callback will be automatically added.
mode: :class:`.WebhookMode`
Mode which describes whether the subscription should be created or not.
topic: :class:`.Topic`
Details about the subscription.
lease_seconds: Optional[int]
How many seconds the subscription should last. Defaults to 0, maximum is 846000.
secret: Optional[str]
A secret string which Twitch will use to add the `X-Hub-Signature` header to webhook requests.
You can use this to verify the POST request came from Twitch using `sha256(secret, body)`.
Raises
--------
Exception
No callback url was specified and there is no webhook server running to retrieve a callback url from.
HTTPException
Bad request while modifying the subscription.
"""
if callback is None:
if self._webhook_server is None:
raise Exception('No callback passed and no webhook server running to retrieve a callback url from.')
callback = f'{self._webhook_server.external}:{self._webhook_server.port}/{self._webhook_server.callback}'
await super().modify_webhook_subscription(
callback=callback, mode=mode, topic=topic, lease_seconds=lease_seconds, secret=secret
)
|
test_pipe_communication.py | import multiprocessing
import sys
from pymulproc import mpq_protocol, factory
def test_local_pipe_communication():
    '''Test that communication.multiprocessing.pipe.peers library works well as follows:

    1) Child can send a message
    2) Parent can receive such message
    3) Parent can send a message
    4) Child can receive it
    '''
    pipe_factory = factory.PipeCommunication()
    parent = pipe_factory.parent()

    def call_child(p_factory):
        # Runs in the child process: send a request to the parent, then
        # poll for the parent's reply.
        child = p_factory.child()
        child.send(mpq_protocol.REQ_TEST_PARENT)
        stop = False
        loops = 10000

        # Avoiding hanging for ever
        while not stop and loops:
            loops -= 1
            stop = child.receive()

        # If we have not received anything or the wrong message from parent ==> Let the parent know that the job
        # was not complete and I exited with wrong
        try:
            assert len(stop) == 4
            assert stop[mpq_protocol.S_PID_OFFSET - 1] == mpq_protocol.REQ_TEST_CHILD
        except AssertionError:
            # Non-zero exit code signals failure to the parent's final check.
            sys.exit(1)

    child_process = multiprocessing.Process(target=call_child, args=(pipe_factory,))
    child_process.start()

    # Parent side: poll (bounded) for the child's message.
    stop = False
    loops = 10000
    while not stop and loops:
        loops -= 1
        stop = parent.receive()

    assert len(stop) == 4
    assert stop[mpq_protocol.S_PID_OFFSET - 1] == mpq_protocol.REQ_TEST_PARENT

    parent.send(mpq_protocol.REQ_TEST_CHILD)
    child_process.join()

    # exitcode 0 proves the child's own assertions passed as well.
    assert child_process.exitcode == 0
|
cli.py | # encoding: utf-8
from __future__ import print_function
import collections
import csv
import multiprocessing as mp
import os
import datetime
import sys
from pprint import pprint
import re
import itertools
import json
import logging
from optparse import OptionConflictError
import traceback
from six import text_type
from six.moves import input, xrange
from six.moves.urllib.error import HTTPError
from six.moves.urllib.parse import urljoin, urlparse
from six.moves.urllib.request import urlopen
import sqlalchemy as sa
import routes
import paste.script
from paste.registry import Registry
from paste.script.util.logging_config import fileConfig
import click
from ckan.config.middleware import make_app
import ckan.logic as logic
import ckan.model as model
import ckan.include.rjsmin as rjsmin
import ckan.include.rcssmin as rcssmin
import ckan.plugins as p
from ckan.common import config
# This is a test Flask request context to be used internally.
# Do not use it!
_cli_test_request_context = None
# NB No CKAN imports are allowed until after the config file is loaded.
# i.e. do the imports in methods, after _load_config is called.
# Otherwise loggers get disabled.
def deprecation_warning(message=None):
    '''
    Print a deprecation warning to STDERR.

    If ``message`` is given it is also printed to STDERR.
    '''
    # Build the full line first, then emit it in a single write.
    text = u'WARNING: This function is deprecated.'
    if message:
        text += u' ' + message.strip()
    sys.stderr.write(text + u'\n')
def error(msg):
    '''
    Print an error message to STDERR and exit with return code 1.
    '''
    # Append a newline only when the message doesn't already end with one.
    suffix = '' if msg.endswith('\n') else '\n'
    sys.stderr.write(msg + suffix)
    sys.exit(1)
def parse_db_config(config_key='sqlalchemy.url'):
    ''' Takes a config key for a database connection url and parses it into
    a dictionary. Expects a url like:

    'postgres://tester:pass@localhost/ckantest3'

    Raises Exception when the url cannot be parsed.
    '''
    from ckan.common import config
    url = config[config_key]

    # Raw strings: '\s' and '\w' are invalid escape sequences in plain
    # string literals (DeprecationWarning since Python 3.6, error later).
    # The resulting patterns are byte-identical to the originals.
    regex = [
        r'^\s*(?P<db_type>\w*)',
        r'://',
        r'(?P<db_user>[^:]*)',
        r':?',
        r'(?P<db_pass>[^@]*)',
        r'@',
        r'(?P<db_host>[^/:]*)',
        r':?',
        r'(?P<db_port>[^/]*)',
        r'/',
        r'(?P<db_name>[\w.-]*)'
    ]
    db_details_match = re.match(''.join(regex), url)
    if not db_details_match:
        raise Exception('Could not extract db details from url: %r' % url)
    db_details = db_details_match.groupdict()
    return db_details
def user_add(args):
    '''Add new user if we use paster sysadmin add
    or paster user add

    ``args`` is the positional argument list: the user name first,
    optionally followed by "<field>=<value>" pairs (e.g. email=...,
    password=..., fullname=..., apikey=...).  Missing email/password
    are prompted for interactively.  Exits via error() on failure.
    '''
    if len(args) < 1:
        error('Error: you need to specify the user name.')
    username = args[0]
    # parse args into data_dict
    data_dict = {'name': username}
    for arg in args[1:]:
        try:
            field, value = arg.split('=', 1)
            data_dict[field] = value
        except ValueError:
            raise ValueError(
                'Could not parse arg: %r (expected "<option>=<value>)"' % arg
            )
    # Required: keep prompting until the address contains an '@'.
    while '@' not in data_dict.get('email', ''):
        data_dict['email'] = input('Email address: ').strip()
    if 'password' not in data_dict:
        data_dict['password'] = UserCmd.password_prompt()
    # Optional
    if 'fullname' in data_dict:
        # NOTE(review): str.decode only exists on Python 2 byte strings;
        # under Python 3 this line raises AttributeError - confirm which
        # interpreter this code path still supports.
        data_dict['fullname'] = data_dict['fullname'].decode(
            sys.getfilesystemencoding()
        )
    print('Creating user: %r' % username)
    try:
        # Deferred imports: CKAN modules must not be imported before the
        # config file is loaded (see module-level note above).
        import ckan.logic as logic
        import ckan.model as model
        site_user = logic.get_action('get_site_user')({
            'model': model,
            'ignore_auth': True},
            {}
        )
        context = {
            'model': model,
            'session': model.Session,
            'ignore_auth': True,
            'user': site_user['name'],
        }
        user_dict = logic.get_action('user_create')(context, data_dict)
        pprint(user_dict)
    except logic.ValidationError as e:
        # Print the full traceback and exit(1) on validation failure.
        error(traceback.format_exc())
## from http://code.activestate.com/recipes/577058/ MIT licence.
## Written by Trent Mick
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).
    The "answer" return value is one of "yes" or "no".
    """
    valid = {"yes": "yes", "y": "yes", "ye": "yes",
             "no": "no", "n": "no"}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = input().strip().lower()
        if default is not None and choice == '':
            return default
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
class MockTranslator(object):
    """Minimal stand-in for a pylons translator.

    Messages are returned unchanged; pluralisation picks the plural
    form only for counts greater than one.
    """
    def gettext(self, value):
        return value

    def ugettext(self, value):
        return value

    def ungettext(self, singular, plural, n):
        return plural if n > 1 else singular
def _get_config(config=None):
    '''Locate and load the CKAN .ini config file.

    Search order: the explicit ``config`` argument (-c), then the
    $CKAN_INI environment variable, then development.ini in the
    current working directory.  Exits with a message if no config
    file can be found.

    :returns: the parsed paste.deploy application config.
    '''
    from paste.deploy import appconfig
    if config:
        filename = os.path.abspath(config)
        config_source = '-c parameter'
    elif os.environ.get('CKAN_INI'):
        filename = os.environ.get('CKAN_INI')
        config_source = '$CKAN_INI'
    else:
        default_filename = 'development.ini'
        filename = os.path.join(os.getcwd(), default_filename)
        if not os.path.exists(filename):
            # give really clear error message for this common situation
            msg = 'ERROR: You need to specify the CKAN config (.ini) '\
                'file path.'\
                '\nUse the --config parameter or set environment ' \
                'variable CKAN_INI or have {}\nin the current directory.' \
                .format(default_filename)
            exit(msg)
    if not os.path.exists(filename):
        msg = 'Config file not found: %s' % filename
        msg += '\n(Given by: %s)' % config_source
        exit(msg)
    # Configure Python logging from the [loggers]/[handlers]/[formatters]
    # sections of the ini file before the app config is parsed.
    fileConfig(filename)
    return appconfig('config:' + filename)
def load_config(config, load_site_user=True):
    '''Load the CKAN environment from the given config file.

    Builds the WSGI app, stores a Flask test request context (so
    url_for works from the CLI), registers a pylons mock translator,
    and optionally looks up the site user.

    :returns: the site user dict, or None when the DB is not yet
        initialised or ``load_site_user`` is False.
    '''
    conf = _get_config(config)
    assert 'ckan' not in dir() # otherwise loggers would be disabled
    # We have now loaded the config. Now we can import ckan for the
    # first time.
    from ckan.config.environment import load_environment
    load_environment(conf.global_conf, conf.local_conf)
    # Set this internal test request context with the configured environment so
    # it can be used when calling url_for from the CLI.
    global _cli_test_request_context
    app = make_app(conf.global_conf, **conf.local_conf)
    flask_app = app.apps['flask_app']._wsgi_app
    _cli_test_request_context = flask_app.test_request_context()
    registry = Registry()
    registry.prepare()
    import pylons
    registry.register(pylons.translator, MockTranslator())
    site_user = None
    if model.user_table.exists() and load_site_user:
        # If the DB has already been initialized, create and register
        # a pylons context object, and add the site user to it, so the
        # auth works as in a normal web request
        c = pylons.util.AttribSafeContextObj()
        registry.register(pylons.c, c)
        site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
        pylons.c.user = site_user['name']
        pylons.c.userobj = model.User.get(site_user['name'])
    ## give routes enough information to run url_for
    parsed = urlparse(conf.get('ckan.site_url', 'http://0.0.0.0'))
    request_config = routes.request_config()
    request_config.host = parsed.netloc + parsed.path
    request_config.protocol = parsed.scheme
    return site_user
def paster_click_group(summary):
    '''Return a paster command click.Group for paster subcommands

    :param command: the paster command linked to this function from
        setup.py, used in help text (e.g. "datastore")
    :param summary: summary text used in paster's help/command listings
        (e.g. "Perform commands to set up the datastore")
    '''
    class PasterClickGroup(click.Group):
        '''A click.Group that may be called like a paster command'''
        def __call__(self, ignored_command):
            # paster passes its own command name as the first argv
            # element; strip it and re-enter click with a prog name
            # that reproduces the "paster <command>" invocation.
            sys.argv.remove(ignored_command)
            return super(PasterClickGroup, self).__call__(
                prog_name=u'paster ' + ignored_command,
                help_option_names=[u'-h', u'--help'],
                obj={})
    # `click_config_option` is the shared -c/--config option defined at
    # module level further down this file (resolved at call time).
    @click.group(cls=PasterClickGroup)
    @click.option(
        '--plugin',
        metavar='ckan',
        help='paster plugin (when run outside ckan directory)')
    @click_config_option
    @click.pass_context
    def cli(ctx, plugin, config):
        # Stash the config path so subcommands can read it via ctx.obj.
        ctx.obj['config'] = config
    cli.summary = summary
    cli.group_name = u'ckan'
    return cli
# common definition for paster ... --config
# Reusable click option so every paster subcommand accepts -c/--config
# with identical semantics.
click_config_option = click.option(
    '-c',
    '--config',
    default=None,
    metavar='CONFIG',
    help=u'Config file to use (default: development.ini)')
class CkanCommand(paste.script.command.Command):
    '''Base class for classes that implement CKAN paster commands to inherit.'''
    # Shared option parser: every subclass gets -c/--config and -f/--file
    # in addition to paste's standard options (built at class-definition
    # time, so all subclasses share the same option set).
    parser = paste.script.command.Command.standard_parser(verbose=True)
    parser.add_option('-c', '--config', dest='config',
                      help='Config file to use.')
    parser.add_option('-f', '--file',
                      action='store',
                      dest='file_path',
                      help="File to dump results to (if needed)")
    default_verbosity = 1
    group_name = 'ckan'
    def _load_config(self, load_site_user=True):
        '''Load the CKAN environment from self.options.config and cache
        the site user dict (or None) on self.site_user.'''
        self.site_user = load_config(self.options.config, load_site_user)
class ManageDb(CkanCommand):
    '''Perform various tasks on the database.
    db create - alias of db upgrade
    db init - create and put in default data
    db clean - clears db (including dropping tables) and
        search index
    db upgrade [version no.] - Data migrate
    db version - returns current version of data schema
    db dump FILE_PATH - dump to a pg_dump file [DEPRECATED]
    db load FILE_PATH - load a pg_dump from a file [DEPRECATED]
    db load-only FILE_PATH - load a pg_dump from a file but don\'t do
        the schema upgrade or search indexing [DEPRECATED]
    db create-from-model - create database from the model (indexes not made)
    db migrate-filestore - migrate all uploaded data from the 2.1 filesore.
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = None
    min_args = 1
    def command(self):
        '''Dispatch to the requested db subcommand.'''
        cmd = self.args[0]
        # Skip the site-user lookup for 'upgrade': the DB may not be at
        # the current schema version yet, so the lookup could fail.
        self._load_config(cmd!='upgrade')
        import ckan.model as model
        import ckan.lib.search as search
        if cmd == 'init':
            model.repo.init_db()
            if self.verbose:
                print('Initialising DB: SUCCESS')
        elif cmd == 'clean' or cmd == 'drop':
            # remove any *.pyc version files to prevent conflicts
            v_path = os.path.join(os.path.dirname(__file__),
                                  '..', 'migration', 'versions', '*.pyc')
            import glob
            filelist = glob.glob(v_path)
            for f in filelist:
                os.remove(f)
            model.repo.clean_db()
            search.clear_all()
            if self.verbose:
                print('Cleaning DB: SUCCESS')
        elif cmd == 'upgrade':
            if len(self.args) > 1:
                model.repo.upgrade_db(self.args[1])
            else:
                model.repo.upgrade_db()
        elif cmd == 'version':
            self.version()
        elif cmd == 'dump':
            self.dump()
        elif cmd == 'load':
            self.load()
        elif cmd == 'load-only':
            self.load(only_load=True)
        elif cmd == 'create-from-model':
            model.repo.create_db()
            if self.verbose:
                print('Creating DB: SUCCESS')
        elif cmd == 'migrate-filestore':
            self.migrate_filestore()
        else:
            error('Command %s not recognized' % cmd)
    def _get_db_config(self):
        '''Parse sqlalchemy.url from the config into a details dict.'''
        return parse_db_config()
    def _get_postgres_cmd(self, command):
        '''Build a shell command string for a postgres client tool,
        adding user/password/host/port options from the config.'''
        self.db_details = self._get_db_config()
        if self.db_details.get('db_type') not in ('postgres', 'postgresql'):
            raise AssertionError('Expected postgres database - not %r' % self.db_details.get('db_type'))
        pg_cmd = command
        pg_cmd += ' -U %(db_user)s' % self.db_details
        if self.db_details.get('db_pass') not in (None, ''):
            # NOTE(review): the password is interpolated into a shell
            # string and may be visible in process listings.
            pg_cmd = 'export PGPASSWORD=%(db_pass)s && ' % self.db_details + pg_cmd
        if self.db_details.get('db_host') not in (None, ''):
            pg_cmd += ' -h %(db_host)s' % self.db_details
        if self.db_details.get('db_port') not in (None, ''):
            pg_cmd += ' -p %(db_port)s' % self.db_details
        return pg_cmd
    def _get_psql_cmd(self):
        '''Return a psql command line targeting the configured DB.'''
        psql_cmd = self._get_postgres_cmd('psql')
        psql_cmd += ' -d %(db_name)s' % self.db_details
        return psql_cmd
    def _postgres_dump(self, filepath):
        '''Run pg_dump, redirecting output to ``filepath``.'''
        pg_dump_cmd = self._get_postgres_cmd('pg_dump')
        pg_dump_cmd += ' %(db_name)s' % self.db_details
        pg_dump_cmd += ' > %s' % filepath
        self._run_cmd(pg_dump_cmd)
        print('Dumped database to: %s' % filepath)
    def _postgres_load(self, filepath):
        '''Feed a pg_dump file to psql; requires an empty database.'''
        import ckan.model as model
        assert not model.repo.are_tables_created(), "Tables already found. You need to 'db clean' before a load."
        pg_cmd = self._get_psql_cmd() + ' -f %s' % filepath
        self._run_cmd(pg_cmd)
        print('Loaded CKAN database: %s' % filepath)
    def _run_cmd(self, command_line):
        '''Run a shell command, raising SystemError on non-zero exit.'''
        import subprocess
        retcode = subprocess.call(command_line, shell=True)
        if retcode != 0:
            raise SystemError('Command exited with errorcode: %i' % retcode)
    def dump(self):
        '''Dump the database to the file given as the second arg.'''
        deprecation_warning(u"Use PostgreSQL's pg_dump instead.")
        if len(self.args) < 2:
            print('Need pg_dump filepath')
            return
        dump_path = self.args[1]
        # NOTE(review): the next two assignments are dead code -
        # psql_cmd is never used and _postgres_dump returns None.
        psql_cmd = self._get_psql_cmd() + ' -f %s'
        pg_cmd = self._postgres_dump(dump_path)
    def load(self, only_load=False):
        '''Load a pg_dump file, then (unless only_load) upgrade the
        schema and rebuild the search index.'''
        deprecation_warning(u"Use PostgreSQL's pg_restore instead.")
        if len(self.args) < 2:
            print('Need pg_dump filepath')
            return
        dump_path = self.args[1]
        # NOTE(review): psql_cmd is dead code here as well.
        psql_cmd = self._get_psql_cmd() + ' -f %s'
        pg_cmd = self._postgres_load(dump_path)
        if not only_load:
            print('Upgrading DB')
            import ckan.model as model
            model.repo.upgrade_db()
            print('Rebuilding search index')
            import ckan.lib.search
            ckan.lib.search.rebuild()
        else:
            print('Now remember you have to call \'db upgrade\' and then \'search-index rebuild\'.')
        print('Done')
    def migrate_filestore(self):
        '''Download legacy "storage" resources over HTTP and save them
        into the configured local filestore, updating url_type.'''
        from ckan.model import Session
        import requests
        from ckan.lib.uploader import ResourceUpload
        results = Session.execute("select id, revision_id, url from resource "
                                  "where resource_type = 'file.upload' "
                                  "and (url_type <> 'upload' or url_type is null)"
                                  "and url like '%storage%'")
        for id, revision_id, url in results:
            response = requests.get(url, stream=True)
            if response.status_code != 200:
                print("failed to fetch %s (code %s)" % (url,
                                                        response.status_code))
                continue
            resource_upload = ResourceUpload({'id': id})
            assert resource_upload.storage_path, "no storage configured aborting"
            directory = resource_upload.get_directory(id)
            filepath = resource_upload.get_path(id)
            try:
                os.makedirs(directory)
            except OSError as e:
                ## errno 17 is file already exists
                if e.errno != 17:
                    raise
            # Stream the download to disk in 1 KiB chunks.
            with open(filepath, 'wb+') as out:
                for chunk in response.iter_content(1024):
                    if chunk:
                        out.write(chunk)
            Session.execute("update resource set url_type = 'upload'"
                            "where id = :id", {'id': id})
            Session.execute("update resource_revision set url_type = 'upload'"
                            "where id = :id and "
                            "revision_id = :revision_id",
                            {'id': id, 'revision_id': revision_id})
            Session.commit()
            print("Saved url %s" % url)
    def version(self):
        '''Print the current schema version from migrate_version.'''
        from ckan.model import Session
        print(Session.execute('select version from '
                              'migrate_version;').fetchall())
class SearchIndexCommand(CkanCommand):
    '''Creates a search index for all datasets
    Usage:
      search-index [-i] [-o] [-r] [-e] [-q] rebuild [dataset_name] - reindex dataset_name if given, if not then rebuild
                                                                     full search index (all datasets)
      search-index rebuild_fast - reindex using multiprocessing using all cores.
                                  This acts in the same way as rubuild -r [EXPERIMENTAL]
      search-index check - checks for datasets not indexed
      search-index show DATASET_NAME - shows index of a dataset
      search-index clear [dataset_name] - clears the search index for the provided dataset or
                                          for the whole ckan instance
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 2
    min_args = 0
    def __init__(self, name):
        # Extend the shared CkanCommand parser with index-specific flags.
        super(SearchIndexCommand, self).__init__(name)
        self.parser.add_option('-i', '--force', dest='force',
                               action='store_true', default=False,
                               help='Ignore exceptions when rebuilding the index')
        self.parser.add_option('-o', '--only-missing', dest='only_missing',
                               action='store_true', default=False,
                               help='Index non indexed datasets only')
        self.parser.add_option('-r', '--refresh', dest='refresh',
                               action='store_true', default=False,
                               help='Refresh current index (does not clear the existing one)')
        self.parser.add_option('-q', '--quiet', dest='quiet',
                               action='store_true', default=False,
                               help='Do not output index rebuild progress')
        self.parser.add_option('-e', '--commit-each', dest='commit_each',
                               action='store_true', default=False, help=
                               '''Perform a commit after indexing each dataset. This ensures that changes are
immediately available on the search, but slows significantly the process.
Default is false.''')
    def command(self):
        '''Dispatch to the requested search-index subcommand.'''
        if not self.args:
            # default to printing help
            print(self.usage)
            return
        cmd = self.args[0]
        # Do not run load_config yet - rebuild_fast loads it per worker.
        if cmd == 'rebuild_fast':
            self.rebuild_fast()
            return
        self._load_config()
        if cmd == 'rebuild':
            self.rebuild()
        elif cmd == 'check':
            self.check()
        elif cmd == 'show':
            self.show()
        elif cmd == 'clear':
            self.clear()
        else:
            print('Command %s not recognized' % cmd)
    def rebuild(self):
        '''Reindex one dataset (if named) or the whole instance.'''
        from ckan.lib.search import rebuild, commit
        # BY default we don't commit after each request to Solr, as it is
        # a really heavy operation and slows things a lot
        if len(self.args) > 1:
            rebuild(self.args[1])
        else:
            rebuild(only_missing=self.options.only_missing,
                    force=self.options.force,
                    refresh=self.options.refresh,
                    defer_commit=(not self.options.commit_each),
                    quiet=self.options.quiet)
        if not self.options.commit_each:
            commit()
    def check(self):
        '''Report datasets missing from the search index.'''
        from ckan.lib.search import check
        check()
    def show(self):
        '''Print the indexed document for one dataset.'''
        from ckan.lib.search import show
        if not len(self.args) == 2:
            print('Missing parameter: dataset-name')
            return
        index = show(self.args[1])
        pprint(index)
    def clear(self):
        '''Clear the index for one dataset, or the whole index.'''
        from ckan.lib.search import clear, clear_all
        package_id = self.args[1] if len(self.args) > 1 else None
        if not package_id:
            clear_all()
        else:
            clear(package_id)
    def rebuild_fast(self):
        '''Rebuild the index in parallel, one worker per CPU core.'''
        ### Get out config but without starting pylons environment ####
        # NOTE(review): CkanCommand does not define _get_config() in this
        # file - presumably inherited or defined elsewhere; verify.
        conf = self._get_config()
        ### Get ids using own engine, otherwise multiprocess will balk
        db_url = conf['sqlalchemy.url']
        engine = sa.create_engine(db_url)
        package_ids = []
        result = engine.execute("select id from package where state = 'active';")
        for row in result:
            package_ids.append(row[0])
        def start(ids):
            ## load actual environment for each subprocess, so each has its
            ## own sa session
            self._load_config()
            from ckan.lib.search import rebuild, commit
            rebuild(package_ids=ids)
            commit()
        def chunks(l, n):
            """ Yield n successive chunks from l.
            """
            # First n-1 chunks have len(l)//n items; the last chunk takes
            # whatever remains from index (n-1)*newn onwards.
            newn = int(len(l) / n)
            for i in xrange(0, n-1):
                yield l[i*newn:i*newn+newn]
            yield l[n*newn-newn:]
        processes = []
        # `mp` is assumed to be the multiprocessing module imported at
        # the top of the file (not visible in this chunk).
        for chunk in chunks(package_ids, mp.cpu_count()):
            process = mp.Process(target=start, args=(chunk,))
            processes.append(process)
            process.daemon = True
            process.start()
        for process in processes:
            process.join()
class Notification(CkanCommand):
    '''Send out modification notifications.
    In "replay" mode, an update signal is sent for each dataset in the database.
    Usage:
      notify replay - send out modification signals
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 1
    min_args = 0
    def command(self):
        '''Fire a "changed" modification signal for every dataset.'''
        self._load_config()
        from ckan.model import Session, Package, DomainObjectOperation
        from ckan.model.modification import DomainObjectModificationExtension
        # With no argument the command defaults to 'replay'.
        cmd = self.args[0] if self.args else 'replay'
        if cmd != 'replay':
            print('Command %s not recognized' % cmd)
            return
        notifier = DomainObjectModificationExtension()
        for pkg in Session.query(Package):
            notifier.notify(pkg, DomainObjectOperation.changed)
class RDFExport(CkanCommand):
    '''Export active datasets as RDF
    This command dumps out all currently active datasets as RDF into the
    specified folder.
    Usage:
      paster rdf-export /path/to/store/output
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    def command(self):
        '''Print usage, or export to the folder given as first arg.'''
        self._load_config()
        if not self.args:
            # default to run
            print(RDFExport.__doc__)
        else:
            self.export_datasets(self.args[0])
    def export_datasets(self, out_folder):
        '''
        Export datasets as RDF to an output folder.

        Fetches each active dataset's .rdf serialization over HTTP from
        the configured site and writes it to ``out_folder``.
        '''
        from ckan.common import config
        import ckan.model as model
        import ckan.logic as logic
        import ckan.lib.helpers as h
        # Create output folder if not exists
        if not os.path.isdir(out_folder):
            os.makedirs(out_folder)
        fetch_url = config['ckan.site_url']
        user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
        context = {'model': model, 'session': model.Session, 'user': user['name']}
        dataset_names = logic.get_action('package_list')(context, {})
        for dataset_name in dataset_names:
            dd = logic.get_action('package_show')(context, {'id': dataset_name})
            if not dd['state'] == 'active':
                continue
            url = h.url_for(controller='package', action='read', id=dd['name'])
            url = urljoin(fetch_url, url[1:]) + '.rdf'
            try:
                fname = os.path.join(out_folder, dd['name']) + ".rdf"
                try:
                    r = urlopen(url).read()
                except HTTPError as e:
                    if e.code == 404:
                        error('Please install ckanext-dcat and enable the ' +
                              '`dcat` plugin to use the RDF serializations')
                    # BUG FIX: previously a non-404 HTTPError was silently
                    # swallowed and the unbound name `r` raised a NameError
                    # below.  Report the failure and continue with the
                    # next dataset instead.
                    sys.stderr.write('Failed to fetch %s: %s\n' % (url, e))
                    continue
                with open(fname, 'wb') as f:
                    f.write(r)
            except IOError as ioe:
                sys.stderr.write(str(ioe) + "\n")
class Sysadmin(CkanCommand):
    '''Gives sysadmin rights to a named user
    Usage:
      sysadmin - lists sysadmins
      sysadmin list - lists sysadmins
      sysadmin add USERNAME - make an existing user into a sysadmin
      sysadmin add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
        - creates a new user that is a sysadmin
          (prompts for password and email if not
          supplied).
          Field can be: apikey
                        password
                        email
      sysadmin remove USERNAME - removes user from sysadmins
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = None
    min_args = 0
    def command(self):
        '''Dispatch to list/add/remove; no args defaults to list.'''
        self._load_config()
        cmd = self.args[0] if self.args else None
        if cmd is None or cmd == 'list':
            self.list()
        elif cmd == 'add':
            self.add()
        elif cmd == 'remove':
            self.remove()
        else:
            print('Command %s not recognized' % cmd)
    def list(self):
        '''Print all active sysadmin users.'''
        import ckan.model as model
        print('Sysadmins:')
        sysadmins = model.Session.query(model.User).filter_by(sysadmin=True,
                                                              state='active')
        print('count = %i' % sysadmins.count())
        for sysadmin in sysadmins:
            print('%s name=%s email=%s id=%s' % (
                sysadmin.__class__.__name__,
                sysadmin.name,
                sysadmin.email,
                sysadmin.id))
    def add(self):
        '''Grant sysadmin rights, creating the user if necessary.'''
        import ckan.model as model
        if len(self.args) < 2:
            print('Need name of the user to be made sysadmin.')
            return
        username = self.args[1]
        user = model.User.by_name(text_type(username))
        if not user:
            print('User "%s" not found' % username)
            # Offer to create the missing user interactively.
            makeuser = input('Create new user: %s? [y/n]' % username)
            if makeuser == 'y':
                user_add(self.args[1:])
                user = model.User.by_name(text_type(username))
            else:
                print('Exiting ...')
                return
        user.sysadmin = True
        model.Session.add(user)
        model.repo.commit_and_remove()
        print('Added %s as sysadmin' % username)
    def remove(self):
        '''Revoke sysadmin rights from an existing user.'''
        import ckan.model as model
        if len(self.args) < 2:
            # NOTE(review): message says "made sysadmin" but this is the
            # remove path - looks like a copy/paste slip.
            print('Need name of the user to be made sysadmin.')
            return
        username = self.args[1]
        user = model.User.by_name(text_type(username))
        if not user:
            print('Error: user "%s" not found!' % username)
            return
        user.sysadmin = False
        model.repo.commit_and_remove()
class UserCmd(CkanCommand):
    '''Manage users
    Usage:
      user - lists users
      user list - lists users
      user USERNAME - shows user properties
      user add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
        - add a user (prompts for email and
          password if not supplied).
          Field can be: apikey
                        password
                        email
      user setpass USERNAME - set user password (prompts)
      user remove USERNAME - removes user from users
      user search QUERY - searches for a user name
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = None
    min_args = 0
    def command(self):
        '''Dispatch: no args lists users; an unknown first arg is
        treated as a user name to show.'''
        self._load_config()
        if not self.args:
            self.list()
        else:
            cmd = self.args[0]
            if cmd == 'add':
                self.add()
            elif cmd == 'remove':
                self.remove()
            elif cmd == 'search':
                self.search()
            elif cmd == 'setpass':
                self.setpass()
            elif cmd == 'list':
                self.list()
            else:
                self.show()
    def get_user_str(self, user):
        '''One-line summary of a user: name plus display name if set.'''
        user_str = 'name=%s' % user.name
        if user.name != user.display_name:
            user_str += ' display=%s' % user.display_name
        return user_str
    def list(self):
        '''Print all active users.'''
        import ckan.model as model
        print('Users:')
        users = model.Session.query(model.User).filter_by(state='active')
        print('count = %i' % users.count())
        for user in users:
            print(self.get_user_str(user))
    def show(self):
        '''Print the properties of the user named in args[0].'''
        import ckan.model as model
        username = self.args[0]
        user = model.User.get(text_type(username))
        print('User: \n', user)
    def setpass(self):
        '''Interactively set the password for an existing user.'''
        import ckan.model as model
        if len(self.args) < 2:
            print('Need name of the user.')
            return
        username = self.args[1]
        user = model.User.get(username)
        # NOTE(review): user may be None here if the name is unknown,
        # in which case the next line raises AttributeError - verify
        # whether a friendlier error is wanted.
        print('Editing user: %r' % user.name)
        password = self.password_prompt()
        user.password = password
        model.repo.commit_and_remove()
        print('Done')
    def search(self):
        '''Print users whose names match the query string.'''
        import ckan.model as model
        if len(self.args) < 2:
            print('Need user name query string.')
            return
        query_str = self.args[1]
        query = model.User.search(query_str)
        print('%i users matching %r:' % (query.count(), query_str))
        for user in query.all():
            print(self.get_user_str(user))
    @classmethod
    def password_prompt(cls):
        '''Prompt twice for a password; exits via error() on mismatch.'''
        import getpass
        password1 = None
        while not password1:
            password1 = getpass.getpass('Password: ')
        password2 = getpass.getpass('Confirm password: ')
        if password1 != password2:
            # error() exits the process rather than re-prompting.
            error('Passwords do not match')
        return password1
    def add(self):
        '''Create a new user from the remaining CLI args.'''
        user_add(self.args[1:])
    def remove(self):
        '''Delete (soft-delete via user_delete action) a user.'''
        import ckan.model as model
        if len(self.args) < 2:
            print('Need name of the user.')
            return
        username = self.args[1]
        p.toolkit.get_action('user_delete')(
            {'model': model, 'ignore_auth': True},
            {'id': username})
        print('Deleted user: %s' % username)
class DatasetCmd(CkanCommand):
    '''Manage datasets
    Usage:
      dataset DATASET_NAME|ID - shows dataset properties
      dataset show DATASET_NAME|ID - shows dataset properties
      dataset list - lists datasets
      dataset delete [DATASET_NAME|ID] - changes dataset state to 'deleted'
      dataset purge [DATASET_NAME|ID] - removes dataset from db entirely
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 3
    min_args = 0
    def command(self):
        '''Dispatch; a bare name argument is treated as "show".'''
        self._load_config()
        if not self.args:
            print(self.usage)
        else:
            cmd = self.args[0]
            # NOTE(review): delete/purge/show index self.args[1] without
            # a length check, so a missing name raises IndexError even
            # though the usage text marks it optional.
            if cmd == 'delete':
                self.delete(self.args[1])
            elif cmd == 'purge':
                self.purge(self.args[1])
            elif cmd == 'list':
                self.list()
            elif cmd == 'show':
                self.show(self.args[1])
            else:
                self.show(self.args[0])
    def list(self):
        '''Print id, name and (non-active) state of every dataset.'''
        import ckan.model as model
        print('Datasets:')
        datasets = model.Session.query(model.Package)
        print('count = %i' % datasets.count())
        for dataset in datasets:
            state = ('(%s)' % dataset.state) if dataset.state != 'active' else ''
            print('%s %s %s' % (dataset.id, dataset.name, state))
    def _get_dataset(self, dataset_ref):
        '''Resolve a dataset name or id; asserts it exists.'''
        import ckan.model as model
        dataset = model.Package.get(text_type(dataset_ref))
        assert dataset, 'Could not find dataset matching reference: %r' % dataset_ref
        return dataset
    def show(self, dataset_ref):
        '''Pretty-print the dataset as a dict.'''
        import pprint
        dataset = self._get_dataset(dataset_ref)
        pprint.pprint(dataset.as_dict())
    def delete(self, dataset_ref):
        '''Set the dataset state to deleted and report the change.'''
        import ckan.model as model
        dataset = self._get_dataset(dataset_ref)
        old_state = dataset.state
        # NOTE(review): `rev` is unused; presumably new_revision() is
        # called for its side effect of opening a revision - confirm.
        rev = model.repo.new_revision()
        dataset.delete()
        model.repo.commit_and_remove()
        dataset = self._get_dataset(dataset_ref)
        print('%s %s -> %s' % (dataset.name, old_state, dataset.state))
    def purge(self, dataset_ref):
        '''Remove the dataset from the database entirely.'''
        import ckan.logic as logic
        dataset = self._get_dataset(dataset_ref)
        name = dataset.name
        site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
        context = {'user': site_user['name']}
        logic.get_action('dataset_purge')(
            context, {'id': dataset_ref})
        print('%s purged' % name)
class Celery(CkanCommand):
    '''Celery daemon [DEPRECATED]
    This command is DEPRECATED, use `paster jobs` instead.
    Usage:
        celeryd <run> - run the celery daemon
        celeryd run concurrency - run the celery daemon with
                                  argument 'concurrency'
        celeryd view - view all tasks in the queue
        celeryd clean - delete all tasks in the queue
    '''
    min_args = 0
    max_args = 2
    summary = __doc__.split('\n')[0]
    usage = __doc__
    def command(self):
        '''Dispatch to run/view/clean; no args defaults to run.'''
        if not self.args:
            self.run_()
        else:
            cmd = self.args[0]
            if cmd == 'run':
                self.run_()
            elif cmd == 'view':
                self.view()
            elif cmd == 'clean':
                self.clean()
            else:
                error('Command %s not recognized' % cmd)
    def run_(self):
        '''Start the celery worker, pointing it at the CKAN config.'''
        deprecation_warning(u'Use `paster jobs worker` instead.')
        default_ini = os.path.join(os.getcwd(), 'development.ini')
        if self.options.config:
            os.environ['CKAN_CONFIG'] = os.path.abspath(self.options.config)
        elif os.path.isfile(default_ini):
            os.environ['CKAN_CONFIG'] = default_ini
        else:
            error('No .ini specified and none was found in current directory')
        from ckan.lib.celery_app import celery
        celery_args = []
        # NOTE(review): the literal argument 'concurrency' enables a
        # hard-coded --concurrency=1; no variable level is supported.
        if len(self.args) == 2 and self.args[1] == 'concurrency':
            celery_args.append('--concurrency=1')
        celery.worker_main(argv=['celeryd', '--loglevel=INFO'] + celery_args)
    def view(self):
        '''Print visibility of every task in the kombu queue.'''
        deprecation_warning(u'Use `paster jobs list` instead.')
        self._load_config()
        import ckan.model as model
        from kombu.transport.sqlalchemy.models import Message
        q = model.Session.query(Message)
        q_visible = q.filter_by(visible=True)
        print('%i messages (total)' % q.count())
        print('%i visible messages' % q_visible.count())
        for message in q:
            if message.visible:
                print('%i: Visible' % (message.id))
            else:
                print('%i: Invisible Sent:%s' % (message.id, message.sent_at))
    def clean(self):
        '''Delete all tasks from the kombu queue table.'''
        deprecation_warning(u'Use `paster jobs clear` instead.')
        self._load_config()
        import ckan.model as model
        query = model.Session.execute("select * from kombu_message")
        tasks_initially = query.rowcount
        if not tasks_initially:
            print('No tasks to delete')
            sys.exit(0)
        query = model.Session.execute("delete from kombu_message")
        # Re-count to verify the deletion actually emptied the table.
        query = model.Session.execute("select * from kombu_message")
        tasks_afterwards = query.rowcount
        print('%i of %i tasks deleted' % (tasks_initially - tasks_afterwards,
                                          tasks_initially))
        if tasks_afterwards:
            error('Failed to delete all tasks')
        model.repo.commit_and_remove()
class Ratings(CkanCommand):
    '''Manage the ratings stored in the db
    Usage:
      ratings count - counts ratings
      ratings clean - remove all ratings
      ratings clean-anonymous - remove only anonymous ratings
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 1
    min_args = 1
    def command(self):
        '''Dispatch to count/clean/clean-anonymous.'''
        self._load_config()
        import ckan.model as model
        cmd = self.args[0]
        if cmd == 'count':
            self.count()
        elif cmd == 'clean':
            self.clean()
        elif cmd == 'clean-anonymous':
            self.clean(user_ratings=False)
        else:
            print('Command %s not recognized' % cmd)
    def count(self):
        '''Print the total number of ratings and how many are anonymous.'''
        import ckan.model as model
        q = model.Session.query(model.Rating)
        print("%i ratings" % q.count())
        # BUG FIX: `model.Rating.user_id is None` is a Python identity
        # check that always evaluates to False, so the filter matched
        # nothing.  Use SQLAlchemy's IS NULL comparison instead.
        q = q.filter(model.Rating.user_id.is_(None))
        print("of which %i are anonymous ratings" % q.count())
    def clean(self, user_ratings=True):
        '''Purge ratings; with user_ratings=False only anonymous ones.'''
        import ckan.model as model
        q = model.Session.query(model.Rating)
        print("%i ratings" % q.count())
        if not user_ratings:
            # BUG FIX: same `is None` identity-check bug as in count() -
            # the old code filtered on the constant False, so
            # clean-anonymous never matched (and deleted) anything.
            q = q.filter(model.Rating.user_id.is_(None))
            print("of which %i are anonymous ratings" % q.count())
        ratings = q.all()
        for rating in ratings:
            rating.purge()
        model.repo.commit_and_remove()
## Used by the Tracking class: one record of view statistics per dataset,
## where id/name identify the package and count is the number of views.
_ViewCount = collections.namedtuple("ViewCount", "id name count")
class Tracking(CkanCommand):
'''Update tracking statistics
Usage:
tracking update [start_date] - update tracking stats
tracking export FILE [start_date] - export tracking stats to a csv file
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
engine = model.meta.engine
cmd = self.args[0]
if cmd == 'update':
start_date = self.args[1] if len(self.args) > 1 else None
self.update_all(engine, start_date)
elif cmd == 'export':
if len(self.args) <= 1:
error(self.__class__.__doc__)
output_file = self.args[1]
start_date = self.args[2] if len(self.args) > 2 else None
self.update_all(engine, start_date)
self.export_tracking(engine, output_file)
else:
error(self.__class__.__doc__)
def update_all(self, engine, start_date=None):
if start_date:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
else:
# No date given. See when we last have data for and get data
# from 2 days before then in case new data is available.
# If no date here then use 2011-01-01 as the start date
sql = '''SELECT tracking_date from tracking_summary
ORDER BY tracking_date DESC LIMIT 1;'''
result = engine.execute(sql).fetchall()
if result:
start_date = result[0]['tracking_date']
start_date += datetime.timedelta(-2)
# convert date to datetime
combine = datetime.datetime.combine
start_date = combine(start_date, datetime.time(0))
else:
start_date = datetime.datetime(2011, 1, 1)
start_date_solrsync = start_date
end_date = datetime.datetime.now()
while start_date < end_date:
stop_date = start_date + datetime.timedelta(1)
self.update_tracking(engine, start_date)
print('tracking updated for %s' % start_date)
start_date = stop_date
self.update_tracking_solr(engine, start_date_solrsync)
def _total_views(self, engine):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql).fetchall()]
def _recent_views(self, engine, measure_from):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
WHERE s.tracking_date >= %(measure_from)s
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql, measure_from=str(measure_from)).fetchall()]
def export_tracking(self, engine, output_filename):
'''Write tracking summary to a csv file.'''
HEADINGS = [
"dataset id",
"dataset name",
"total views",
"recent views (last 2 weeks)",
]
measure_from = datetime.date.today() - datetime.timedelta(days=14)
recent_views = self._recent_views(engine, measure_from)
total_views = self._total_views(engine)
with open(output_filename, 'w') as fh:
f_out = csv.writer(fh)
f_out.writerow(HEADINGS)
recent_views_for_id = dict((r.id, r.count) for r in recent_views)
f_out.writerows([(r.id,
r.name,
r.count,
recent_views_for_id.get(r.id, 0))
for r in total_views])
def update_tracking(self, engine, summary_date):
PACKAGE_URL = '/dataset/'
# clear out existing data before adding new
sql = '''DELETE FROM tracking_summary
WHERE tracking_date='%s'; ''' % summary_date
engine.execute(sql)
sql = '''SELECT DISTINCT url, user_key,
CAST(access_timestamp AS Date) AS tracking_date,
tracking_type INTO tracking_tmp
FROM tracking_raw
WHERE CAST(access_timestamp as Date)=%s;
INSERT INTO tracking_summary
(url, count, tracking_date, tracking_type)
SELECT url, count(user_key), tracking_date, tracking_type
FROM tracking_tmp
GROUP BY url, tracking_date, tracking_type;
DROP TABLE tracking_tmp;
COMMIT;'''
engine.execute(sql, summary_date)
# get ids for dataset urls
sql = '''UPDATE tracking_summary t
SET package_id = COALESCE(
(SELECT id FROM package p
WHERE p.name = regexp_replace(' ' || t.url, '^[ ]{1}(/\w{2}){0,1}' || %s, ''))
,'~~not~found~~')
WHERE t.package_id IS NULL
AND tracking_type = 'page';'''
engine.execute(sql, PACKAGE_URL)
# update summary totals for resources
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'resource';'''
engine.execute(sql)
# update summary totals for pages
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'page'
AND t1.package_id IS NOT NULL
AND t1.package_id != '~~not~found~~';'''
engine.execute(sql)
def update_tracking_solr(self, engine, start_date):
    '''Rebuild the search index for every package that has tracking
    activity on or after ``start_date``.

    :param engine: SQLAlchemy engine bound to the CKAN database
    :param start_date: earliest tracking date to include
    '''
    sql = '''SELECT package_id FROM tracking_summary
            where package_id!='~~not~found~~'
            and tracking_date >= %s;'''
    results = engine.execute(sql, start_date)
    package_ids = set()
    for row in results:
        package_ids.add(row['package_id'])

    total = len(package_ids)
    not_found = 0
    print('%i package index%s to be rebuilt starting from %s' % (total, '' if total < 2 else 'es', start_date))

    from ckan.lib.search import rebuild
    for package_id in package_ids:
        # BUG FIX: removed a dead `except: raise` clause -- a bare except
        # that only re-raises is a no-op and hides intent.
        try:
            rebuild(package_id)
        except logic.NotFound:
            # package exists in tracking data but not in the db any more
            print("Error: package %s not found." % (package_id))
            not_found += 1
        except KeyboardInterrupt:
            print("Stopped.")
            return
    print('search index rebuilding done.' + (' %i not found.' % (not_found) if not_found else ""))
class PluginInfo(CkanCommand):
    '''Provide info on installed plugins.
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 0
    min_args = 0

    def command(self):
        self.get_info()

    def get_info(self):
        ''' print info about current plugins from the .ini file'''
        import ckan.plugins as p
        self._load_config()
        interfaces = {}
        plugins = {}
        # collect every Interface subclass exposed by ckan.plugins
        for name in dir(p):
            item = getattr(p, name)
            try:
                if issubclass(item, p.Interface):
                    interfaces[item] = {'class': item}
            except TypeError:
                # dir() also yields non-class attributes; skip them
                pass

        # map each loaded plugin to the interfaces it implements
        for interface in interfaces:
            for plugin in p.PluginImplementations(interface):
                name = plugin.name
                if name not in plugins:
                    plugins[name] = {'doc': plugin.__doc__,
                                     'class': plugin,
                                     'implements': []}
                plugins[name]['implements'].append(interface.__name__)

        for plugin in plugins:
            p = plugins[plugin]
            print(plugin + ':')
            print('-' * (len(plugin) + 1))
            if p['doc']:
                print(p['doc'])
            print('Implements:')
            for i in p['implements']:
                extra = None
                if i == 'ITemplateHelpers':
                    extra = self.template_helpers(p['class'])
                if i == 'IActions':
                    extra = self.actions(p['class'])
                print(' %s' % i)
                if extra:
                    print(extra)
            # BUG FIX: was a bare Python-2 `print` statement, inconsistent
            # with the function-style print() used everywhere else and a
            # syntax error on Python 3; prints the same blank line.
            print('')

    def actions(self, cls):
        ''' Return readable action function info. '''
        actions = cls.get_actions()
        return self.function_info(actions)

    def template_helpers(self, cls):
        ''' Return readable helper function info. '''
        helpers = cls.get_helpers()
        return self.function_info(helpers)

    def function_info(self, functions):
        ''' Take a dict of functions and output readable info '''
        import inspect
        output = []
        for function_name in functions:
            fn = functions[function_name]
            # NOTE: getargspec is deprecated on Python 3; kept for the
            # Python 2 runtime this file targets.
            args_info = inspect.getargspec(fn)
            params = args_info.args
            num_params = len(params)
            if args_info.varargs:
                params.append('*' + args_info.varargs)
            if args_info.keywords:
                params.append('**' + args_info.keywords)
            # render default values next to their parameter names
            if args_info.defaults:
                offset = num_params - len(args_info.defaults)
                for i, v in enumerate(args_info.defaults):
                    params[i + offset] = params[i + offset] + '=' + repr(v)
            # is this a classmethod if so remove the first parameter
            if inspect.ismethod(fn) and inspect.isclass(fn.__self__):
                params = params[1:]
            params = ', '.join(params)
            output.append(' %s(%s)' % (function_name, params))
            # doc string
            if fn.__doc__:
                bits = fn.__doc__.split('\n')
                for bit in bits:
                    output.append(' %s' % bit)
        return ('\n').join(output)
class CreateTestDataCommand(CkanCommand):
    '''Create test data in the database.
    Tests can also delete the created objects easily with the delete() method.
    create-test-data - annakarenina and warandpeace
    create-test-data search - realistic data to test search
    create-test-data gov - government style data
    create-test-data family - package relationships data
    create-test-data user - create a user 'tester' with api key 'tester'
    create-test-data translations - annakarenina, warandpeace, and some test
    translations of terms
    create-test-data vocabs - annakerenina, warandpeace, and some test
    vocabularies
    create-test-data hierarchy - hierarchy of groups
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 1
    min_args = 0

    def command(self):
        self._load_config()
        from ckan import plugins
        from create_test_data import CreateTestData

        # default to the basic fixture set when no sub-command is given
        cmd = self.args[0] if self.args else 'basic'
        if self.verbose:
            print('Creating %s test data' % cmd)

        # dispatch table: sub-command name -> fixture creator
        creators = {
            'basic': CreateTestData.create_basic_test_data,
            'user': CreateTestData.create_test_user,
            'search': CreateTestData.create_search_test_data,
            'gov': CreateTestData.create_gov_test_data,
            'family': CreateTestData.create_family_test_data,
            'translations': CreateTestData.create_translations_test_data,
            'vocabs': CreateTestData.create_vocabs_test_data,
            'hierarchy': CreateTestData.create_group_hierarchy_test_data,
        }
        if cmd not in creators:
            print('Command %s not recognized' % cmd)
            raise NotImplementedError

        creators[cmd]()
        if cmd == 'user':
            print('Created user %r with password %r and apikey %r' %
                  ('tester', 'tester', 'tester'))
        if self.verbose:
            print('Creating %s test data: Complete!' % cmd)
class Profile(CkanCommand):
    '''Code speed profiler
    Provide a ckan url and it will make the request and record
    how long each function call took in a file that can be read
    by pstats.Stats (command-line) or runsnakerun (gui).
    Usage:
        profile URL [username]
    e.g. profile /data/search
    The result is saved in profile.data.search
    To view the profile in runsnakerun:
        runsnakerun ckan.data.search.profile
    You may need to install python module: cProfile
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 2
    min_args = 1

    def _load_config_into_test_app(self):
        # Build an in-process paste TestApp from the supplied .ini config so
        # the request can be profiled without a running server.
        from paste.deploy import loadapp
        import paste.fixture
        if not self.options.config:
            msg = 'No config file supplied'
            raise self.BadCommand(msg)
        self.filename = os.path.abspath(self.options.config)
        if not os.path.exists(self.filename):
            raise AssertionError('Config filename %r does not exist.' % self.filename)
        # fileConfig comes from a module-level import not visible in this
        # chunk; configures logging from the ini file.
        fileConfig(self.filename)
        wsgiapp = loadapp('config:' + self.filename)
        self.app = paste.fixture.TestApp(wsgiapp)

    def command(self):
        self._load_config_into_test_app()
        import paste.fixture
        import cProfile
        import re
        url = self.args[0]
        # optional second argument selects the user the request runs as
        if self.args[1:]:
            user = self.args[1]
        else:
            user = 'visitor'

        def profile_url(url):
            # The response itself is discarded -- only the call timings
            # captured by cProfile matter.
            try:
                res = self.app.get(url, status=[200],
                                   extra_environ={'REMOTE_USER': user})
            except paste.fixture.AppError:
                print('App error: ', url.strip())
            except KeyboardInterrupt:
                raise
            except Exception:
                error(traceback.format_exc())

        # url.replace() already turns '/' into '.'; the re.sub additionally
        # replaces '?' so query strings produce a valid filename.
        output_filename = 'ckan%s.profile' % re.sub('[/?]', '.', url.replace('/', '.'))
        profile_command = "profile_url('%s')" % url
        # run in this scope so profile_url/user are visible to the exec'd code
        cProfile.runctx(profile_command, globals(), locals(), filename=output_filename)
        import pstats
        stats = pstats.Stats(output_filename)
        stats.sort_stats('cumulative')
        stats.print_stats(0.1)  # show only top 10% of lines
        print('Only top 10% of lines shown')
        print('Written profile to: %s' % output_filename)
class CreateColorSchemeCommand(CkanCommand):
    '''Create or remove a color scheme.
    After running this, you'll need to regenerate the css files. See paster's less command for details.
    color - creates a random color scheme
    color clear - clears any color scheme
    color <'HEX'> - uses as base color eg '#ff00ff' must be quoted.
    color <VALUE> - a float between 0.0 and 1.0 used as base hue
    color <COLOR_NAME> - html color name used for base color eg lightblue
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 1
    min_args = 0

    # less variables that receive the generated colors, in order
    rules = [
        '@layoutLinkColor',
        '@mastheadBackgroundColor',
        '@btnPrimaryBackground',
        '@btnPrimaryBackgroundHighlight',
    ]

    # list of predefined colors (CSS named color keywords)
    # BUG FIX: 'aliceblue' was '#f0fff8' (transposed), 'mediumpurple' was
    # '#9370d8' and 'palevioletred' was '#d87093' -- corrected to the CSS
    # standard values. 'indianred ' and 'indigo ' had trailing spaces in
    # their keys, which made them unmatchable via `arg.lower() in color_list`.
    color_list = {
        'aliceblue': '#f0f8ff',
        'antiquewhite': '#faebd7',
        'aqua': '#00ffff',
        'aquamarine': '#7fffd4',
        'azure': '#f0ffff',
        'beige': '#f5f5dc',
        'bisque': '#ffe4c4',
        'black': '#000000',
        'blanchedalmond': '#ffebcd',
        'blue': '#0000ff',
        'blueviolet': '#8a2be2',
        'brown': '#a52a2a',
        'burlywood': '#deb887',
        'cadetblue': '#5f9ea0',
        'chartreuse': '#7fff00',
        'chocolate': '#d2691e',
        'coral': '#ff7f50',
        'cornflowerblue': '#6495ed',
        'cornsilk': '#fff8dc',
        'crimson': '#dc143c',
        'cyan': '#00ffff',
        'darkblue': '#00008b',
        'darkcyan': '#008b8b',
        'darkgoldenrod': '#b8860b',
        'darkgray': '#a9a9a9',
        'darkgrey': '#a9a9a9',
        'darkgreen': '#006400',
        'darkkhaki': '#bdb76b',
        'darkmagenta': '#8b008b',
        'darkolivegreen': '#556b2f',
        'darkorange': '#ff8c00',
        'darkorchid': '#9932cc',
        'darkred': '#8b0000',
        'darksalmon': '#e9967a',
        'darkseagreen': '#8fbc8f',
        'darkslateblue': '#483d8b',
        'darkslategray': '#2f4f4f',
        'darkslategrey': '#2f4f4f',
        'darkturquoise': '#00ced1',
        'darkviolet': '#9400d3',
        'deeppink': '#ff1493',
        'deepskyblue': '#00bfff',
        'dimgray': '#696969',
        'dimgrey': '#696969',
        'dodgerblue': '#1e90ff',
        'firebrick': '#b22222',
        'floralwhite': '#fffaf0',
        'forestgreen': '#228b22',
        'fuchsia': '#ff00ff',
        'gainsboro': '#dcdcdc',
        'ghostwhite': '#f8f8ff',
        'gold': '#ffd700',
        'goldenrod': '#daa520',
        'gray': '#808080',
        'grey': '#808080',
        'green': '#008000',
        'greenyellow': '#adff2f',
        'honeydew': '#f0fff0',
        'hotpink': '#ff69b4',
        'indianred': '#cd5c5c',
        'indigo': '#4b0082',
        'ivory': '#fffff0',
        'khaki': '#f0e68c',
        'lavender': '#e6e6fa',
        'lavenderblush': '#fff0f5',
        'lawngreen': '#7cfc00',
        'lemonchiffon': '#fffacd',
        'lightblue': '#add8e6',
        'lightcoral': '#f08080',
        'lightcyan': '#e0ffff',
        'lightgoldenrodyellow': '#fafad2',
        'lightgray': '#d3d3d3',
        'lightgrey': '#d3d3d3',
        'lightgreen': '#90ee90',
        'lightpink': '#ffb6c1',
        'lightsalmon': '#ffa07a',
        'lightseagreen': '#20b2aa',
        'lightskyblue': '#87cefa',
        'lightslategray': '#778899',
        'lightslategrey': '#778899',
        'lightsteelblue': '#b0c4de',
        'lightyellow': '#ffffe0',
        'lime': '#00ff00',
        'limegreen': '#32cd32',
        'linen': '#faf0e6',
        'magenta': '#ff00ff',
        'maroon': '#800000',
        'mediumaquamarine': '#66cdaa',
        'mediumblue': '#0000cd',
        'mediumorchid': '#ba55d3',
        'mediumpurple': '#9370db',
        'mediumseagreen': '#3cb371',
        'mediumslateblue': '#7b68ee',
        'mediumspringgreen': '#00fa9a',
        'mediumturquoise': '#48d1cc',
        'mediumvioletred': '#c71585',
        'midnightblue': '#191970',
        'mintcream': '#f5fffa',
        'mistyrose': '#ffe4e1',
        'moccasin': '#ffe4b5',
        'navajowhite': '#ffdead',
        'navy': '#000080',
        'oldlace': '#fdf5e6',
        'olive': '#808000',
        'olivedrab': '#6b8e23',
        'orange': '#ffa500',
        'orangered': '#ff4500',
        'orchid': '#da70d6',
        'palegoldenrod': '#eee8aa',
        'palegreen': '#98fb98',
        'paleturquoise': '#afeeee',
        'palevioletred': '#db7093',
        'papayawhip': '#ffefd5',
        'peachpuff': '#ffdab9',
        'peru': '#cd853f',
        'pink': '#ffc0cb',
        'plum': '#dda0dd',
        'powderblue': '#b0e0e6',
        'purple': '#800080',
        'red': '#ff0000',
        'rosybrown': '#bc8f8f',
        'royalblue': '#4169e1',
        'saddlebrown': '#8b4513',
        'salmon': '#fa8072',
        'sandybrown': '#f4a460',
        'seagreen': '#2e8b57',
        'seashell': '#fff5ee',
        'sienna': '#a0522d',
        'silver': '#c0c0c0',
        'skyblue': '#87ceeb',
        'slateblue': '#6a5acd',
        'slategray': '#708090',
        'slategrey': '#708090',
        'snow': '#fffafa',
        'springgreen': '#00ff7f',
        'steelblue': '#4682b4',
        'tan': '#d2b48c',
        'teal': '#008080',
        'thistle': '#d8bfd8',
        'tomato': '#ff6347',
        'turquoise': '#40e0d0',
        'violet': '#ee82ee',
        'wheat': '#f5deb3',
        'white': '#ffffff',
        'whitesmoke': '#f5f5f5',
        'yellow': '#ffff00',
        'yellowgreen': '#9acd32',
    }

    def create_colors(self, hue, num_colors=5, saturation=None, lightness=None):
        ''' Create ``num_colors`` related colours from a base hue.

        :param hue: base hue in [0, 1]
        :param saturation: HLS saturation; defaults to 0.9
        :param lightness: base lightness in [0, 1]; defaults to 0.4
        :returns: list of '#rrggbb' hex strings
        '''
        if saturation is None:
            saturation = 0.9
        if lightness is None:
            lightness = 40
        else:
            lightness *= 100
        import math
        # keep only the fractional part so saturation stays in [0, 1)
        saturation -= math.trunc(saturation)
        import colorsys
        colors = []
        for i in xrange(num_colors):
            ix = i * (1.0 / num_colors)
            # spread the variants over progressively lighter shades
            _lightness = (lightness + (ix * 40)) / 100.
            if _lightness > 1.0:
                _lightness = 1.0
            color = colorsys.hls_to_rgb(hue, _lightness, saturation)
            hex_color = '#'
            for part in color:
                hex_color += '%02x' % int(part * 255)
            # check and remove any bad values
            if not re.match(r'^\#[0-9a-f]{6}$', hex_color):
                hex_color = '#FFFFFF'
            colors.append(hex_color)
        return colors

    def command(self):
        hue = None
        saturation = None
        lightness = None

        public = config.get(u'ckan.base_public_folder')

        path = os.path.dirname(__file__)
        path = os.path.join(path, '..', public, 'base', 'less', 'custom.less')

        if self.args:
            arg = self.args[0]
            rgb = None
            if arg == 'clear':
                os.remove(path)
                print('custom colors removed.')
            elif arg.startswith('#'):
                color = arg[1:]
                if len(color) == 3:
                    # short form #abc -> scale each nibble to a byte
                    rgb = [int(x, 16) * 16 for x in color]
                elif len(color) == 6:
                    rgb = [int(x, 16) for x in re.findall('..', color)]
                else:
                    print('ERROR: invalid color')
            elif arg.lower() in self.color_list:
                color = self.color_list[arg.lower()][1:]
                rgb = [int(x, 16) for x in re.findall('..', color)]
            else:
                try:
                    hue = float(self.args[0])
                except ValueError:
                    print('ERROR argument `%s` not recognised' % arg)
            if rgb:
                import colorsys
                # NOTE(review): rgb_to_hls is fed 0-255 ints although it
                # expects 0-1 floats; the /340 below rescales lightness to
                # roughly [0, 0.75]. Looks intentional -- confirm before
                # changing.
                hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
                lightness = lightness / 340
                # deal with greys
                if not (hue == 0.0 and saturation == 0.0):
                    saturation = None
        else:
            import random
            hue = random.random()
        if hue is not None:
            # BUG FIX: previously used `f = open(...)` and a final `f.close`
            # *without parentheses*, so the file was never explicitly closed;
            # a context manager guarantees it.
            colors = self.create_colors(hue, saturation=saturation, lightness=lightness)
            with open(path, 'w') as f:
                for i in xrange(len(self.rules)):
                    f.write('%s: %s;\n' % (self.rules[i], colors[i]))
                    print('%s: %s;\n' % (self.rules[i], colors[i]))
            print('Color scheme has been created.')
        print('Make sure less is run for changes to take effect.')
class TranslationsCommand(CkanCommand):
    '''Translation helper functions
    trans js - generate the javascript translations
    trans mangle - mangle the zh_TW translations for testing
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 1
    min_args = 1

    def command(self):
        self._load_config()
        from ckan.common import config
        from ckan.lib.i18n import build_js_translations
        ckan_path = os.path.join(os.path.dirname(__file__), '..')
        # i18n dir may be overridden in the config; default is ckan/i18n
        self.i18n_path = config.get('ckan.i18n_directory',
                                    os.path.join(ckan_path, 'i18n'))
        command = self.args[0]
        if command == 'mangle':
            self.mangle_po()
        elif command == 'js':
            build_js_translations()
        else:
            print('command not recognised')

    def mangle_po(self):
        ''' This will mangle the zh_TW translations for translation coverage
        testing.
        NOTE: This will destroy the current translations for zh_TW
        '''
        import polib
        pot_path = os.path.join(self.i18n_path, 'ckan.pot')
        po = polib.pofile(pot_path)
        # we don't want to mangle the following items in strings
        # %(...)s %s %0.3f %1$s %2$0.3f [1:...] {...} etc
        # sprintf bit after %
        # BUG FIX: raw string literals. The old non-raw strings relied on
        # Python keeping unrecognised escapes (\+, \d, \%) verbatim, which
        # raises Deprecation/SyntaxWarning on modern Pythons. The compiled
        # patterns are byte-identical.
        spf_reg_ex = r"\+?(0|'.)?-?\d*(.\d*)?[\%bcdeufosxX]"
        extract_reg_ex = r'(\%\([^\)]*\)' + spf_reg_ex + \
                         r'|\[\d*\:[^\]]*\]' + \
                         r'|\{[^\}]*\}' + \
                         r'|<[^>}]*>' + \
                         r'|\%((\d)*\$)?' + spf_reg_ex + r')'
        for entry in po:
            msg = entry.msgid.encode('utf-8')
            matches = re.finditer(extract_reg_ex, msg)
            length = len(msg)
            position = 0
            translation = u''
            # replace all text with dashes but keep placeholders intact so
            # format strings still render
            for match in matches:
                translation += '-' * (match.start() - position)
                position = match.end()
                translation += match.group(0)
            translation += '-' * (length - position)
            entry.msgstr = translation
        out_dir = os.path.join(self.i18n_path, 'zh_TW', 'LC_MESSAGES')
        try:
            os.makedirs(out_dir)
        except OSError:
            # directory already exists
            pass
        po.metadata['Plural-Forms'] = "nplurals=1; plural=0\n"
        out_po = os.path.join(out_dir, 'ckan.po')
        out_mo = os.path.join(out_dir, 'ckan.mo')
        po.save(out_po)
        po.save_as_mofile(out_mo)
        print('zh_TW has been mangled')
class MinifyCommand(CkanCommand):
    '''Create minified versions of the given Javascript and CSS files.
    Usage:
    paster minify [--clean] PATH
    for example:
    paster minify ckan/public/base
    paster minify ckan/public/base/css/*.css
    paster minify ckan/public/base/css/red.css
    if the --clean option is provided any minified files will be removed.
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    min_args = 1

    # directories never descended into when walking a path
    exclude_dirs = ['vendor']

    def __init__(self, name):
        super(MinifyCommand, self).__init__(name)
        self.parser.add_option('--clean', dest='clean',
                               action='store_true', default=False,
                               help='remove any minified files in the path')

    def command(self):
        clean = getattr(self.options, 'clean', False)
        self._load_config()
        for base_path in self.args:
            if os.path.isfile(base_path):
                if clean:
                    self.clear_minifyed(base_path)
                else:
                    self.minify_file(base_path)
            elif os.path.isdir(base_path):
                for root, dirs, files in os.walk(base_path):
                    # prune excluded dirs in place so os.walk skips them
                    dirs[:] = [d for d in dirs if d not in self.exclude_dirs]
                    for filename in files:
                        path = os.path.join(root, filename)
                        if clean:
                            self.clear_minifyed(path)
                        else:
                            self.minify_file(path)
            else:
                # Path is neither a file or a dir?
                continue

    def clear_minifyed(self, path):
        '''Remove ``path`` if it is a minified .js/.css file.'''
        path_only, extension = os.path.splitext(path)
        if extension not in ('.css', '.js'):
            # This is not a js or css file.
            return
        if path_only.endswith('.min'):
            print('removing %s' % path)
            os.remove(path)

    def minify_file(self, path):
        '''Create the minified version of the given file.
        If the file is not a .js or .css file (e.g. it's a .min.js or .min.css
        file, or it's some other type of file entirely) it will not be
        minifed.
        :param path: The path to the .js or .css file to minify
        '''
        import ckan.lib.fanstatic_resources as fanstatic_resources
        path_only, extension = os.path.splitext(path)
        if path_only.endswith('.min'):
            # This is already a minified file.
            return
        if extension not in ('.css', '.js'):
            # This is not a js or css file.
            return
        path_min = fanstatic_resources.min_path(path)
        # BUG FIX: the source handle was opened without being closed and the
        # output handle leaked on a write error; use context managers.
        with open(path, 'r') as in_file:
            source = in_file.read()
        with open(path_min, 'w') as out_file:
            if path.endswith('.css'):
                out_file.write(rcssmin.cssmin(source))
            elif path.endswith('.js'):
                out_file.write(rjsmin.jsmin(source))
        print("Minified file '{0}'".format(path))
class LessCommand(CkanCommand):
    '''Compile all root less documents into their CSS counterparts
    Usage:
    paster less
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    min_args = 0

    def command(self):
        self._load_config()
        self.less()

    # per-scheme less variable overrides written to custom.less before each
    # compile pass
    custom_css = {
        'fuchsia': '''
@layoutLinkColor: #E73892;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
        'green': '''
@layoutLinkColor: #2F9B45;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
        'red': '''
@layoutLinkColor: #C14531;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
        'maroon': '''
@layoutLinkColor: #810606;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
    }

    def less(self):
        ''' Compile less files '''
        import subprocess
        # locate lessc via the project-local npm bin directory
        command = 'npm bin'
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        output = process.communicate()
        directory = output[0].strip()
        less_bin = os.path.join(directory, 'lessc')

        public = config.get(u'ckan.base_public_folder')

        root = os.path.join(os.path.dirname(__file__), '..', public, 'base')
        root = os.path.abspath(root)
        custom_less = os.path.join(root, 'less', 'custom.less')
        # compile each predefined scheme, then reset custom.less and
        # compile the default main.css
        # BUG FIX: file handles leaked if a write raised; use `with`.
        for color in self.custom_css:
            with open(custom_less, 'w') as f:
                f.write(self.custom_css[color])
            self.compile_less(root, less_bin, color)
        with open(custom_less, 'w') as f:
            f.write('// This file is needed in order for ./bin/less to compile in less 1.3.1+\n')
        self.compile_less(root, less_bin, 'main')

    def compile_less(self, root, less_bin, color):
        '''Run lessc to build css/<color>.css from less/main.less.'''
        print('compile %s.css' % color)
        import subprocess
        main_less = os.path.join(root, 'less', 'main.less')
        main_css = os.path.join(root, 'css', '%s.css' % color)

        command = '%s %s %s' % (less_bin, main_less, main_css)

        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        output = process.communicate()
        print(output)
class FrontEndBuildCommand(CkanCommand):
    '''Creates and minifies css and JavaScript files
    Usage:
    paster front-end-build
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    min_args = 0

    def command(self):
        self._load_config()

        # Step 1: compile the less sources into css.
        less = LessCommand('less')
        less.options = self.options
        less.command()

        # Step 2: regenerate the javascript translation catalogues.
        trans = TranslationsCommand('trans')
        trans.options = self.options
        trans.args = ('js',)
        trans.command()

        # Step 3: minify everything under the public base dir and ckanext.
        minify = MinifyCommand('minify')
        minify.options = self.options
        public = config.get(u'ckan.base_public_folder')
        here = os.path.dirname(__file__)
        root = os.path.abspath(os.path.join(here, '..', public, 'base'))
        ckanext = os.path.abspath(os.path.join(here, '..', '..', 'ckanext'))
        minify.args = (root, ckanext)
        minify.command()
class ViewsCommand(CkanCommand):
    '''Manage resource views.
    Usage:
    paster views create [options] [type1] [type2] ...
    Create views on relevant resources. You can optionally provide
    specific view types (eg `recline_view`, `image_view`). If no types
    are provided, the default ones will be used. These are generally
    the ones defined in the `ckan.views.default_views` config option.
    Note that on either case, plugins must be loaded (ie added to
    `ckan.plugins`), otherwise the command will stop.
    paster views clear [options] [type1] [type2] ...
    Permanently delete all views or the ones with the provided types.
    paster views clean
    Permanently delete views for all types no longer present in the
    `ckan.plugins` configuration option.
    '''
    summary = __doc__.split('\n')[0]
    usage = __doc__
    min_args = 1

    def __init__(self, name):
        super(ViewsCommand, self).__init__(name)
        self.parser.add_option('-y', '--yes', dest='assume_yes',
                               action='store_true',
                               default=False,
                               help='''Automatic yes to prompts. Assume "yes"
as answer to all prompts and run non-interactively''')
        self.parser.add_option('-d', '--dataset', dest='dataset_id',
                               action='append',
                               help='''Create views on a particular dataset.
You can use the dataset id or name, and it can be defined multiple times.''')
        self.parser.add_option('--no-default-filters',
                               dest='no_default_filters',
                               action='store_true',
                               default=False,
                               help='''Do not add default filters for relevant
resource formats for the view types provided. Note that filters are not added
by default anyway if an unsupported view type is provided or when using the
`-s` or `-d` options.''')
        self.parser.add_option('-s', '--search', dest='search_params',
                               action='store',
                               default=False,
                               help='''Extra search parameters that will be
used for getting the datasets to create the resource views on. It must be a
JSON object like the one used by the `package_search` API call. Supported
fields are `q`, `fq` and `fq_list`. Check the documentation for examples.
Not used when using the `-d` option.''')

    def command(self):
        self._load_config()
        if not self.args:
            print(self.usage)
        elif self.args[0] == 'create':
            view_plugin_types = self.args[1:]
            self.create_views(view_plugin_types)
        elif self.args[0] == 'clear':
            view_plugin_types = self.args[1:]
            self.clear_views(view_plugin_types)
        elif self.args[0] == 'clean':
            self.clean_views()
        else:
            print(self.usage)

    # number of datasets fetched per package_search page
    _page_size = 100

    def _get_view_plugins(self, view_plugin_types,
                          get_datastore_views=False):
        '''
        Returns the view plugins that were succesfully loaded
        Views are provided as a list of ``view_plugin_types``. If no types are
        provided, the default views defined in the ``ckan.views.default_views``
        will be created. Only in this case (when the default view plugins are
        used) the `get_datastore_views` parameter can be used to get also view
        plugins that require data to be in the DataStore.
        If any of the provided plugins could not be loaded (eg it was not added
        to `ckan.plugins`) the command will stop.
        Returns a list of loaded plugin names.
        '''
        from ckan.lib.datapreview import (get_view_plugins,
                                          get_default_view_plugins
                                          )
        log = logging.getLogger(__name__)
        view_plugins = []
        if not view_plugin_types:
            log.info('No view types provided, using default types')
            view_plugins = get_default_view_plugins()
            if get_datastore_views:
                view_plugins.extend(
                    get_default_view_plugins(get_datastore_views=True))
        else:
            view_plugins = get_view_plugins(view_plugin_types)
        loaded_view_plugins = [view_plugin.info()['name']
                               for view_plugin in view_plugins]
        plugins_not_found = list(set(view_plugin_types) -
                                 set(loaded_view_plugins))
        if plugins_not_found:
            # error() halts the command
            error('View plugin(s) not found : {0}. '.format(plugins_not_found)
                  + 'Have they been added to the `ckan.plugins` configuration'
                  + ' option?')
        return loaded_view_plugins

    def _add_default_filters(self, search_data_dict, view_types):
        '''
        Adds extra filters to the `package_search` dict for common view types
        It basically adds `fq` parameters that filter relevant resource formats
        for the view types provided. For instance, if one of the view types is
        `pdf_view` the following will be added to the final query:
        fq=res_format:"pdf" OR res_format:"PDF"
        This obviously should only be used if all view types are known and can
        be filtered, otherwise we want all datasets to be returned. If a
        non-filterable view type is provided, the search params are not
        modified.
        Returns the provided data_dict for `package_search`, optionally
        modified with extra filters.
        '''
        from ckanext.imageview.plugin import DEFAULT_IMAGE_FORMATS
        from ckanext.textview.plugin import get_formats as get_text_formats
        from ckanext.datapusher.plugin import DEFAULT_FORMATS as \
            datapusher_formats
        filter_formats = []
        for view_type in view_types:
            if view_type == 'image_view':
                for _format in DEFAULT_IMAGE_FORMATS:
                    filter_formats.extend([_format, _format.upper()])
            elif view_type == 'text_view':
                formats = get_text_formats(config)
                for _format in itertools.chain.from_iterable(formats.values()):
                    filter_formats.extend([_format, _format.upper()])
            elif view_type == 'pdf_view':
                filter_formats.extend(['pdf', 'PDF'])
            elif view_type in ['recline_view', 'recline_grid_view',
                               'recline_graph_view', 'recline_map_view']:
                # all recline views share the datapusher format list; add
                # them once only
                if datapusher_formats[0] in filter_formats:
                    continue
                for _format in datapusher_formats:
                    if '/' not in _format:
                        filter_formats.extend([_format, _format.upper()])
            else:
                # There is another view type provided so we can't add any
                # filter
                return search_data_dict
        filter_formats_query = ['+res_format:"{0}"'.format(_format)
                                for _format in filter_formats]
        search_data_dict['fq_list'].append(' OR '.join(filter_formats_query))
        return search_data_dict

    def _update_search_params(self, search_data_dict):
        '''
        Update the `package_search` data dict with the user provided parameters
        Supported fields are `q`, `fq` and `fq_list`.
        If the provided JSON object can not be parsed the process stops with
        an error.
        Returns the updated data dict
        '''
        log = logging.getLogger(__name__)
        if not self.options.search_params:
            return search_data_dict
        try:
            user_search_params = json.loads(self.options.search_params)
        except ValueError as e:
            # error() halts the command
            error('Unable to parse JSON search parameters: {0}'.format(e))
        if user_search_params.get('q'):
            search_data_dict['q'] = user_search_params['q']
        if user_search_params.get('fq'):
            if search_data_dict['fq']:
                search_data_dict['fq'] += ' ' + user_search_params['fq']
            else:
                search_data_dict['fq'] = user_search_params['fq']
        if (user_search_params.get('fq_list') and
                isinstance(user_search_params['fq_list'], list)):
            search_data_dict['fq_list'].extend(user_search_params['fq_list'])
        # BUG FIX: the docstring promises the updated dict is returned, but
        # the function returned None (callers relied on in-place mutation).
        return search_data_dict

    def _search_datasets(self, page=1, view_types=None):
        '''
        Perform a query with `package_search` and return the result
        Results can be paginated using the `page` parameter
        '''
        # BUG FIX: mutable default argument replaced with None sentinel
        view_types = view_types or []
        n = self._page_size
        search_data_dict = {
            'q': '',
            'fq': '',
            'fq_list': [],
            'include_private': True,
            'rows': n,
            'start': n * (page - 1),
        }
        if self.options.dataset_id:
            search_data_dict['q'] = ' OR '.join(
                ['id:{0} OR name:"{0}"'.format(dataset_id)
                 for dataset_id in self.options.dataset_id]
            )
        elif self.options.search_params:
            self._update_search_params(search_data_dict)
        elif not self.options.no_default_filters:
            self._add_default_filters(search_data_dict, view_types)
        if not search_data_dict.get('q'):
            search_data_dict['q'] = '*:*'
        query = p.toolkit.get_action('package_search')(
            {}, search_data_dict)
        return query

    def create_views(self, view_plugin_types=None):
        '''Create views on all datasets matched by the command options.'''
        # BUG FIX: mutable default argument replaced with None sentinel
        view_plugin_types = view_plugin_types or []
        from ckan.lib.datapreview import add_views_to_dataset_resources
        log = logging.getLogger(__name__)
        datastore_enabled = 'datastore' in config['ckan.plugins'].split()
        loaded_view_plugins = self._get_view_plugins(view_plugin_types,
                                                     datastore_enabled)
        context = {'user': self.site_user['name']}
        page = 1
        while True:
            query = self._search_datasets(page, loaded_view_plugins)
            if page == 1 and query['count'] == 0:
                error('No datasets to create resource views on, exiting...')
            elif page == 1 and not self.options.assume_yes:
                msg = ('\nYou are about to check {0} datasets for the ' +
                       'following view plugins: {1}\n' +
                       ' Do you want to continue?')
                confirm = query_yes_no(msg.format(query['count'],
                                                  loaded_view_plugins))
                if confirm == 'no':
                    error('Command aborted by user')
            if query['results']:
                for dataset_dict in query['results']:
                    if not dataset_dict.get('resources'):
                        continue
                    views = add_views_to_dataset_resources(
                        context,
                        dataset_dict,
                        view_types=loaded_view_plugins)
                    if views:
                        view_types = list(set([view['view_type']
                                               for view in views]))
                        msg = ('Added {0} view(s) of type(s) {1} to ' +
                               'resources from dataset {2}')
                        log.debug(msg.format(len(views),
                                             ', '.join(view_types),
                                             dataset_dict['name']))
                # stop when the last (short) page has been processed
                if len(query['results']) < self._page_size:
                    break
                page += 1
            else:
                break
        log.info('Done')

    def clear_views(self, view_plugin_types=None):
        '''Delete all resource views, or only those of the given types.'''
        # BUG FIX: mutable default argument replaced with None sentinel
        view_plugin_types = view_plugin_types or []
        log = logging.getLogger(__name__)
        if not self.options.assume_yes:
            if view_plugin_types:
                msg = 'Are you sure you want to delete all resource views ' + \
                      'of type {0}?'.format(', '.join(view_plugin_types))
            else:
                msg = 'Are you sure you want to delete all resource views?'
            result = query_yes_no(msg, default='no')
            if result == 'no':
                error('Command aborted by user')
        context = {'user': self.site_user['name']}
        logic.get_action('resource_view_clear')(
            context, {'view_types': view_plugin_types})
        log.info('Done')

    def clean_views(self):
        '''Delete views whose type is no longer provided by any plugin.'''
        names = []
        for plugin in p.PluginImplementations(p.IResourceView):
            names.append(str(plugin.info()['name']))
        results = model.ResourceView.get_count_not_in_view_types(names)
        if not results:
            print('No resource views to delete')
            return
        print('This command will delete.\n')
        for row in results:
            print('%s of type %s' % (row[1], row[0]))
        result = query_yes_no('Do you want to delete these resource views:', default='no')
        if result == 'no':
            print('Not Deleting.')
            return
        model.ResourceView.delete_not_in_view_types(names)
        model.Session.commit()
        print('Deleted resource views.')
class ConfigToolCommand(paste.script.command.Command):
    '''Tool for editing options in a CKAN config file
    paster config-tool <default.ini> <key>=<value> [<key>=<value> ...]
    paster config-tool <default.ini> -f <custom_options.ini>
    Examples:
    paster config-tool default.ini sqlalchemy.url=123 'ckan.site_title=ABC'
    paster config-tool default.ini -s server:main -e port=8080
    paster config-tool default.ini -f custom_options.ini
    '''
    parser = paste.script.command.Command.standard_parser(verbose=True)
    default_verbosity = 1
    group_name = 'ckan'
    usage = __doc__
    summary = usage.split('\n')[0]

    parser.add_option('-s', '--section', dest='section',
                      default='app:main', help='Section of the config file')
    parser.add_option(
        '-e', '--edit', action='store_true', dest='edit', default=False,
        help='Checks the option already exists in the config file')
    parser.add_option(
        '-f', '--file', dest='merge_filepath', metavar='FILE',
        help='Supply an options file to merge in')

    def command(self):
        import config_tool

        # validate the positional arguments up front (guard clauses)
        if len(self.args) < 1:
            self.parser.error('Not enough arguments (got %i, need at least 1)'
                              % len(self.args))
        config_filepath = self.args[0]
        if not os.path.exists(config_filepath):
            self.parser.error('Config filename %r does not exist.' %
                              config_filepath)

        # -f: merge a whole options file into the config
        if self.options.merge_filepath:
            config_tool.config_edit_using_merge_file(
                config_filepath, self.options.merge_filepath)

        options = self.args[1:]
        if not options and not self.options.merge_filepath:
            self.parser.error('No options provided')

        # each remaining argument must be a key=value pair
        for option in options:
            if '=' not in option:
                error(
                    'An option does not have an equals sign: %r '
                    'It should be \'key=value\'. If there are spaces '
                    'you\'ll need to quote the option.\n' % option)
        if options:
            try:
                config_tool.config_edit_using_option_strings(
                    config_filepath, options, self.options.section,
                    edit=self.options.edit)
            except config_tool.ConfigToolError:
                error(traceback.format_exc())
class JobsCommand(CkanCommand):
    '''Manage background jobs
    Usage:
    paster jobs worker [--burst] [QUEUES]
    Start a worker that fetches jobs from queues and executes
    them. If no queue names are given then the worker listens
    to the default queue, this is equivalent to
    paster jobs worker default
    If queue names are given then the worker listens to those
    queues and only those:
    paster jobs worker my-custom-queue
    Hence, if you want the worker to listen to the default queue
    and some others then you must list the default queue explicitly:
    paster jobs worker default my-custom-queue
    If the `--burst` option is given then the worker will exit
    as soon as all its queues are empty.
    paster jobs list [QUEUES]
    List currently enqueued jobs from the given queues. If no queue
    names are given then the jobs from all queues are listed.
    paster jobs show ID
    Show details about a specific job.
    paster jobs cancel ID
    Cancel a specific job. Jobs can only be canceled while they are
    enqueued. Once a worker has started executing a job it cannot
    be aborted anymore.
    paster jobs clear [QUEUES]
    Cancel all jobs on the given queues. If no queue names are
    given then ALL queues are cleared.
    paster jobs test [QUEUES]
    Enqueue a test job. If no queue names are given then the job is
    added to the default queue. If queue names are given then a
    separate test job is added to each of the queues.
    '''
    summary = __doc__.split(u'\n')[0]
    usage = __doc__
    min_args = 0

    def __init__(self, *args, **kwargs):
        super(JobsCommand, self).__init__(*args, **kwargs)
        try:
            self.parser.add_option(u'--burst', action='store_true',
                                   default=False,
                                   help=u'Start worker in burst mode.')
        except OptionConflictError:
            # Option has already been added in previous call
            pass

    def command(self):
        """Parse the sub-command and dispatch to the matching handler."""
        self._load_config()
        if not self.args:
            print(self.__doc__)
            sys.exit(0)
        cmd = self.args.pop(0)
        # Dispatch table instead of an if/elif chain.
        handlers = {
            u'worker': self.worker,
            u'list': self.list,
            u'show': self.show,
            u'cancel': self.cancel,
            u'clear': self.clear,
            u'test': self.test,
        }
        handler = handlers.get(cmd)
        if handler is None:
            error(u'Unknown command "{}"'.format(cmd))
        else:
            handler()

    def worker(self):
        """Run a job worker over the queues named in self.args."""
        from ckan.lib.jobs import Worker
        Worker(self.args).work(burst=self.options.burst)

    def list(self):
        """Print one line per enqueued job in the requested queues."""
        jobs = p.toolkit.get_action(u'job_list')({}, {u'queues': self.args})
        for job in jobs:
            title = job[u'title']
            job[u'title'] = '' if title is None else u'"{}"'.format(title)
            print(u'{created} {id} {queue} {title}'.format(**job))

    def show(self):
        """Print the details of a single job by ID."""
        if not self.args:
            error(u'You must specify a job ID')
        job_id = self.args[0]
        try:
            job = p.toolkit.get_action(u'job_show')({}, {u'id': job_id})
        except logic.NotFound:
            error(u'There is no job with ID "{}"'.format(job_id))
        print(u'ID: {}'.format(job[u'id']))
        title = u'None' if job[u'title'] is None else u'"{}"'.format(job[u'title'])
        print(u'Title: {}'.format(title))
        print(u'Created: {}'.format(job[u'created']))
        print(u'Queue: {}'.format(job[u'queue']))

    def cancel(self):
        """Cancel a single enqueued job by ID."""
        if not self.args:
            error(u'You must specify a job ID')
        job_id = self.args[0]
        try:
            p.toolkit.get_action(u'job_cancel')({}, {u'id': job_id})
        except logic.NotFound:
            error(u'There is no job with ID "{}"'.format(job_id))
        print(u'Cancelled job {}'.format(job_id))

    def clear(self):
        """Cancel every job on the requested (or all) queues."""
        cleared = p.toolkit.get_action(u'job_clear')({}, {u'queues': self.args})
        names = (u'"{}"'.format(q) for q in cleared)
        print(u'Cleared queue(s) {}'.format(u', '.join(names)))

    def test(self):
        """Enqueue one test job per requested queue (default queue if none)."""
        from ckan.lib.jobs import DEFAULT_QUEUE_NAME, enqueue, test_job
        for queue in (self.args or [DEFAULT_QUEUE_NAME]):
            job = enqueue(test_job, [u'A test job'], title=u'A test job', queue=queue)
            print(u'Added test job {} to queue "{}"'.format(job.id, queue))
|
basic_routing.py | import pika
from queue import Empty, Queue
import multiprocessing as mp
import ctypes
import time
import math
import logging
import os
import signal
from datetime import datetime
log = logging.getLogger(__name__)
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
WAIT_READY_TIMEOUT = None
def _fmt_bytes(b, maxlen=32):
if len(b) > maxlen:
return "{}...({} bytes)".format(b[:maxlen], len(b))
else:
return "{}({} bytes)".format(b, len(b))
class _RoutingConnector:
def __init__(self, url, exchange, daemon=True, exchange_arguments={}):
self._params = pika.URLParameters(url)
self._exchange = exchange
self._exchange_args = exchange_arguments or {}
self._pipe_in, self._pipe_out = mp.Pipe()
self._connection = None
self._channel = None
self._ready = mp.Value(ctypes.c_bool, False)
self._closed = False
# self._counter = mp.Value(ctypes.c_int, 0)
# self._starttime = datetime.now()
self._proc = mp.Process(target=self._run, daemon=daemon)
@property
def child_pipe(self):
raise NotImplementedError()
@property
def parent_pipe(self):
raise NotImplementedError()
def _run(self):
self.parent_pipe.close()
log.debug("{} creating connection".format(self))
self._connection = pika.SelectConnection(self._params,
on_open_callback=self._on_open,
stop_ioloop_on_close=True)
try:
self._connection.ioloop.start()
except KeyboardInterrupt:
log.debug("{} received interrupt and is exiting".format(self))
def _on_open(self, connection):
connection.channel(self._on_channel_open)
def _on_channel_open(self, channel):
self._channel = channel
self._channel.exchange_declare(self._on_exchange_ok, self._exchange, arguments=self._exchange_args)
def _on_exchange_ok(self, _):
raise NotImplementedError()
def _mark_ready(self):
self._ready.value = True
def wait_till_ready(self, timeout=None, interval=0.001):
timeout = timeout if timeout is not None else WAIT_READY_TIMEOUT
if timeout is not None and timeout > 0:
for _ in range(int(math.ceil(timeout / interval))):
if self._ready.value:
return True
time.sleep(interval)
else:
while not self._ready.value:
time.sleep(interval)
return True
raise TimeoutError()
def _interrupt(self):
log.debug("{} terminating via interrupt".format(self))
os.kill(self._proc.pid, signal.SIGINT)
def start(self):
self._proc.start()
self.child_pipe.close()
if self.child_pipe is self._pipe_in:
self._pipe_in = None
else:
self._pipe_out = None
def close(self):
try:
self._interrupt()
self.parent_pipe.close()
except ProcessLookupError:
# The process is already toast
log.debug("Attempted to close {} but the child process was already gone".format(self))
self._proc.join()
self._closed = True
# secs = (datetime.now() - self._starttime).total_seconds()
# count = self._counter.value
# log.info("{} processed {} messages over {} seconds ({}/s)".format(self, count, secs, count / secs))
def __enter__(self):
self.start()
self.wait_till_ready()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class RoutingReader(_RoutingConnector):
    def __init__(self, url, exchange, queue_name, routing_key, *,
                 exclusive=False, exchange_args=None, queue_args=None, daemon=True):
        """Consumer side: declares a queue, binds it to the exchange with
        *routing_key*, and forwards each received message body over the pipe
        to the parent process.

        :param url: AMQP broker URL
        :type url: str
        :param exchange: exchange to declare and bind against
        :type exchange: str
        :param queue_name: queue to declare
        :type queue_name: str
        :param routing_key: binding key for the queue
        :type routing_key: str
        :param exclusive: declare the queue exclusive to this connection
        :type exclusive: bool
        :param daemon: run the child process as a daemon
        :type daemon: bool
        :param exchange_args: extra arguments for exchange_declare
        :type exchange_args: dict
        :param queue_args: extra arguments for queue_declare
        :type queue_args: dict
        """
        self._consumer_tag = None
        self._queue_name = queue_name
        self._routing_key = routing_key
        self._exclusive = exclusive
        self._queue_args = queue_args or {}
        super().__init__(url, exchange, exchange_arguments=exchange_args, daemon=daemon)

    # The child writes received bodies into _pipe_in; the parent reads them
    # back out of _pipe_out.
    @property
    def child_pipe(self):
        return self._pipe_in

    @property
    def parent_pipe(self):
        return self._pipe_out

    def _on_exchange_ok(self, _):
        # Setup chain step 2: exchange exists, declare the queue.
        self._channel.queue_declare(callback=self._on_queue_ok,
                                    queue=self._queue_name,
                                    exclusive=self._exclusive,
                                    arguments=self._queue_args)

    def _on_queue_ok(self, _):
        # Setup chain step 3: bind the queue under the routing key.
        self._channel.queue_bind(callback=self._on_bind_ok,
                                 queue=self._queue_name,
                                 exchange=self._exchange,
                                 routing_key=self._routing_key)

    def _on_bind_ok(self, _):
        # Setup chain step 4: react to broker-side cancel, then consume.
        self._channel.add_on_cancel_callback(self._on_cancel)
        self._start_consuming()

    def _start_consuming(self):
        self._consumer_tag = self._channel.basic_consume(self._on_message, self._queue_name)
        # NOTE(review): this log line formats self._exclusive into the
        # "<x>/<queue>" slot -- self._exchange may have been intended; confirm
        # before changing the message text.
        log.debug("{} listening on {}/{} for key {}".format(self, self._exclusive, self._queue_name, self._routing_key))
        self._mark_ready()

    def _on_cancel(self, _):
        # Broker cancelled our consumer: shut the connection down.
        self._connection.close()

    def _on_message(self, chan, deliver, props, body):
        # Forward the raw body to the parent, then ack; nack and re-raise if
        # the pipe write fails so the broker can redeliver.
        try:
            log.debug("{} received {} from {}/{}".format(self, _fmt_bytes(body), self._exchange, self._queue_name))
            self.child_pipe.send_bytes(body)
            self._channel.basic_ack(delivery_tag=deliver.delivery_tag)
        except Exception:
            self._channel.basic_nack(delivery_tag=deliver.delivery_tag)
            raise

    def _stop_consuming(self):
        if self._channel:
            self._channel.basic_cancel(self._on_cancel_ok, self._consumer_tag)
            # Restart the ioloop so the cancel round-trip can complete.
            self._connection.ioloop.start()

    def _on_cancel_ok(self, _):
        self._channel.close()
        self._connection.close()

    # ---- Parent-side consumption API ----

    def __iter__(self):
        return self

    def __next__(self):
        # Iteration ends once the child end of the pipe is gone.
        try:
            return self.get()
        except BrokenPipeError:
            raise StopIteration()

    def get(self):
        # Block until the next message body (bytes) arrives.
        return self.parent_pipe.recv_bytes()

    def get_nowait(self):
        # Return a pending message body, or raise queue.Empty immediately.
        if self.parent_pipe.poll():
            return self.parent_pipe.recv_bytes()
        else:
            raise Empty()

    def getall_nowait(self, max_items=None):
        # Generator over pending messages.  With max_items set, get_nowait may
        # raise Empty mid-iteration if fewer messages are waiting.
        if max_items:
            for _ in range(max_items):
                yield self.get_nowait()
        else:
            while self.parent_pipe.poll():
                yield self.get()

    def empty(self):
        # True when no message is currently waiting in the pipe.
        return not self.parent_pipe.poll()
class RoutingWriter(_RoutingConnector):
    def __init__(self, url, exchange, routing_key='', *,
                 mandatory=False, immediate=False, retry=False, poll_time=0.01, exchange_args=None, daemon=True):
        """Producer side: the child polls the pipe for (key, message) pairs
        queued by the parent via put()/putall() and publishes them to the
        exchange.

        :param url: AMQP broker URL
        :type url: str
        :param exchange: exchange to declare and publish to
        :type exchange: str
        :param routing_key: default key used when put() is given none
        :type routing_key: str
        :param mandatory: basic_publish mandatory flag
        :type mandatory: bool
        :param immediate: basic_publish immediate flag
        :type immediate: bool
        :param retry: re-publish messages the broker returns
        :type retry: bool
        :param daemon: run the child process as a daemon
        :type daemon: bool
        :param poll_time: seconds between publish polls in the child
        :type poll_time: Real
        :param exchange_args: extra arguments for exchange_declare
        :type exchange_args: dict
        """
        self._routing_key = routing_key
        self._poll_timeout = poll_time
        self._mandatory = mandatory
        self._immediate = immediate
        self._retry = retry
        # Child-local retry buffer; only allocated when retry is requested.
        self._retry_queue = Queue() if retry else None
        super().__init__(url, exchange, exchange_arguments=exchange_args, daemon=daemon)

    # Mirror image of RoutingReader: the parent writes into _pipe_in and the
    # child drains _pipe_out.
    @property
    def child_pipe(self):
        return self._pipe_out

    @property
    def parent_pipe(self):
        return self._pipe_in

    def _on_exchange_ok(self, _):
        # Exchange declared: watch for returned messages and start the
        # periodic publish loop.
        self._channel.add_on_return_callback(self._on_return)
        self._mark_ready()
        self._publish()

    def _publish(self):
        # Send new messages
        while self.child_pipe.poll():
            key, msg = self.child_pipe.recv()
            self._channel.basic_publish(self._exchange, key, msg,
                                        mandatory=self._mandatory, immediate=self._immediate)
        # Send retry messages
        # NOTE(review): retries are re-published under the default
        # self._routing_key -- the message's original key is not preserved;
        # confirm this is intended.
        while self._retry_queue and not self._retry_queue.empty():
            self._channel.basic_publish(self._exchange, self._routing_key, self._retry_queue.get(),
                                        mandatory=self._mandatory, immediate=self._immediate)
        # Schedule this function for sometime in the near future
        self._connection.add_timeout(self._poll_timeout, self._publish)

    def _on_return(self, channel, method, prop, body):
        # The broker returned an undeliverable message (mandatory/immediate).
        if self._retry:
            log.warning("{} got message {} returned, retrying to send".format(self, _fmt_bytes(body)))
            self._retry_queue.put(body)
        else:
            log.warning("{} got message {} returned, dropping it".format(self, _fmt_bytes(body)))

    def put(self, value, routing_key=None):
        """Queue *value* for publishing; None key falls back to the default."""
        key = routing_key if routing_key is not None else self._routing_key
        # # Not sure if this would actually be worth a warning
        # if not key:
        #     log.warning("{} had no or empty routing key provided for message {}".format(self, _fmt_bytes(value)))
        self.parent_pipe.send((key, value))

    def putall(self, values):
        """Queue every item of *values* under the default routing key."""
        for v in values:
            self.put(v)
|
httpsserver.py | import ssl
from http.server import HTTPServer, BaseHTTPRequestHandler
from io import BytesIO
from hashlib import sha256
import threading
import time
import datetime
from modules.config import Config
from modules.api import FenrisApi
# Constants
# Module-wide configuration and API client shared by all request handlers.
CONFIG=Config()
# Endpoint URL for posting captured request metadata.
API_URL=CONFIG.params.getProperty("httpsserver")["api_url"]
API=FenrisApi()
# Cached "httpsserver" config section (listener ip/port, cert file, flags).
CFG_PARAM=CONFIG.params.getProperty("httpsserver")
class RequestHandlerHTTPS(BaseHTTPRequestHandler):
    """HTTPS request handler that answers every GET/POST with 200 and,
    when enabled in config, reports request metadata to the Fenris API.
    Inheriting from BaseHTTPRequestHandler.
    """

    def generateAndSendAPIData(self):
        """Build a metadata record for the current request and POST it."""
        timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
        # Hack for fixing request issue for localhost.
        # DISCLAIMER: If the machine resolves to a FQDN, and a FQDN is
        # requested and resolves to the machine, this will fail and return
        # the FQDN.  As of now, this gives errors inserting into the API DB.
        host_header = self.headers.get('Host')
        host_generic_ip = '127.0.0.1' if host_header == 'localhost' else host_header
        api_data = {
            "host_generic_ip": host_generic_ip,
            "user_agent": self.headers.get('User-Agent'),
            "accept": self.headers.get('Accept'),
            "accept_language": self.headers.get('Accept-Language'),
            "accept_encoding": self.headers.get('Accept-Encoding'),
            "https_connection": self.headers.get('Connection'),
            "https_command": self.command,
            "https_path": self.path,
            "request_version": self.request_version,
            "request_timestamp": timestamp
        }
        API.send(API_URL, api_data)
        if CFG_PARAM["debug"]:
            print("HTTPS API DATA:", api_data)

    def _handle(self):
        # Shared GET/POST behaviour: reply 200, optionally report to the API.
        self.send_response(200)
        if CFG_PARAM["api"]:
            self.generateAndSendAPIData()
        self.end_headers()

    def do_GET(self):
        self._handle()

    def do_POST(self):
        self._handle()
class FenrisHTTPSServer:
    """HTTPS server wrapper serving RequestHandlerHTTPS on a daemon thread."""

    def __init__(self, listener_ip=None, port=None):
        """Instance constructor.

        :param listener_ip: Listener IP address (string); falls back to the
            "httpsserver" config section when None.
        :param port: Listener port (int); falls back to config when None.
        """
        self.listener_ip = listener_ip
        self.port = port
        # Certificate path from config (used by start()).
        self.crt_file = CFG_PARAM["crt_file"]
        if self.listener_ip is None:
            self.listener_ip = CFG_PARAM["listener_ip"]
        if self.port is None:
            self.port = CFG_PARAM["listener_port"]

    def start(self):
        """Bind the TLS socket and serve requests on a background thread."""
        print(f"FenrisHTTPSServer: Starting HTTPS server on {self.listener_ip}:{self.port}")
        self.httpd = HTTPServer((self.listener_ip, self.port), RequestHandlerHTTPS)
        # Fix: the configured certificate (self.crt_file) was read in __init__
        # but never used -- the original hard-coded 'localhost.pem'.  This also
        # replaces the deprecated ssl.wrap_socket (removed in Python 3.12)
        # with the SSLContext API.
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.load_cert_chain(certfile=self.crt_file)
        self.httpd.socket = context.wrap_socket(self.httpd.socket, server_side=True)
        self.thread = threading.Thread(target=self.httpd.serve_forever, daemon=True)
        self.thread.start()
|
queueloader.py | import sys,time,os,signal
import numpy
import threading
import random
import larcv
from . batch_pydata import batch_pydata
from . larcv_io_enums import RandomAccess
from . larcv_writer import larcv_writer
class queue_interface(object):

    def __init__(self, verbose=False, random_access_mode="random_blocks", seed=None):
        '''Hold a set of larcv queue loaders keyed by mode ("train", "test", ...).

        Queue loaders are manually triggered IO, not always running, so each
        batch is explicitly requested via prepare_next / fetch_minibatch_data.

        Arguments:
            verbose {bool} -- print IO startup timing
            random_access_mode {str} -- name of a RandomAccess enum member
            seed -- optional seed for both random and numpy.random
        '''
        object.__init__(self)
        self._queueloaders = {}        # mode -> larcv_queueio instance
        self._data_keys = {}           # mode -> {access key: larcv producer name}
        self._dims = {}                # mode -> {access key: data dimensions}
        self._verbose = verbose
        self._random_access = RandomAccess[random_access_mode]
        self._minibatch_size = {}
        self._queue_prev_entries = {}  # entries of the batch currently held
        self._queue_next_entries = {}  # entries queued for the next batch
        self._count = {}               # fetches since the last prepare_next
        if seed is not None:
            random.seed(seed)
            numpy.random.seed(seed)
        self._warning = True
        self._writer = None

    def no_warnings(self):
        '''Silence the repeated-fetch / no-pop warnings.'''
        self._warning = False

    def get_next_batch_indexes(self, mode, minibatch_size):
        '''Pick the entry indexes for the next batch per the random-access mode.'''
        # Promote the previous "next" batch to "prev" first:
        if mode not in self._queue_next_entries:
            self._queue_next_entries[mode] = None
        self._queue_prev_entries[mode] = self._queue_next_entries[mode]
        # (If this has not been run, they will both be set to None.)
        if self._random_access == RandomAccess.serial_access:
            # Continue sequentially from the previous batch's highest entry:
            if self._queue_prev_entries.get(mode) is not None:
                last_entry = numpy.max(self._queue_prev_entries[mode])
            else:
                last_entry = -1
            next_last_entry = minibatch_size + last_entry + 1
            n_entries = self._queueloaders[mode].fetch_n_entries()
            if next_last_entry < n_entries:
                next_entries = numpy.arange(minibatch_size, dtype=numpy.int32) + last_entry + 1
            else:
                # Cover the entries up to the last one ...
                next_entries_a = numpy.arange(n_entries - 1 - last_entry, dtype=numpy.int32) + last_entry + 1
                # ... and wrap the leftover entries back around to zero.
                next_entries_b = numpy.arange((last_entry + 1 + minibatch_size) % n_entries, dtype=numpy.int32)
                next_entries = numpy.concatenate((next_entries_a, next_entries_b))
        elif self._random_access == RandomAccess.random_blocks:
            # A contiguous block starting at a random offset:
            n_entries = self._queueloaders[mode].fetch_n_entries()
            # The number of valid start points leaves room for a full block:
            n_choices = n_entries - minibatch_size - 1
            start_entry = numpy.random.randint(low=0, high=n_choices, size=1)
            next_entries = numpy.arange(minibatch_size) + start_entry
        else:  # RandomAccess.random_events
            # Randomly chosen, unique entries:
            n_entries = self._queueloaders[mode].fetch_n_entries()
            next_entries = random.sample(range(n_entries), minibatch_size)
        self._queue_next_entries[mode] = next_entries
        return next_entries

    def set_next_index(self, mode, entry):
        '''
        Set the next entry to be read.  Only works in serial_access mode.
        Arguments:
            mode {str} -- The mode of training to store this threadio under (typically "train" or "TEST" or similar)
            entry {int} -- The next entry to be read
        Raises:
            ValueError -- if the mode is unknown or the access mode is not serial
        '''
        # Fix: the original called an undefined `throw(...)`, which raised a
        # NameError instead of a meaningful error.
        if mode not in self._queue_prev_entries:
            raise ValueError('Need to call prepare_manager first.')
        if self._random_access != RandomAccess.serial_access:
            raise ValueError('set_next_entry can only be called in serial_access mode.')
        # Stored as "previous last entry" = entry - 1 so the serial logic
        # reads `entry` next (kept as a one-element float list, as before).
        self._queue_next_entries[mode] = [entry - 1.]
        return

    def prepare_manager(self, mode, io_config, minibatch_size, data_keys, color=None):
        '''Prepare a manager for io.

        Creates an instance of larcv_queueio for a particular file to read.
        Arguments:
            mode {str} -- The mode of training to store this loader under (typically "train" or "TEST" or similar)
            io_config {dict} -- the io config dictionary. Required keys are: 'filler_name', 'verbosity', and 'filler_cfg'
            minibatch_size {int} -- number of entries per batch
            data_keys {dict} -- mapping from access key to larcv producer name
            color -- unused here; kept for interface compatibility
        Raises:
            Exception -- if the mode already exists or io_config is incomplete
        '''
        if mode in self._queueloaders:
            raise Exception("Can not prepare manager for mode {}, already exists".format(mode))
        # Check that the required keys are in the io config:
        for req in ['filler_name', 'verbosity', 'filler_cfg']:
            if req not in io_config:
                raise Exception("io_config for mode {} is missing required key {}".format(mode, req))
        start = time.time()
        # Initialize and configure a manager:
        io = larcv_queueio()
        io.configure(io_config)
        self._queueloaders.update({mode: io})
        self._minibatch_size[mode] = minibatch_size
        # Queue loaders are manually triggered IO: request the first batch ...
        self.prepare_next(mode)
        # ... wait for the read to finish, then promote it to "current":
        while self._queueloaders[mode].is_reading():
            time.sleep(0.01)
        io.pop_current_data()
        io.next(store_entries=True, store_event_ids=True)
        # Store the keys for accessing this datatype:
        self._data_keys[mode] = data_keys
        # Read and save the dimensions of the data:
        self._dims[mode] = {}
        for key in self._data_keys[mode]:
            self._dims[mode][key] = self._queueloaders[mode].fetch_data(self._data_keys[mode][key]).dim()
        end = time.time()
        # Queue up (but do not wait for) the following batch:
        self.prepare_next(mode)
        # Print out how long it took to start IO:
        if self._verbose:
            sys.stdout.write("Time to start {0} IO: {1:.2}s\n".format(mode, end - start))
        return

    def prepare_writer(self, io_config, output_file=None):
        '''Configure the (single) output writer; requires batch size 1.'''
        if self._writer is not None:
            raise Exception("queue_interface doesn't yet support multiple writers.")
        # Only a batch size of 1 is supported for writing; check via the dims
        # recorded for the primary reader.
        key = list(self._data_keys['primary'].items())[0][0]
        if self._dims['primary'][key][0] != 1:
            raise Exception("To use the writing interface, please set batch size to 1.")
        # The writer is not a queueIO instance but a larcv_writer: it copies
        # input to output and adds more information as well.
        self._writer = larcv_writer(io_config, output_file)

    def prepare_next(self, mode, set_entries=None):
        '''Set in motion the processing of the next batch of data.

        Triggers the queue loader to start reading the next set of entries
        (chosen via get_next_batch_indexes unless *set_entries* is given).
        '''
        if set_entries is None:
            set_entries = self.get_next_batch_indexes(mode, self._minibatch_size[mode])
        self._queueloaders[mode].set_next_batch(set_entries)
        self._queueloaders[mode].prepare_next()
        # Reset the fetch counter so fetch_minibatch_data knows this is fresh.
        self._count[mode] = 0
        return

    def fetch_minibatch_data(self, mode, pop=False, fetch_meta_data=False, data_shape=None, channels="last"):
        '''Return a dict of numpy data for the current batch, keyed by the
        access keys given to prepare_manager (plus 'entries'/'event_ids' when
        *fetch_meta_data* is set).  With pop=True the pipelined batch is
        promoted to current first.'''
        if self._count[mode] != 0:
            if self._warning:
                print("Calling fetch_minibatch_data without calling prepare_next. This will not give new data.")
                print("To quiet this warning, call prepare_next before fetch_minibatch_data or call queueloader.no_warnings()")
        if pop:
            # Wait for the pipelined read to finish, then promote it:
            while self._queueloaders[mode].is_reading():
                time.sleep(0.001)
            self._queueloaders[mode].pop_current_data()
        else:
            if self._warning:
                print("Calling fetch_minibatch_data with pop = False. This will give you the same data as last time.")
                print("To quiet this warning, call queueloader.no_warnings()")
        self._queueloaders[mode].next(store_entries=fetch_meta_data, store_event_ids=fetch_meta_data)
        this_data = {}
        for key in self._data_keys[mode]:
            this_data[key] = self._queueloaders[mode].fetch_data(
                self._data_keys[mode][key]).data(
                    shape=data_shape, channels=channels)
        if fetch_meta_data:
            this_data['entries'] = self._queueloaders[mode].fetch_entries()
            this_data['event_ids'] = self._queueloaders[mode].fetch_event_ids()
        self._count[mode] += 1
        return this_data

    def fetch_minibatch_dims(self, mode):
        '''Return the {key: dimensions} dict recorded for this mode.'''
        return self._dims[mode]

    def stop(self):
        '''Finalize the writer, if one was configured.'''
        if self._writer is not None:
            self._writer.finalize()

    def size(self, mode):
        '''Return the number of entries available in the specified mode.'''
        return self._queueloaders[mode].fetch_n_entries()

    def is_reading(self, mode):
        '''True while the loader for *mode* has a read in flight.'''
        return self._queueloaders[mode].is_reading()

    def write_output(self, data, datatype, producer, entries, event_ids):
        '''Write one event's worth of data through the configured writer.'''
        if self._writer is None:
            raise Exception("Trying to write data with no writer configured. Abort!")
        self._writer.write(data=data, datatype=datatype, producer=producer, entries=entries, event_ids=event_ids)
        return
class larcv_queueio (object):
    """Python wrapper around larcv.QueueProcessor with named batch storage."""
    # Registry of all configured instances, keyed by filler name.
    _instance_m = {}

    @classmethod
    def exist(cls, name):
        """Return True if a filler with this name is already registered."""
        name = str(name)
        return name in cls._instance_m

    @classmethod
    def instance_by_name(cls, name):
        """Look up a registered instance by filler name (raises KeyError)."""
        return cls._instance_m[name]

    def __init__(self):
        self._proc = None          # larcv.QueueProcessor, set by configure()
        self._name = ''            # filler name, set by configure()
        self._verbose = False
        self._read_start_time = None
        self._read_end_time = None
        self._cfg_file = None
        self._storage = {}         # storage name -> batch_pydata
        self._event_entries = None
        self._event_ids = None

    def reset(self):
        """Wait for any in-flight read, then reset the processor."""
        while self.is_reading():
            time.sleep(0.01)
        if self._proc:
            self._proc.reset()

    def __del__(self):
        try:
            self.reset()
        except AttributeError:
            # Fix: the original caught the misspelled `AttrbuteError`, which
            # itself raised a NameError inside the finalizer.  A partially
            # constructed instance (e.g. _proc still None) is ignored here.
            pass

    def configure(self, cfg, color=0):
        """Configure (or reconfigure) the QueueProcessor from a config dict.

        Required keys: 'filler_name' and 'filler_cfg'; optional: 'verbosity'
        and 'make_copy'.  Registers this instance under its filler name.
        """
        # if "this" was configured before, reset it
        if self._name:
            self.reset()
        # get name
        if not cfg['filler_name']:
            sys.stderr.write('filler_name is empty!\n')
            raise ValueError
        # ensure unique name
        if self.__class__.exist(cfg['filler_name']) and not self.__class__.instance_by_name(cfg['filler_name']) == self:
            sys.stderr.write('filler_name %s already running!' % cfg['filler_name'])
            return
        self._name = cfg['filler_name']
        # get QueueProcessor config file
        self._cfg_file = cfg['filler_cfg']
        if not self._cfg_file or not os.path.isfile(self._cfg_file):
            sys.stderr.write('filler_cfg file does not exist: %s\n' % self._cfg_file)
            raise ValueError
        # set verbosity
        if 'verbosity' in cfg:
            self._verbose = bool(cfg['verbosity'])
        # configure thread processor
        self._proc = larcv.QueueProcessor(self._name)
        self._proc.configure(self._cfg_file, color)
        # fetch batch filler info and build one storage slot per filler
        self._storage = {}
        for i in range(len(self._proc.batch_fillers())):
            pid = self._proc.batch_fillers()[i]
            name = self._proc.storage_name(pid)
            dtype = larcv.BatchDataTypeName(self._proc.batch_types()[i])
            self._storage[name] = batch_pydata(dtype)
            if 'make_copy' in cfg and cfg['make_copy']:
                self._storage[name]._make_copy = True
        # all success: register *this* instance
        self.__class__._instance_m[self._name] = self

    def set_next_batch(self, batch_indexes):
        """Tell the processor which entry indexes the next batch holds."""
        self._proc.set_next_batch(batch_indexes)

    def batch_process(self):
        """Block until any current read finishes, then process a batch."""
        while self.is_reading():
            time.sleep(0.01)
        self._proc.batch_process()

    def prepare_next(self):
        """Kick off (asynchronously) the read of the next batch."""
        self._proc.prepare_next()

    def is_reading(self, storage_id=None):
        # storage_id is accepted for interface compatibility but unused.
        return self._proc.is_reading()

    def pop_current_data(self):
        # Promote the "next" data to current in C++ and release current.
        self._proc.pop_current_data()

    def next(self, store_entries=False, store_event_ids=False):
        """Load the freshly read batch into the batch_pydata storage.

        No automatic stepping: the caller drives set_next_batch /
        prepare_next / pop_current_data.
        """
        for name, storage in self._storage.items():
            dtype = storage.dtype()
            if dtype == "float32":
                factory = larcv.BatchDataQueueFactoryFloat.get()
            elif dtype == "float64":
                factory = larcv.BatchDataQueueFactoryDouble.get()
            elif dtype == "int":
                factory = larcv.BatchDataQueueFactoryInt.get()
            else:
                # char/short/string queues are not yet wrapped with swig.
                # Fix: was `factory = None`, which crashed below with a
                # confusing AttributeError; raise an explicit error instead.
                raise ValueError("Unsupported batch data type: {}".format(dtype))
            batch_data = factory.get_queue(name).get_batch()
            storage.set_data(storage_id=name, larcv_batchdata=batch_data)
        self._event_entries = self._proc.processed_entries() if store_entries else None
        self._event_ids = self._proc.processed_events() if store_event_ids else None
        return

    def fetch_data(self, key):
        """Return the batch_pydata registered under *key*, or None."""
        try:
            return self._storage[key]
        except KeyError:
            sys.stderr.write('Cannot fetch data w/ key %s (unknown)\n' % key)
            return

    def fetch_event_ids(self):
        """Event ids of the last batch (None unless store_event_ids=True)."""
        return self._event_ids

    def fetch_entries(self):
        """Entry numbers of the last batch (None unless store_entries=True)."""
        return self._event_entries

    def fetch_n_entries(self):
        """Total number of entries available to the processor."""
        return self._proc.get_n_entries()
|
gerar_silhuetas_resnet50.py | # Cópia de inference_pretrained.py modificada para o dataset TUM GAID
import torch
import segmentation_models_pytorch as smp
import numpy as np
from torchvision import transforms
from PIL import Image
import os
from multiprocessing import Process
import time
def listar_imagens(basedir):
    """Walk *basedir* and return sorted lists of (file paths, dir paths)."""
    arquivos = []
    pastas = []
    for raiz, sub_dirs, sub_files in os.walk(basedir):
        arquivos.extend(os.path.join(raiz, nome) for nome in sub_files)
        pastas.extend(os.path.join(raiz, nome) for nome in sub_dirs)
    return sorted(arquivos), sorted(pastas)
def gerar_silhuetas(fnamex, dev):
    """Run DeepLabV3-ResNet50 person segmentation over *fnamex* on device *dev*.

    Silhouettes are written to a mirrored tree under
    ``<basedir>_silhouettes_50``.  Relies on the module-level globals
    ``basedir`` and ``preprocess`` set in ``__main__``.

    :param fnamex: list of image file paths to process
    :param dev: torch device string, e.g. 'cuda:0'
    """
    fname = fnamex
    # Output path for each silhouette mirrors the input tree.
    silhouettes = [it.replace(basedir, basedir + '_silhouettes_50')
                   for it in fname]
    # Set up the CNN model on the requested device.
    device = torch.device(dev)
    model = torch.hub.load(
        'pytorch/vision', 'deeplabv3_resnet50', pretrained=True)
    model.to(device)
    model.eval()
    bad_files = []
    for f, filename in enumerate(fname):
        # Load and preprocess one image.
        try:
            input_image = Image.open(filename)
            input_tensor = preprocess(input_image)
        except Exception:
            # Fix: the original used a bare `except:` followed by a bare
            # `exit` expression (a no-op), then fell through and reused the
            # previous iteration's tensor.  Skip corrupt files instead.
            bad_files.append(filename)
            print('arquivo corrompido '+filename)
            continue
        # Minibatch containing just this image, moved to the device.
        input_batch = input_tensor.unsqueeze(0).to(device)
        # Predict the segmentation.
        with torch.no_grad():
            output = model(input_batch)['out'][0]
        output_silhouette = output.argmax(0)
        # Class 15 is "person" in the VOC label map this model uses; paint white.
        output_silhouette[output_silhouette == 15] = 255
        # Convert the prediction to a CPU image, resized back to the input size.
        silhouette_image = Image.fromarray(output_silhouette.byte().cpu().numpy()
                                           ).resize(input_image.size)
        # Save the silhouette to disk, creating the directory if needed.
        os.makedirs(os.path.dirname(silhouettes[f]) + "/", exist_ok=True)
        silhouette_image.save(silhouettes[f])
if __name__ == '__main__':
    basedir = '/projects/jeff/TUMGAIDimage'
    fname, _ = listar_imagens(basedir)
    # Transformations applied to every image (ImageNet mean/std normalization).
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        )
    ])
    # Split the data into 3 parts, one per GPU.
    fname1, fname2, fname3 = np.array_split(fname, 3)
    # Prepare and start 3 parallel processes (one per GPU).
    inicio = time.time()
    print(time.strftime('Processamento iniciado - %H:%M:%S', time.localtime()))
    p1 = Process(target=gerar_silhuetas, args=(fname1, 'cuda:0'))
    p1.start()
    p2 = Process(target=gerar_silhuetas, args=(fname2, 'cuda:1'))
    p2.start()
    p3 = Process(target=gerar_silhuetas, args=(fname3, 'cuda:2'))
    p3.start()
    # Wait for all workers, then report the elapsed time.
    p1.join()
    p2.join()
    p3.join()
    print(time.strftime('Processamento concluído - %H:%M:%S', time.localtime()))
    tempo_total = time.time() - inicio
    print("Tempo total: %02dm:%02ds" % divmod(tempo_total, 60))
|
update_repository_manager.py | """
Determine if installed tool shed repositories have updates available in their respective tool sheds.
"""
import logging
import threading
from sqlalchemy import false
import tool_shed.util.shed_util_common as suc
from tool_shed.util import common_util
from tool_shed.util import encoding_util
from tool_shed.util import repository_util
from galaxy import util
log = logging.getLogger( __name__ )
class UpdateRepositoryManager( object ):
    def __init__( self, app ):
        """Start the periodic tool shed update check when enabled in config.

        :param app: the Galaxy application; provides config and the install
            model context.
        """
        self.app = app
        self.context = self.app.install_model.context
        # Ideally only one Galaxy server process should be able to check for repository updates.
        if self.app.config.enable_tool_shed_check:
            self.running = True
            # Sleeper is presumably defined elsewhere in this module (not
            # visible here) -- confirm before refactoring.
            self.sleeper = Sleeper()
            # Daemon thread so the restarter never blocks process shutdown.
            self.restarter = threading.Thread( target=self.__restarter )
            self.restarter.daemon = True
            self.restarter.start()
            # Check interval: configured hours converted to seconds.
            self.seconds_to_sleep = int( app.config.hours_between_check * 3600 )
def get_update_to_changeset_revision_and_ctx_rev( self, repository ):
"""Return the changeset revision hash to which the repository can be updated."""
changeset_revision_dict = {}
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( self.app, str( repository.tool_shed ) )
params = dict( name=str( repository.name ),
owner=str( repository.owner ),
changeset_revision=str( repository.installed_changeset_revision ) )
pathspec = [ 'repository', 'get_changeset_revision_and_ctx_rev' ]
try:
encoded_update_dict = util.url_get( tool_shed_url, password_mgr=self.app.tool_shed_registry.url_auth( tool_shed_url ), pathspec=pathspec, params=params )
if encoded_update_dict:
update_dict = encoding_util.tool_shed_decode( encoded_update_dict )
includes_data_managers = update_dict.get( 'includes_data_managers', False )
includes_datatypes = update_dict.get( 'includes_datatypes', False )
includes_tools = update_dict.get( 'includes_tools', False )
includes_tools_for_display_in_tool_panel = update_dict.get( 'includes_tools_for_display_in_tool_panel', False )
includes_tool_dependencies = update_dict.get( 'includes_tool_dependencies', False )
includes_workflows = update_dict.get( 'includes_workflows', False )
has_repository_dependencies = update_dict.get( 'has_repository_dependencies', False )
has_repository_dependencies_only_if_compiling_contained_td = update_dict.get( 'has_repository_dependencies_only_if_compiling_contained_td', False )
changeset_revision = update_dict.get( 'changeset_revision', None )
ctx_rev = update_dict.get( 'ctx_rev', None )
changeset_revision_dict[ 'includes_data_managers' ] = includes_data_managers
changeset_revision_dict[ 'includes_datatypes' ] = includes_datatypes
changeset_revision_dict[ 'includes_tools' ] = includes_tools
changeset_revision_dict[ 'includes_tools_for_display_in_tool_panel' ] = includes_tools_for_display_in_tool_panel
changeset_revision_dict[ 'includes_tool_dependencies' ] = includes_tool_dependencies
changeset_revision_dict[ 'includes_workflows' ] = includes_workflows
changeset_revision_dict[ 'has_repository_dependencies' ] = has_repository_dependencies
changeset_revision_dict[ 'has_repository_dependencies_only_if_compiling_contained_td' ] = has_repository_dependencies_only_if_compiling_contained_td
changeset_revision_dict[ 'changeset_revision' ] = changeset_revision
changeset_revision_dict[ 'ctx_rev' ] = ctx_rev
except Exception as e:
log.debug( "Error getting change set revision for update from the tool shed for repository '%s': %s" % ( repository.name, str( e ) ) )
changeset_revision_dict[ 'includes_data_managers' ] = False
changeset_revision_dict[ 'includes_datatypes' ] = False
changeset_revision_dict[ 'includes_tools' ] = False
changeset_revision_dict[ 'includes_tools_for_display_in_tool_panel' ] = False
changeset_revision_dict[ 'includes_tool_dependencies' ] = False
changeset_revision_dict[ 'includes_workflows' ] = False
changeset_revision_dict[ 'has_repository_dependencies' ] = False
changeset_revision_dict[ 'has_repository_dependencies_only_if_compiling_contained_td' ] = False
changeset_revision_dict[ 'changeset_revision' ] = None
changeset_revision_dict[ 'ctx_rev' ] = None
return changeset_revision_dict
def __restarter( self ):
log.info( 'Update repository manager restarter starting up...' )
while self.running:
# Make a call to the Tool Shed for each installed repository to get the latest
# status information in the Tool Shed for the repository. This information includes
# items like newer installable repository revisions, current revision updates, whether
# the repository revision is the latest installable revision, and whether the repository
# has been deprecated in the Tool Shed.
for repository in self.context.query( self.app.install_model.ToolShedRepository ) \
.filter( self.app.install_model.ToolShedRepository.table.c.deleted == false() ):
tool_shed_status_dict = repository_util.get_tool_shed_status_for_installed_repository( self.app, repository )
if tool_shed_status_dict:
if tool_shed_status_dict != repository.tool_shed_status:
repository.tool_shed_status = tool_shed_status_dict
self.context.flush()
else:
# The received tool_shed_status_dict is an empty dictionary, so coerce to None.
tool_shed_status_dict = None
if tool_shed_status_dict != repository.tool_shed_status:
repository.tool_shed_status = tool_shed_status_dict
self.context.flush()
self.sleeper.sleep( self.seconds_to_sleep )
log.info( 'Update repository manager restarter shutting down...' )
def shutdown( self ):
if self.app.config.enable_tool_shed_check:
self.running = False
self.sleeper.wake()
def update_repository_record( self, repository, updated_metadata_dict, updated_changeset_revision, updated_ctx_rev ):
"""
Update a tool_shed_repository database record with new information retrieved from the
Tool Shed. This happens when updating an installed repository to a new changeset revision.
"""
repository.metadata = updated_metadata_dict
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( self.app, repository.tool_shed )
suc.clean_dependency_relationships(self.app, updated_metadata_dict, repository, tool_shed_url)
# Update the repository.changeset_revision column in the database.
repository.changeset_revision = updated_changeset_revision
repository.ctx_rev = updated_ctx_rev
# Update the repository.tool_shed_status column in the database.
tool_shed_status_dict = repository_util.get_tool_shed_status_for_installed_repository( self.app, repository )
if tool_shed_status_dict:
repository.tool_shed_status = tool_shed_status_dict
else:
repository.tool_shed_status = None
self.app.install_model.context.add( repository )
self.app.install_model.context.flush()
self.app.install_model.context.refresh( repository )
return repository
class Sleeper( object ):
    """
    Provides a 'sleep' method that sleeps for a number of seconds *unless* the notify method
    is called (from a different thread).
    """
    def __init__( self ):
        self.condition = threading.Condition()

    def sleep( self, seconds ):
        """Block for up to ``seconds`` seconds, or until wake() is called."""
        # Use the condition as a context manager so the underlying lock is
        # released even if wait() raises (the manual acquire()/release() pair
        # previously had no try/finally and could leak the lock).
        with self.condition:
            self.condition.wait( seconds )

    def wake( self ):
        """Wake up one thread currently blocked in sleep(), if any."""
        with self.condition:
            self.condition.notify()
|
firmware_update.py | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import http
import http.server
import os
import re
import socket
import ssl
import threading
import time
import unittest
import zlib
from framework.coap_file_server import CoapFileServerThread, CoapFileServer
from framework.lwm2m_test import *
from .access_control import AccessMask
from .block_write import Block, equal_chunk_splitter
# Values of the LwM2M Firmware Update object's State resource,
# as read back by read_state() in the tests below.
UPDATE_STATE_IDLE = 0
UPDATE_STATE_DOWNLOADING = 1
UPDATE_STATE_DOWNLOADED = 2
UPDATE_STATE_UPDATING = 3
# Values of the Update Result resource, as read back by read_update_result().
UPDATE_RESULT_INITIAL = 0
UPDATE_RESULT_SUCCESS = 1
UPDATE_RESULT_NOT_ENOUGH_SPACE = 2
UPDATE_RESULT_OUT_OF_MEMORY = 3
UPDATE_RESULT_CONNECTION_LOST = 4
UPDATE_RESULT_INTEGRITY_FAILURE = 5
UPDATE_RESULT_UNSUPPORTED_PACKAGE_TYPE = 6
UPDATE_RESULT_INVALID_URI = 7
UPDATE_RESULT_FAILED = 8
UPDATE_RESULT_UNSUPPORTED_PROTOCOL = 9
# Path under which the test HTTP/CoAP servers expose the firmware image.
FIRMWARE_PATH = '/firmware'
# Shell script used as the "firmware": the first %s is optional garbage padding
# (to control image size), the second is the marker file the script creates to
# prove it ran; the script then deletes itself.
FIRMWARE_SCRIPT_TEMPLATE = '#!/bin/sh\n%secho updated > "%s"\nrm "$0"\n'
class FirmwareUpdate:
    """Namespace grouping the base classes shared by the firmware update tests."""
    class Test(test_suite.Lwm2mSingleServerTest):
        """Common setup/teardown and protocol helpers for firmware update tests."""
        def set_auto_deregister(self, auto_deregister):
            # Whether tearDown() should reset the state machine before deregistering.
            self.auto_deregister = auto_deregister
        def set_check_marker(self, check_marker):
            # Whether tearDown() should verify that the firmware script actually ran
            # (i.e. that the marker file was created with the expected content).
            self.check_marker = check_marker
        def set_expect_send_after_state_machine_reset(self, expect_send_after_state_machine_reset):
            # Whether an LwM2M Send carrying State/UpdateResult is expected
            # right after the state machine reset performed in tearDown().
            self.expect_send_after_state_machine_reset = expect_send_after_state_machine_reset
        def setUp(self, garbage=0, *args, **kwargs):
            """Prepare the firmware "image": a shell script padded with ~garbage bytes."""
            garbage_lines = ''
            while garbage > 0:
                # Each padding line is a '#' comment of up to 80 chars including '\n'.
                garbage_line = '#' * (min(garbage, 80) - 1) + '\n'
                garbage_lines += garbage_line
                garbage -= len(garbage_line)
            self.ANJAY_MARKER_FILE = generate_temp_filename(
                dir='/tmp', prefix='anjay-fw-updated-')
            self.FIRMWARE_SCRIPT_CONTENT = \
                (FIRMWARE_SCRIPT_TEMPLATE %
                 (garbage_lines, self.ANJAY_MARKER_FILE)).encode('ascii')
            super().setUp(fw_updated_marker_path=self.ANJAY_MARKER_FILE, *args, **kwargs)
        def tearDown(self):
            auto_deregister = getattr(self, 'auto_deregister', True)
            check_marker = getattr(self, 'check_marker', False)
            expect_send_after_state_machine_reset = getattr(self,
                                                            'expect_send_after_state_machine_reset',
                                                            False)
            try:
                if check_marker:
                    # Poll up to ~5 s for the marker file the firmware script creates.
                    for _ in range(10):
                        time.sleep(0.5)
                        if os.path.isfile(self.ANJAY_MARKER_FILE):
                            break
                    else:
                        self.fail('firmware marker not created')
                    with open(self.ANJAY_MARKER_FILE, "rb") as f:
                        line = f.readline()[:-1]
                        self.assertEqual(line, b"updated")
                    os.unlink(self.ANJAY_MARKER_FILE)
            finally:
                if auto_deregister:
                    # reset the state machine
                    # Write /5/0/1 (Firmware URI)
                    req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, '')
                    self.serv.send(req)
                    if expect_send_after_state_machine_reset:
                        pkt = self.serv.recv()
                        self.assertMsgEqual(Lwm2mSend(), pkt)
                        CBOR.parse(pkt.content).verify_values(test=self,
                                                              expected_value_map={
                                                                  ResPath.FirmwareUpdate.State: UPDATE_STATE_IDLE,
                                                                  ResPath.FirmwareUpdate.UpdateResult: UPDATE_RESULT_INITIAL
                                                              })
                        self.serv.send(Lwm2mChanged.matching(pkt)())
                    self.assertMsgEqual(
                        Lwm2mChanged.matching(req)(), self.serv.recv())
                super().tearDown(auto_deregister=auto_deregister)
        def read_update_result(self):
            """Read and return the Update Result resource as an int."""
            req = Lwm2mRead(ResPath.FirmwareUpdate.UpdateResult)
            self.serv.send(req)
            res = self.serv.recv()
            self.assertMsgEqual(Lwm2mContent.matching(req)(), res)
            return int(res.content)
        def read_state(self):
            """Read and return the State resource as an int."""
            req = Lwm2mRead(ResPath.FirmwareUpdate.State)
            self.serv.send(req)
            res = self.serv.recv()
            self.assertMsgEqual(Lwm2mContent.matching(req)(), res)
            return int(res.content)
        def write_firmware_and_wait_for_download(self, firmware_uri: str,
                                                 download_timeout_s=20):
            """Write the Package URI and poll until the client reports DOWNLOADED."""
            # Write /5/0/1 (Firmware URI)
            req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, firmware_uri)
            self.serv.send(req)
            self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                                self.serv.recv())
            # wait until client downloads the firmware
            deadline = time.time() + download_timeout_s
            while time.time() < deadline:
                time.sleep(0.5)
                if self.read_state() == UPDATE_STATE_DOWNLOADED:
                    return
            self.fail('firmware still not downloaded')
    class TestWithHttpServer(Test):
        """Base for tests that download the firmware from a local HTTP server."""
        # Delay (seconds) before sending each response chunk; subclasses override.
        RESPONSE_DELAY = 0
        # Size of each response chunk; the default sends everything at once.
        # NOTE(review): 'sys' is not imported at the top of this file; presumably
        # it comes from the wildcard framework.lwm2m_test import — confirm.
        CHUNK_SIZE = sys.maxsize
        # Whether the server should include an ETag header in its responses.
        ETAGS = False
        def get_firmware_uri(self):
            # The server binds to an ephemeral port; server_address[1] is that port.
            return 'http://127.0.0.1:%d%s' % (self.http_server.server_address[1], FIRMWARE_PATH)
        def provide_response(self, use_real_app=False):
            """Unblock the HTTP handler by supplying the response payload."""
            with self._response_cv:
                self.assertIsNone(self._response_content)
                if use_real_app:
                    # Serve the actual demo executable as the "firmware".
                    with open(os.path.join(self.config.demo_path, self.config.demo_cmd), 'rb') as f:
                        firmware = f.read()
                    self._response_content = make_firmware_package(
                        firmware)
                else:
                    self._response_content = make_firmware_package(
                        self.FIRMWARE_SCRIPT_CONTENT)
                self._response_cv.notify()
        def _create_server(self):
            # Captured by the nested handler class below to reach test state.
            test_case = self
            class FirmwareRequestHandler(http.server.BaseHTTPRequestHandler):
                def do_GET(self):
                    self.send_response(http.HTTPStatus.OK)
                    self.send_header('Content-type', 'text/plain')
                    if test_case.ETAGS:
                        self.send_header('ETag', '"some_etag"')
                    self.end_headers()
                    # This condition variable makes it possible to defer sending the response.
                    # FirmwareUpdateStateChangeTest uses it to ensure demo has enough time
                    # to send the interim "Downloading" state notification.
                    with test_case._response_cv:
                        while test_case._response_content is None:
                            test_case._response_cv.wait()
                        response_content = test_case._response_content
                        test_case.requests.append(self.path)
                        test_case._response_content = None
                    def chunks(data):
                        for i in range(0, len(response_content), test_case.CHUNK_SIZE):
                            yield response_content[i:i + test_case.CHUNK_SIZE]
                    for chunk in chunks(response_content):
                        # Throttled, chunked transfer so tests can interrupt mid-download.
                        time.sleep(test_case.RESPONSE_DELAY)
                        self.wfile.write(chunk)
                        self.wfile.flush()
                def log_request(self, code='-', size='-'):
                    # don't display logs on successful request
                    pass
            return http.server.HTTPServer(('', 0), FirmwareRequestHandler)
        def write_firmware_and_wait_for_download(self, *args, **kwargs):
            requests = list(self.requests)
            super().write_firmware_and_wait_for_download(*args, **kwargs)
            # Exactly one new request for /firmware should have been made.
            self.assertEqual(requests + ['/firmware'], self.requests)
        def setUp(self, *args, **kwargs):
            self.requests = []
            self._response_content = None
            self._response_cv = threading.Condition()
            self.http_server = self._create_server()
            super().setUp(*args, **kwargs)
            # Serve requests on a background thread for the duration of the test.
            self.server_thread = threading.Thread(
                target=lambda: self.http_server.serve_forever())
            self.server_thread.start()
        def tearDown(self):
            try:
                super().tearDown()
            finally:
                # Always stop the HTTP server, even if the test body failed.
                self.http_server.shutdown()
                self.server_thread.join()
    class TestWithTlsServer(Test):
        """Base for tests that need a TLS server with a freshly generated self-signed cert."""
        @staticmethod
        def _generate_pem_cert_and_key(cn='127.0.0.1', alt_ip='127.0.0.1'):
            """Generate a self-signed certificate (optionally with an IP SAN) and RSA key, PEM-encoded."""
            import datetime
            import ipaddress
            from cryptography import x509
            from cryptography.x509.oid import NameOID
            from cryptography.hazmat.backends import default_backend
            from cryptography.hazmat.primitives import hashes, serialization
            from cryptography.hazmat.primitives.asymmetric import rsa
            key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                           backend=default_backend())
            name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, cn)])
            now = datetime.datetime.utcnow()
            # Self-signed (subject == issuer), valid for one day — plenty for a test run.
            cert_builder = (x509.CertificateBuilder().
                            subject_name(name).
                            issuer_name(name).
                            public_key(key.public_key()).
                            serial_number(1000).
                            not_valid_before(now).
                            not_valid_after(now + datetime.timedelta(days=1)))
            if alt_ip is not None:
                cert_builder = cert_builder.add_extension(x509.SubjectAlternativeName(
                    [x509.DNSName(cn), x509.IPAddress(ipaddress.IPv4Address(alt_ip))]),
                    critical=False)
            cert = cert_builder.sign(key, hashes.SHA256(), default_backend())
            cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
            key_pem = key.private_bytes(encoding=serialization.Encoding.PEM,
                                        format=serialization.PrivateFormat.TraditionalOpenSSL,
                                        encryption_algorithm=serialization.NoEncryption())
            return cert_pem, key_pem
        def setUp(self, pass_cert_to_demo=True, **kwargs):
            # Extract cert-generation kwargs so they are not forwarded to super().setUp().
            cert_kwargs = {}
            for key in ('cn', 'alt_ip'):
                if key in kwargs:
                    cert_kwargs[key] = kwargs[key]
                    del kwargs[key]
            cert_pem, key_pem = self._generate_pem_cert_and_key(**cert_kwargs)
            # Persist cert and key to temp files; delete=False because the paths
            # are used after the 'with' block (cleaned up in tearDown()).
            # NOTE(review): 'tempfile' is not imported at the top of this file;
            # presumably it comes from the wildcard framework import — confirm.
            with tempfile.NamedTemporaryFile(delete=False) as cert_file, \
                    tempfile.NamedTemporaryFile(delete=False) as key_file:
                cert_file.write(cert_pem)
                cert_file.flush()
                key_file.write(key_pem)
                key_file.flush()
                self._cert_file = cert_file.name
                self._key_file = key_file.name
            extra_cmdline_args = []
            if 'extra_cmdline_args' in kwargs:
                extra_cmdline_args += kwargs['extra_cmdline_args']
                del kwargs['extra_cmdline_args']
            if pass_cert_to_demo:
                extra_cmdline_args += ['--fw-cert-file', self._cert_file]
            super().setUp(extra_cmdline_args=extra_cmdline_args, **kwargs)
        def tearDown(self):
            def unlink_without_err(fname):
                # Best-effort cleanup: report the failure but don't mask the test result.
                try:
                    os.unlink(fname)
                except:
                    print('unlink(%r) failed' % (fname,))
                    sys.excepthook(*sys.exc_info())
            try:
                super().tearDown()
            finally:
                unlink_without_err(self._cert_file)
                unlink_without_err(self._key_file)
class TestWithHttpsServer(TestWithTlsServer, TestWithHttpServer):
def get_firmware_uri(self):
http_uri = super().get_firmware_uri()
assert http_uri[:5] == 'http:'
return 'https:' + http_uri[5:]
def _create_server(self):
http_server = super()._create_server()
http_server.socket = ssl.wrap_socket(http_server.socket, certfile=self._cert_file,
keyfile=self._key_file,
server_side=True)
return http_server
    class TestWithCoapServer(Test):
        """Base for tests that download the firmware from a local CoAP file server."""
        def setUp(self, coap_server=None, *args, **kwargs):
            super().setUp(*args, **kwargs)
            # The file server runs on its own thread; coap_server=None uses the default.
            self.server_thread = CoapFileServerThread(coap_server=coap_server)
            self.server_thread.start()
        @property
        def file_server(self):
            # The underlying server object; used as a context manager by tests.
            return self.server_thread.file_server
        def tearDown(self):
            try:
                super().tearDown()
            finally:
                # Always stop the file server thread, even if the test body failed.
                self.server_thread.join()
    class DemoArgsExtractorMixin:
        """Mixin that captures the demo's command line so tests can restart it."""
        def _get_valgrind_args(self):
            # these tests call demo_process.kill(), so Valgrind is not really useful
            return []
        def _start_demo(self, cmdline_args, timeout_s=30):
            # Remember the args so runTest() can re-launch the demo after killing it.
            self.cmdline_args = cmdline_args
            return super()._start_demo(cmdline_args, timeout_s)
    class TestWithPartialDownload:
        """Mixin for tests that interrupt the firmware download partway through."""
        # Padding added to the firmware script so the download takes measurable time.
        GARBAGE_SIZE = 8000
        def wait_for_half_download(self):
            """Poll the partial-download file until at least half the image arrived."""
            # roughly twice the time expected as per SlowServer
            deadline = time.time() + self.GARBAGE_SIZE / 1000
            fsize = 0
            while time.time() < deadline:
                time.sleep(0.5)
                fsize = os.stat(self.fw_file_name).st_size
                if fsize * 2 > self.GARBAGE_SIZE:
                    break
            if fsize * 2 <= self.GARBAGE_SIZE:
                self.fail('firmware image not downloaded fast enough')
            elif fsize > self.GARBAGE_SIZE:
                self.fail('firmware image downloaded too quickly')
        def setUp(self, *args, **kwargs):
            super().setUp(garbage=self.GARBAGE_SIZE, *args, **kwargs)
            import tempfile
            # delete=False: only the file NAME is needed; the demo writes the
            # in-progress download there via set-fw-package-path.
            with tempfile.NamedTemporaryFile(delete=False) as f:
                self.fw_file_name = f.name
            self.communicate('set-fw-package-path %s' %
                             (os.path.abspath(self.fw_file_name)))
    class TestWithPartialDownloadAndRestart(TestWithPartialDownload, DemoArgsExtractorMixin):
        """Partial-download mixin that also verifies the completed image on teardown."""
        def tearDown(self):
            # After the resumed download completes, the file must match the
            # original firmware script exactly.
            with open(self.fw_file_name, "rb") as f:
                self.assertEqual(f.read(), self.FIRMWARE_SCRIPT_CONTENT)
            super().tearDown()
    class TestWithPartialCoapDownloadAndRestart(TestWithPartialDownloadAndRestart,
                                                TestWithCoapServer):
        """CoAP variant of the partial-download-and-restart scenario."""
        def setUp(self):
            class SlowServer(coap.Server):
                # Throttle every response so the client can be killed mid-transfer.
                def send(self, *args, **kwargs):
                    time.sleep(0.5)
                    result = super().send(*args, **kwargs)
                    self.reset()  # allow requests from other ports
                    return result
            super().setUp(coap_server=SlowServer())
            with self.file_server as file_server:
                file_server.set_resource('/firmware',
                                         make_firmware_package(self.FIRMWARE_SCRIPT_CONTENT))
                self.fw_uri = file_server.get_resource_uri('/firmware')
    class TestWithPartialHttpDownloadAndRestart(TestWithPartialDownloadAndRestart,
                                                TestWithHttpServer):
        """HTTP variant of partial-download-and-restart, with hooks for range/ETag tests."""
        def get_etag(self, response_content):
            # Deterministic ETag derived from the payload; subclasses may override.
            return '"%d"' % zlib.crc32(response_content)
        def check_success(self, handler, response_content, response_etag):
            # Hook: subclasses validate the request (e.g. Range headers) here.
            pass
        def send_headers(self, handler, response_content, response_etag):
            # Hook: subclasses may send extra headers and return a start offset.
            pass
        def _create_server(self):
            test_case = self
            class FirmwareRequestHandler(http.server.BaseHTTPRequestHandler):
                def send_response(self, *args, **kwargs):
                    # Track whether a hook already sent a (possibly error) response.
                    self._response_sent = True
                    return super().send_response(*args, **kwargs)
                def do_GET(self):
                    self._response_sent = False
                    test_case.requests.append(self.path)
                    # This condition variable makes it possible to defer sending the response.
                    # FirmwareUpdateStateChangeTest uses it to ensure demo has enough time
                    # to send the interim "Downloading" state notification.
                    with test_case._response_cv:
                        while test_case._response_content is None:
                            test_case._response_cv.wait()
                        response_content = test_case._response_content
                    response_etag = test_case.get_etag(response_content)
                    test_case.check_success(
                        self, response_content, response_etag)
                    if self._response_sent:
                        # A hook already responded (e.g. rejected the request).
                        return
                    test_case._response_content = None
                    self.send_response(http.HTTPStatus.OK)
                    self.send_header('Content-type', 'text/plain')
                    if response_etag is not None:
                        self.send_header('ETag', response_etag)
                    offset = test_case.send_headers(
                        self, response_content, response_etag)
                    if offset is None:
                        offset = 0
                    self.end_headers()
                    # Send 1 KiB every 0.5 s so the client can be interrupted mid-way.
                    while offset < len(response_content):
                        chunk = response_content[offset:offset + 1024]
                        self.wfile.write(chunk)
                        offset += len(chunk)
                        time.sleep(0.5)
                def log_message(self, *args, **kwargs):
                    # don't display logs
                    pass
            class SilentServer(http.server.HTTPServer):
                def handle_error(self, *args, **kwargs):
                    # don't log BrokenPipeErrors
                    if not isinstance(sys.exc_info()[1], BrokenPipeError):
                        super().handle_error(*args, **kwargs)
            return SilentServer(('', 0), FirmwareRequestHandler)
class FirmwareUpdatePackageTest(FirmwareUpdate.Test):
    """Push the firmware by writing it directly to the Package resource, then Update."""
    def setUp(self):
        super().setUp()
        # tearDown() verifies the marker file (proof the firmware script ran)
        # and skips the normal state-machine-reset/deregister sequence.
        self.set_check_marker(True)
        self.set_auto_deregister(False)
    def runTest(self):
        # Write /5/0/0 (Firmware): script content
        req = Lwm2mWrite(ResPath.FirmwareUpdate.Package,
                         make_firmware_package(self.FIRMWARE_SCRIPT_CONTENT),
                         format=coap.ContentFormat.APPLICATION_OCTET_STREAM)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())
        # Execute /5/0/2 (Update)
        req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())
class FirmwareUpdateUriTest(FirmwareUpdate.TestWithHttpServer):
    """Download the firmware from an HTTP URI written to Package URI, then Update."""
    def setUp(self):
        super().setUp()
        # tearDown() verifies the marker file and skips the usual reset/deregister.
        self.set_check_marker(True)
        self.set_auto_deregister(False)
    def runTest(self):
        self.provide_response()
        self.write_firmware_and_wait_for_download(self.get_firmware_uri())
        # Execute /5/0/2 (Update)
        req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())
class FirmwareUpdateStateChangeTest(FirmwareUpdate.TestWithHttpServer):
    """Observe the State resource through the full 0→1→2→3 transition sequence."""
    def setUp(self):
        super().setUp()
        self.set_check_marker(True)
        self.set_auto_deregister(False)
    def runTest(self):
        self.serv.set_timeout(timeout_s=1)
        # disable minimum notification period
        write_attrs_req = Lwm2mWriteAttributes(
            ResPath.FirmwareUpdate.State, query=['pmin=0'])
        self.serv.send(write_attrs_req)
        self.assertMsgEqual(Lwm2mChanged.matching(
            write_attrs_req)(), self.serv.recv())
        # initial state should be 0
        observe_req = Lwm2mObserve(ResPath.FirmwareUpdate.State)
        self.serv.send(observe_req)
        self.assertMsgEqual(Lwm2mContent.matching(
            observe_req)(content=b'0'), self.serv.recv())
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         self.get_firmware_uri())
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())
        # notification should be sent before downloading
        self.assertMsgEqual(Lwm2mNotify(observe_req.token, b'1'),
                            self.serv.recv())
        # Unblock the HTTP server only after the DOWNLOADING notification arrived.
        self.provide_response()
        # ... and after it finishes
        self.assertMsgEqual(Lwm2mNotify(observe_req.token, b'2'),
                            self.serv.recv())
        # Execute /5/0/2 (Update)
        req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())
        # ... and when update starts
        self.assertMsgEqual(Lwm2mNotify(observe_req.token, b'3'),
                            self.serv.recv())
        # there should be exactly one request
        self.assertEqual(['/firmware'], self.requests)
class FirmwareUpdateBadBase64(FirmwareUpdate.Test):
    """Writing non-base64 plain text to the Package resource must be rejected."""
    def runTest(self):
        # 16 raw 0x01 bytes are not valid base64, so sending them as
        # TEXT_PLAIN (which implies base64 for opaque resources) must
        # provoke 4.00 Bad Request from the client.
        payload = b'\x01' * 16
        write_req = Lwm2mWrite(ResPath.FirmwareUpdate.Package,
                               payload,
                               format=coap.ContentFormat.TEXT_PLAIN)
        self.serv.send(write_req)
        expected = Lwm2mErrorResponse.matching(write_req)(coap.Code.RES_BAD_REQUEST)
        self.assertMsgEqual(expected, self.serv.recv())
class FirmwareUpdateGoodBase64(FirmwareUpdate.Test):
    """A well-formed base64 TEXT_PLAIN write to the Package resource is accepted."""
    def runTest(self):
        import base64
        # Same 16 bytes as in the "bad base64" test, but properly encoded;
        # newlines are stripped since LwM2M text values are single-line.
        encoded = base64.encodebytes(b'\x01' * 16).replace(b'\n', b'')
        write_req = Lwm2mWrite(ResPath.FirmwareUpdate.Package, encoded,
                               format=coap.ContentFormat.TEXT_PLAIN)
        self.serv.send(write_req)
        self.assertMsgEqual(Lwm2mChanged.matching(write_req)(),
                            self.serv.recv())
class FirmwareUpdateNullPkg(FirmwareUpdate.TestWithHttpServer):
    """Writing a single NUL byte to Package resets the state machine to Idle."""
    def runTest(self):
        self.provide_response()
        self.write_firmware_and_wait_for_download(self.get_firmware_uri())
        # A one-byte '\0' Package write is the spec-defined state machine reset.
        req = Lwm2mWrite(ResPath.FirmwareUpdate.Package, b'\0',
                         format=coap.ContentFormat.APPLICATION_OCTET_STREAM)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
        self.assertEqual(UPDATE_STATE_IDLE, self.read_state())
        self.assertEqual(UPDATE_RESULT_INITIAL, self.read_update_result())
class FirmwareUpdateEmptyPkgUri(FirmwareUpdate.TestWithHttpServer):
    """Writing an empty Package URI resets the state machine to Idle."""
    def runTest(self):
        self.provide_response()
        self.write_firmware_and_wait_for_download(self.get_firmware_uri())
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, '')
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
        self.assertEqual(UPDATE_STATE_IDLE, self.read_state())
        self.assertEqual(UPDATE_RESULT_INITIAL, self.read_update_result())
class FirmwareUpdateInvalidUri(FirmwareUpdate.Test):
    """An unreachable http:// URI must eventually yield the Invalid URI result."""
    def runTest(self):
        # observe Result
        observe_req = Lwm2mObserve(ResPath.FirmwareUpdate.UpdateResult)
        self.serv.send(observe_req)
        self.assertMsgEqual(Lwm2mContent.matching(
            observe_req)(content=b'0'), self.serv.recv())
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         b'http://invalidfirmware.exe')
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
        # Skip interim notifications until the result leaves its initial value.
        while True:
            notify = self.serv.recv()
            self.assertMsgEqual(Lwm2mNotify(observe_req.token), notify)
            if int(notify.content) != UPDATE_RESULT_INITIAL:
                break
        self.assertEqual(UPDATE_RESULT_INVALID_URI, int(notify.content))
        self.assertEqual(UPDATE_STATE_IDLE, self.read_state())
class FirmwareUpdateUnsupportedUri(FirmwareUpdate.Test):
    """A URI with an unknown scheme is rejected outright with Bad Request."""
    def runTest(self):
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         b'unsupported://uri.exe')
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mErrorResponse.matching(req)(coap.Code.RES_BAD_REQUEST),
                            self.serv.recv())
        # This does not even change state or anything, because according to the LwM2M spec
        # Server can't feed us with unsupported URI type
        self.assertEqual(UPDATE_STATE_IDLE, self.read_state())
        self.assertEqual(UPDATE_RESULT_UNSUPPORTED_PROTOCOL,
                         self.read_update_result())
class FirmwareUpdateOfflineUriTest(FirmwareUpdate.TestWithHttpServer):
    """Writing a Package URI while TCP is offline results in Connection Lost."""
    def runTest(self):
        # Put the client's TCP transport offline before offering the URI.
        self.communicate('enter-offline tcp')
        self.assertEqual(UPDATE_STATE_IDLE, self.read_state())
        self.assertEqual(UPDATE_RESULT_INITIAL, self.read_update_result())
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         self.get_firmware_uri())
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
        self.assertEqual(UPDATE_STATE_IDLE, self.read_state())
        self.assertEqual(UPDATE_RESULT_CONNECTION_LOST,
                         self.read_update_result())
class FirmwareUpdateReplacingPkgUri(FirmwareUpdate.TestWithHttpServer):
    """Writing a new Package URI while already DOWNLOADED must be rejected."""
    def runTest(self):
        self.provide_response()
        self.write_firmware_and_wait_for_download(self.get_firmware_uri())
        # This isn't specified anywhere as a possible transition, therefore
        # it is most likely a bad request.
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, 'http://something')
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mErrorResponse.matching(req)(coap.Code.RES_BAD_REQUEST),
                            self.serv.recv())
        # The rejected write must not have disturbed the downloaded state.
        self.assertEqual(UPDATE_STATE_DOWNLOADED, self.read_state())
        self.assertEqual(UPDATE_RESULT_INITIAL, self.read_update_result())
class FirmwareUpdateReplacingPkg(FirmwareUpdate.TestWithHttpServer):
    """Writing a new Package while already DOWNLOADED must be rejected."""
    def runTest(self):
        self.provide_response()
        self.write_firmware_and_wait_for_download(self.get_firmware_uri())
        # This isn't specified anywhere as a possible transition, therefore
        # it is most likely a bad request.
        req = Lwm2mWrite(ResPath.FirmwareUpdate.Package, b'trololo',
                         format=coap.ContentFormat.APPLICATION_OCTET_STREAM)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mErrorResponse.matching(req)(coap.Code.RES_BAD_REQUEST),
                            self.serv.recv())
        # The rejected write must not have disturbed the downloaded state.
        self.assertEqual(UPDATE_STATE_DOWNLOADED, self.read_state())
        self.assertEqual(UPDATE_RESULT_INITIAL, self.read_update_result())
class FirmwareUpdateHttpsResumptionTest(FirmwareUpdate.TestWithPartialDownloadAndRestart,
                                        FirmwareUpdate.TestWithHttpsServer):
    """Kill the demo mid-download over HTTPS and verify the download resumes after restart."""
    # Throttle the server so the client can be killed in the middle of the transfer.
    RESPONSE_DELAY = 0.5
    CHUNK_SIZE = 1000
    ETAGS = True
    def setUp(self):
        super().setUp()
        self.set_check_marker(True)
        self.set_auto_deregister(False)
    def runTest(self):
        self.provide_response()
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         self.get_firmware_uri())
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
        self.wait_for_half_download()
        self.demo_process.kill()
        # restart demo app
        self.serv.reset()
        self.provide_response()
        self._start_demo(self.cmdline_args)
        self.assertDemoRegisters(self.serv)
        # Poll up to 20 s for the resumed download to complete.
        deadline = time.time() + 20
        while time.time() < deadline:
            time.sleep(0.5)
            if self.read_state() == UPDATE_STATE_DOWNLOADED:
                break
        else:
            # BUG FIX: this used a bare `raise` with no active exception, which
            # produces "RuntimeError: No active exception to re-raise" instead
            # of a meaningful test failure.
            self.fail('firmware still not downloaded')
        # Execute /5/0/2 (Update)
        req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())
class FirmwareUpdateHttpsCancelPackageTest(FirmwareUpdate.TestWithPartialDownload,
                                           FirmwareUpdate.TestWithHttpServer):
    """Cancel an in-progress download by writing a NUL byte to Package."""
    # NOTE(review): the class name says "Https" but the base is the plain
    # TestWithHttpServer — confirm whether this is intentional.
    RESPONSE_DELAY = 0.5
    CHUNK_SIZE = 1000
    ETAGS = True
    def runTest(self):
        self.provide_response()
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         self.get_firmware_uri())
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
        self.wait_for_half_download()
        # Two sockets: the LwM2M one plus the download connection.
        self.assertEqual(self.get_socket_count(), 2)
        # Writing '\0' to Package cancels the download and resets the state machine.
        req = Lwm2mWrite(ResPath.FirmwareUpdate.Package, b'\0',
                         format=coap.ContentFormat.APPLICATION_OCTET_STREAM)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
        self.wait_until_socket_count(expected=1, timeout_s=5)
        self.assertEqual(UPDATE_STATE_IDLE, self.read_state())
        self.assertEqual(UPDATE_RESULT_INITIAL, self.read_update_result())
class FirmwareUpdateHttpsCancelPackageUriTest(FirmwareUpdate.TestWithPartialDownload,
                                              FirmwareUpdate.TestWithHttpServer):
    """Cancel an in-progress download by writing an empty Package URI."""
    RESPONSE_DELAY = 0.5
    CHUNK_SIZE = 1000
    ETAGS = True
    def runTest(self):
        self.provide_response()
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         self.get_firmware_uri())
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
        self.wait_for_half_download()
        # Two sockets: the LwM2M one plus the download connection.
        self.assertEqual(self.get_socket_count(), 2)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, b'')
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
        self.wait_until_socket_count(expected=1, timeout_s=5)
        self.assertEqual(UPDATE_STATE_IDLE, self.read_state())
        self.assertEqual(UPDATE_RESULT_INITIAL, self.read_update_result())
class FirmwareUpdateCoapCancelPackageUriTest(FirmwareUpdate.TestWithPartialDownload,
                                             FirmwareUpdate.TestWithCoapServer):
    """Cancel an in-progress CoAP download by writing an empty Package URI."""
    def runTest(self):
        with self.file_server as file_server:
            file_server.set_resource('/firmware',
                                     make_firmware_package(self.FIRMWARE_SCRIPT_CONTENT))
            fw_uri = file_server.get_resource_uri('/firmware')
            # Write /5/0/1 (Firmware URI)
            req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, fw_uri)
            self.serv.send(req)
            self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                                self.serv.recv())
            # Handle one GET
            file_server.handle_request()
            # Two sockets: the LwM2M one plus the download connection.
            self.assertEqual(self.get_socket_count(), 2)
            # Cancel download
            req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, b'')
            self.serv.send(req)
            self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
            self.wait_until_socket_count(expected=1, timeout_s=5)
        self.assertEqual(UPDATE_STATE_IDLE, self.read_state())
        self.assertEqual(UPDATE_RESULT_INITIAL, self.read_update_result())
class FirmwareUpdateHttpsOfflineTest(FirmwareUpdate.TestWithPartialDownloadAndRestart,
                                     FirmwareUpdate.TestWithHttpServer):
    """Go TCP-offline mid-download and verify the download resumes after exit-offline."""
    # Throttle the server so the transfer is still in progress when we go offline.
    RESPONSE_DELAY = 0.5
    CHUNK_SIZE = 1000
    ETAGS = True
    def setUp(self):
        super().setUp()
        self.set_check_marker(True)
        self.set_auto_deregister(False)
    def runTest(self):
        self.provide_response()
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         self.get_firmware_uri())
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
        self.wait_for_half_download()
        self.assertEqual(self.get_socket_count(), 2)
        # Going TCP-offline must close the download socket.
        self.communicate('enter-offline tcp')
        self.wait_until_socket_count(expected=1, timeout_s=5)
        self.provide_response()
        self.communicate('exit-offline tcp')
        # Poll up to 20 s for the resumed download to complete.
        deadline = time.time() + 20
        while time.time() < deadline:
            time.sleep(0.5)
            if self.read_state() == UPDATE_STATE_DOWNLOADED:
                break
        else:
            # BUG FIX: this used a bare `raise` with no active exception, which
            # produces "RuntimeError: No active exception to re-raise" instead
            # of a meaningful test failure.
            self.fail('firmware still not downloaded')
        # Execute /5/0/2 (Update)
        req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())
class FirmwareUpdateHttpsTest(FirmwareUpdate.TestWithHttpsServer):
    """Full happy-path firmware update over HTTPS."""
    def setUp(self):
        super().setUp()
        # tearDown() verifies the marker file and skips the usual reset/deregister.
        self.set_check_marker(True)
        self.set_auto_deregister(False)
    def runTest(self):
        self.provide_response()
        self.write_firmware_and_wait_for_download(
            self.get_firmware_uri(), download_timeout_s=20)
        # Execute /5/0/2 (Update)
        req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())
class FirmwareUpdateUnconfiguredHttpsTest(FirmwareUpdate.TestWithHttpsServer):
    """An https:// URI with no certificate configured must fail immediately."""

    def setUp(self):
        super().setUp(pass_cert_to_demo=False)

    def runTest(self):
        # disable minimum notification period
        attrs = Lwm2mWriteAttributes(ResPath.FirmwareUpdate.UpdateResult,
                                     query=['pmin=0'])
        self.serv.send(attrs)
        self.assertMsgEqual(Lwm2mChanged.matching(attrs)(), self.serv.recv())

        # observe Update Result; initial result should be 0
        observe = Lwm2mObserve(ResPath.FirmwareUpdate.UpdateResult)
        self.serv.send(observe)
        self.assertMsgEqual(Lwm2mContent.matching(observe)(content=b'0'),
                            self.serv.recv())

        # Write /5/0/1 (Firmware URI)
        write = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                           self.get_firmware_uri())
        self.serv.send(write)
        self.assertMsgEqual(Lwm2mChanged.matching(write)(),
                            self.serv.recv())

        # the error is detected even before reaching the server;
        # no security information => "Unsupported protocol"
        notification = self.serv.recv()
        self.assertMsgEqual(
            Lwm2mNotify(observe.token,
                        str(UPDATE_RESULT_UNSUPPORTED_PROTOCOL).encode()),
            notification)
        self.serv.send(Lwm2mReset(msg_id=notification.msg_id))
        self.assertEqual(0, self.read_state())
class FirmwareUpdateUnconfiguredHttpsWithFallbackAttemptTest(FirmwareUpdate.TestWithHttpsServer):
    """With no download certificate but PSK credentials in the data model, the
    client attempts a PSK handshake that fails => Result == Connection lost."""

    def setUp(self):
        super().setUp(pass_cert_to_demo=False,
                      psk_identity=b'test-identity', psk_key=b'test-key')

    def runTest(self):
        # disable minimum notification period
        attrs = Lwm2mWriteAttributes(ResPath.FirmwareUpdate.UpdateResult,
                                     query=['pmin=0'])
        self.serv.send(attrs)
        self.assertMsgEqual(Lwm2mChanged.matching(attrs)(), self.serv.recv())

        # observe Update Result; initial result should be 0
        observe = Lwm2mObserve(ResPath.FirmwareUpdate.UpdateResult)
        self.serv.send(observe)
        self.assertMsgEqual(Lwm2mContent.matching(observe)(content=b'0'),
                            self.serv.recv())

        # Write /5/0/1 (Firmware URI)
        write = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                           self.get_firmware_uri())
        self.serv.send(write)
        self.assertMsgEqual(Lwm2mChanged.matching(write)(),
                            self.serv.recv())

        # even before reaching the server, we should get an error:
        # the PSK fallback handshake fails => "Connection lost"
        notification = self.serv.recv()
        self.assertMsgEqual(
            Lwm2mNotify(observe.token,
                        str(UPDATE_RESULT_CONNECTION_LOST).encode()),
            notification)
        self.serv.send(Lwm2mReset(msg_id=notification.msg_id))
        self.assertEqual(0, self.read_state())
class FirmwareUpdateInvalidHttpsTest(FirmwareUpdate.TestWithHttpsServer):
    """A server certificate with a mismatching CN must abort the handshake."""

    def setUp(self):
        super().setUp(cn='invalid_cn', alt_ip=None)

    def runTest(self):
        # disable minimum notification period
        attrs = Lwm2mWriteAttributes(ResPath.FirmwareUpdate.UpdateResult,
                                     query=['pmin=0'])
        self.serv.send(attrs)
        self.assertMsgEqual(Lwm2mChanged.matching(attrs)(), self.serv.recv())

        # observe Update Result; initial result should be 0
        observe = Lwm2mObserve(ResPath.FirmwareUpdate.UpdateResult)
        self.serv.send(observe)
        self.assertMsgEqual(Lwm2mContent.matching(observe)(content=b'0'),
                            self.serv.recv())

        # Write /5/0/1 (Firmware URI)
        write = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                           self.get_firmware_uri())
        self.serv.send(write)
        self.assertMsgEqual(Lwm2mChanged.matching(write)(),
                            self.serv.recv())

        # even before reaching the server, we should get an error:
        # handshake failure => "Connection lost"
        notification = self.serv.recv()
        self.assertMsgEqual(
            Lwm2mNotify(observe.token,
                        str(UPDATE_RESULT_CONNECTION_LOST).encode()),
            notification)
        self.serv.send(Lwm2mReset(msg_id=notification.msg_id))
        self.assertEqual(0, self.read_state())
class FirmwareUpdateResetInIdleState(FirmwareUpdate.Test):
    """Resetting the Firmware Update object while Idle must be a no-op."""

    def _assert_idle_with_initial_result(self):
        # both State and Result must remain at their defaults
        self.assertEqual(UPDATE_STATE_IDLE, self.read_state())
        self.assertEqual(UPDATE_RESULT_INITIAL, self.read_update_result())

    def runTest(self):
        self.assertEqual(UPDATE_STATE_IDLE, self.read_state())

        # reset via an empty Package URI
        uri_reset = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, b'')
        self.serv.send(uri_reset)
        self.assertMsgEqual(Lwm2mChanged.matching(uri_reset)(), self.serv.recv())
        self._assert_idle_with_initial_result()

        # reset via a NUL byte written to Package
        pkg_reset = Lwm2mWrite(ResPath.FirmwareUpdate.Package, b'\0',
                               format=coap.ContentFormat.APPLICATION_OCTET_STREAM)
        self.serv.send(pkg_reset)
        self.assertMsgEqual(Lwm2mChanged.matching(pkg_reset)(), self.serv.recv())
        self._assert_idle_with_initial_result()
class FirmwareUpdateCoapUri(FirmwareUpdate.TestWithCoapServer):
    """Firmware pulled from a coap:// URI; exactly one GET must be issued."""

    def tearDown(self):
        super().tearDown()
        # the whole image should have been fetched with a single request
        with self.file_server as file_server:
            self.assertEqual(1, len(file_server.requests))
            self.assertMsgEqual(CoapGet('/firmware'),
                                file_server.requests[0])

    def runTest(self):
        with self.file_server as file_server:
            file_server.set_resource('/firmware',
                                     make_firmware_package(self.FIRMWARE_SCRIPT_CONTENT))
            firmware_uri = file_server.get_resource_uri('/firmware')
        self.write_firmware_and_wait_for_download(firmware_uri)
class FirmwareUpdateRestartWithDownloaded(FirmwareUpdate.Test):
    """The Downloaded state must persist across a client restart."""

    def setUp(self):
        super().setUp()
        self.set_check_marker(True)
        self.set_auto_deregister(False)

    def runTest(self):
        # Write /5/0/0 (Firmware): deliver the package contents directly
        write = Lwm2mWrite(ResPath.FirmwareUpdate.Package,
                           make_firmware_package(self.FIRMWARE_SCRIPT_CONTENT),
                           format=coap.ContentFormat.APPLICATION_OCTET_STREAM)
        self.serv.send(write)
        self.assertMsgEqual(Lwm2mChanged.matching(write)(),
                            self.serv.recv())

        # restart the client; it must come back up already in Downloaded state
        self.teardown_demo_with_servers()
        self.setup_demo_with_servers(
            fw_updated_marker_path=self.ANJAY_MARKER_FILE)
        self.assertEqual(UPDATE_STATE_DOWNLOADED, self.read_state())
        self.assertEqual(UPDATE_RESULT_INITIAL, self.read_update_result())

        # Execute /5/0/2 (Update)
        update = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(update)
        self.assertMsgEqual(Lwm2mChanged.matching(update)(),
                            self.serv.recv())
class FirmwareUpdateRestartWithDownloading(FirmwareUpdate.TestWithPartialCoapDownloadAndRestart):
    """Kills the client mid-download; after a restart the download must be
    resumed (the partial file may only grow, never shrink below the half mark)."""

    def runTest(self):
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, self.fw_uri)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())

        self.wait_for_half_download()
        self.demo_process.kill()

        # restart demo app
        self.serv.reset()
        with self.file_server as file_server:
            file_server._server.reset()
        self._start_demo(self.cmdline_args)
        self.assertDemoRegisters(self.serv)

        # wait until client downloads the firmware
        deadline = time.time() + 20
        state = None
        while time.time() < deadline:
            # resumption means the partial file never shrinks below the half mark
            fsize = os.stat(self.fw_file_name).st_size
            self.assertGreater(fsize * 2, self.GARBAGE_SIZE)

            state = self.read_state()
            self.assertIn(
                state, {UPDATE_STATE_DOWNLOADING, UPDATE_STATE_DOWNLOADED})
            if state == UPDATE_STATE_DOWNLOADED:
                break

            # bugfix: sibling tests throttle this loop; without the sleep the
            # loop hammered the client with Read requests hundreds of times/sec
            time.sleep(0.5)
        self.assertEqual(state, UPDATE_STATE_DOWNLOADED)
class FirmwareUpdateRestartWithDownloadingETagChange(
        FirmwareUpdate.TestWithPartialCoapDownloadAndRestart):
    """If the resource's ETag changes while the client is down, the resumed
    download must restart from scratch (partial file gets truncated)."""

    def runTest(self):
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI, self.fw_uri)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())

        self.wait_for_half_download()
        self.demo_process.kill()

        # restart demo app, republishing the firmware under a bumped ETag
        self.serv.reset()
        with self.file_server as file_server:
            old_etag = file_server._resources['/firmware'].etag
            new_etag = bytes([(old_etag[0] + 1) % 256]) + old_etag[1:]
            self.assertNotEqual(old_etag, new_etag)
            file_server.set_resource('/firmware',
                                     make_firmware_package(
                                         self.FIRMWARE_SCRIPT_CONTENT),
                                     etag=new_etag)
        self._start_demo(self.cmdline_args)
        self.assertDemoRegisters(self.serv)

        # wait for the full download, verifying the stale partial data was dropped
        deadline = time.time() + 20
        state = None
        file_truncated = False
        while time.time() < deadline:
            try:
                if os.stat(self.fw_file_name).st_size * 2 <= self.GARBAGE_SIZE:
                    file_truncated = True
            except FileNotFoundError:
                file_truncated = True

            state = self.read_state()
            self.assertIn(
                state, {UPDATE_STATE_DOWNLOADING, UPDATE_STATE_DOWNLOADED})
            if state == UPDATE_STATE_DOWNLOADED:
                break

            # prevent test from reading Result hundreds of times per second
            time.sleep(0.5)

        self.assertEqual(state, UPDATE_STATE_DOWNLOADED)
        self.assertTrue(file_truncated)
self.assertTrue(file_truncated)
class FirmwareUpdateRestartWithDownloadingOverHttp(
        FirmwareUpdate.TestWithPartialHttpDownloadAndRestart):
    """HTTP download interrupted by a kill; without an ETag the client cannot
    resume, so after restart it must re-download from the beginning."""

    def get_etag(self, response_content):
        # no ETag => resumption is impossible
        return None

    def runTest(self):
        self.provide_response()
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         self.get_firmware_uri())
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())

        self.wait_for_half_download()
        self.assertEqual(2, self.get_socket_count())
        self.assertEqual(1, self.get_non_lwm2m_socket_count())
        self.assertEqual('TCP', self.get_transport(socket_index=-1))
        self.demo_process.kill()

        # restart demo app
        self.serv.reset()
        self.provide_response()
        self._start_demo(self.cmdline_args)
        self.assertDemoRegisters(self.serv)

        # wait for the full download; the partial file must have been truncated
        deadline = time.time() + 20
        state = None
        file_truncated = False
        while time.time() < deadline:
            try:
                if os.stat(self.fw_file_name).st_size * 2 <= self.GARBAGE_SIZE:
                    file_truncated = True
            except FileNotFoundError:
                file_truncated = True

            state = self.read_state()
            self.assertIn(
                state, {UPDATE_STATE_DOWNLOADING, UPDATE_STATE_DOWNLOADED})
            if state == UPDATE_STATE_DOWNLOADED:
                break

            # prevent test from reading Result hundreds of times per second
            time.sleep(0.5)

        self.assertEqual(state, UPDATE_STATE_DOWNLOADED)
        self.assertTrue(file_truncated)
        self.assertEqual(len(self.requests), 2)
class FirmwareUpdateResumeDownloadingOverHttp(FirmwareUpdate.TestWithPartialHttpDownloadAndRestart):
    """HTTP download interrupted by a kill; the client must resume using an
    HTTP Range request instead of downloading from scratch."""

    def send_headers(self, handler, response_content, response_etag):
        if 'Range' in handler.headers:
            # resumption is only valid against the very same representation
            self.assertEqual(handler.headers['If-Match'], response_etag)
            match = re.fullmatch(r'bytes=([0-9]+)-', handler.headers['Range'])
            self.assertIsNotNone(match)
            offset = int(match.group(1))
            handler.send_header('Content-range',
                                'bytes %d-%d/*' % (offset, len(response_content) - 1))
            return offset

    def runTest(self):
        self.provide_response()
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         self.get_firmware_uri())
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())

        self.wait_for_half_download()
        self.demo_process.kill()

        # restart demo app
        self.serv.reset()
        self.provide_response()
        self._start_demo(self.cmdline_args)
        self.assertDemoRegisters(self.serv)

        # wait for completion; on resumption the partial file may only grow
        deadline = time.time() + 20
        state = None
        while time.time() < deadline:
            self.assertGreater(os.stat(self.fw_file_name).st_size * 2,
                               self.GARBAGE_SIZE)
            state = self.read_state()
            self.assertIn(
                state, {UPDATE_STATE_DOWNLOADING, UPDATE_STATE_DOWNLOADED})
            if state == UPDATE_STATE_DOWNLOADED:
                break
            # prevent test from reading Result hundreds of times per second
            time.sleep(0.5)
        self.assertEqual(state, UPDATE_STATE_DOWNLOADED)
        self.assertEqual(len(self.requests), 2)
class FirmwareUpdateResumeDownloadingOverHttpWithReconnect(
        FirmwareUpdate.TestWithPartialHttpDownloadAndRestart):
    """Like the resume test above, but the interruption is a soft reconnect
    instead of killing the process."""

    def _get_valgrind_args(self):
        # the process is not killed in this variant, so Valgrind may stay on
        return FirmwareUpdate.TestWithHttpServer._get_valgrind_args(self)

    def send_headers(self, handler, response_content, response_etag):
        if 'Range' in handler.headers:
            # resumption is only valid against the very same representation
            self.assertEqual(handler.headers['If-Match'], response_etag)
            match = re.fullmatch(r'bytes=([0-9]+)-', handler.headers['Range'])
            self.assertIsNotNone(match)
            offset = int(match.group(1))
            handler.send_header('Content-range',
                                'bytes %d-%d/*' % (offset, len(response_content) - 1))
            return offset

    def runTest(self):
        self.provide_response()
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         self.get_firmware_uri())
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())

        self.wait_for_half_download()

        # soft reconnect instead of a process restart
        self.serv.reset()
        self.communicate('reconnect')
        self.assertDemoRegisters(self.serv)
        self.provide_response()

        # wait for completion; on resumption the partial file may only grow
        deadline = time.time() + 20
        state = None
        while time.time() < deadline:
            self.assertGreater(os.stat(self.fw_file_name).st_size * 2,
                               self.GARBAGE_SIZE)
            state = self.read_state()
            self.assertIn(
                state, {UPDATE_STATE_DOWNLOADING, UPDATE_STATE_DOWNLOADED})
            if state == UPDATE_STATE_DOWNLOADED:
                break
            # prevent test from reading Result hundreds of times per second
            time.sleep(0.5)
        self.assertEqual(state, UPDATE_STATE_DOWNLOADED)
        self.assertEqual(len(self.requests), 2)
class FirmwareUpdateResumeFromStartWithDownloadingOverHttp(
        FirmwareUpdate.TestWithPartialHttpDownloadAndRestart):
    """Download interrupted by a kill and resumed after restart over HTTP."""

    def runTest(self):
        self.provide_response()
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         self.get_firmware_uri())
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())

        self.wait_for_half_download()
        self.demo_process.kill()

        # restart demo app
        self.serv.reset()
        self.provide_response()
        self._start_demo(self.cmdline_args)
        self.assertDemoRegisters(self.serv)

        # wait until client downloads the firmware; the file may only grow
        deadline = time.time() + 20
        state = None
        while time.time() < deadline:
            self.assertGreater(os.stat(self.fw_file_name).st_size * 2,
                               self.GARBAGE_SIZE)
            state = self.read_state()
            self.assertIn(
                state, {UPDATE_STATE_DOWNLOADING, UPDATE_STATE_DOWNLOADED})
            if state == UPDATE_STATE_DOWNLOADED:
                break
            # prevent test from reading Result hundreds of times per second
            time.sleep(0.5)
        self.assertEqual(state, UPDATE_STATE_DOWNLOADED)
        self.assertEqual(len(self.requests), 2)
class FirmwareUpdateRestartAfter412WithDownloadingOverHttp(
        FirmwareUpdate.TestWithPartialHttpDownloadAndRestart):
    """Server answers the resume attempt with 412 Precondition Failed; the
    client must discard the partial file and download again from scratch."""

    def check_success(self, handler, response_content, response_etag):
        if 'If-Match' in handler.headers:
            # reject any attempt to resume the previous representation
            self.assertEqual(handler.headers['If-Match'], response_etag)
            handler.send_error(http.HTTPStatus.PRECONDITION_FAILED)

    def runTest(self):
        self.provide_response()
        # Write /5/0/1 (Firmware URI)
        req = Lwm2mWrite(ResPath.FirmwareUpdate.PackageURI,
                         self.get_firmware_uri())
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())

        self.wait_for_half_download()
        self.demo_process.kill()

        # restart demo app
        self.serv.reset()
        self.provide_response()
        self._start_demo(self.cmdline_args)
        self.assertDemoRegisters(self.serv)

        # wait for the full download; the partial file must get truncated
        deadline = time.time() + 20
        state = None
        file_truncated = False
        while time.time() < deadline:
            try:
                if os.stat(self.fw_file_name).st_size * 2 <= self.GARBAGE_SIZE:
                    file_truncated = True
            except FileNotFoundError:
                file_truncated = True

            state = self.read_state()
            self.assertIn(
                state, {UPDATE_STATE_DOWNLOADING, UPDATE_STATE_DOWNLOADED})
            if state == UPDATE_STATE_DOWNLOADED:
                break

            # prevent test from reading Result hundreds of times per second
            time.sleep(0.5)

        self.assertEqual(state, UPDATE_STATE_DOWNLOADED)
        self.assertTrue(file_truncated)
        # initial GET + rejected resume attempt + full re-download
        self.assertEqual(len(self.requests), 3)
class FirmwareUpdateWithDelayedSuccessTest(Block.Test):
    """The update handler reports success only after the client 'reboots'."""

    def runTest(self):
        with open(os.path.join(self.config.demo_path, self.config.demo_cmd), 'rb') as f:
            firmware = f.read()

        # Write /5/0/0 (Firmware)
        self.block_send(firmware,
                        equal_chunk_splitter(chunk_size=1024),
                        force_error=FirmwareUpdateForcedError.DelayedSuccess)

        # Execute /5/0/2 (Update)
        update = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(update)
        self.assertMsgEqual(Lwm2mChanged.matching(update)(),
                            self.serv.recv())

        # after the simulated reboot, the final result becomes visible
        self.serv.reset()
        self.assertDemoRegisters()
        self.assertEqual(self.read_path(self.serv, ResPath.FirmwareUpdate.UpdateResult).content,
                         str(UPDATE_RESULT_SUCCESS).encode())
        self.assertEqual(self.read_path(self.serv, ResPath.FirmwareUpdate.State).content,
                         str(UPDATE_STATE_IDLE).encode())
class FirmwareUpdateWithDelayedFailureTest(Block.Test):
    """The update handler reports FAILURE only after the client 'reboots'.

    Bugfix: this test was a verbatim copy of FirmwareUpdateWithDelayedSuccessTest
    (it forced DelayedSuccess and asserted a successful result), so the
    delayed-failure path its name promises was never exercised.
    """

    def runTest(self):
        with open(os.path.join(self.config.demo_path, self.config.demo_cmd), 'rb') as f:
            firmware = f.read()

        # Write /5/0/0 (Firmware)
        self.block_send(firmware,
                        equal_chunk_splitter(chunk_size=1024),
                        force_error=FirmwareUpdateForcedError.DelayedFailedUpdate)

        # Execute /5/0/2 (Update)
        req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())

        # after the simulated reboot the delayed FAILED result becomes visible
        self.serv.reset()
        self.assertDemoRegisters()
        self.assertEqual(self.read_path(self.serv, ResPath.FirmwareUpdate.UpdateResult).content,
                         str(UPDATE_RESULT_FAILED).encode())
        self.assertEqual(self.read_path(self.serv, ResPath.FirmwareUpdate.State).content,
                         str(UPDATE_STATE_IDLE).encode())
class FirmwareUpdateWithSetSuccessInPerformUpgrade(Block.Test):
    """Reporting success from inside perform_upgrade must bring the object
    back to Idle with Result == Success."""

    def runTest(self):
        with open(os.path.join(self.config.demo_path, self.config.demo_cmd), 'rb') as f:
            firmware = f.read()

        # Write /5/0/0 (Firmware)
        self.block_send(firmware,
                        equal_chunk_splitter(chunk_size=1024),
                        force_error=FirmwareUpdateForcedError.SetSuccessInPerformUpgrade)

        # Execute /5/0/2 (Update)
        req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())

        # perform_upgrade runs via the scheduler, so State may briefly still
        # read as Updating; poll until it changes or the deadline passes
        seen_states = []
        deadline = time.time() + 5  # arbitrary limit
        while True:
            if seen_states and seen_states[-1] != str(UPDATE_STATE_UPDATING):
                break
            if time.time() > deadline:
                self.fail('Firmware Update did not finish on time, last state = %s' % (
                    seen_states[-1] if seen_states else 'NONE'))
            seen_states.append(
                self.read_path(self.serv, ResPath.FirmwareUpdate.State).content.decode())
            time.sleep(0.5)

        self.assertNotEqual([], seen_states)
        self.assertEqual(seen_states[-1], str(UPDATE_STATE_IDLE))
        self.assertEqual(self.read_path(self.serv, ResPath.FirmwareUpdate.UpdateResult).content,
                         str(UPDATE_RESULT_SUCCESS).encode())
class FirmwareUpdateWithSetFailureInPerformUpgrade(Block.Test):
    """Reporting failure from inside perform_upgrade must bring the object
    back to Idle with Result == Failed."""

    def runTest(self):
        with open(os.path.join(self.config.demo_path, self.config.demo_cmd), 'rb') as f:
            firmware = f.read()

        # Write /5/0/0 (Firmware)
        self.block_send(firmware,
                        equal_chunk_splitter(chunk_size=1024),
                        force_error=FirmwareUpdateForcedError.SetFailureInPerformUpgrade)

        # Execute /5/0/2 (Update)
        req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())

        # perform_upgrade runs via the scheduler, so State may briefly still
        # read as Updating; poll until it changes or the deadline passes
        seen_states = []
        deadline = time.time() + 5  # arbitrary limit
        while True:
            if seen_states and seen_states[-1] != str(UPDATE_STATE_UPDATING):
                break
            if time.time() > deadline:
                self.fail('Firmware Update did not finish on time, last state = %s' % (
                    seen_states[-1] if seen_states else 'NONE'))
            seen_states.append(
                self.read_path(self.serv, ResPath.FirmwareUpdate.State).content.decode())
            time.sleep(0.5)

        self.assertNotEqual([], seen_states)
        self.assertEqual(seen_states[-1], str(UPDATE_STATE_IDLE))
        self.assertEqual(self.read_path(self.serv, ResPath.FirmwareUpdate.UpdateResult).content,
                         str(UPDATE_RESULT_FAILED).encode())
class FirmwareUpdateWeakEtagTest(FirmwareUpdate.TestWithHttpServer):
    """Weak ETags (W/"...") must not be persisted for download resumption."""

    def setUp(self):
        super().setUp()
        self.set_check_marker(True)
        self.set_auto_deregister(False)

        # force every HTTP response to carry a weak ETag
        orig_end_headers = self.http_server.RequestHandlerClass.end_headers

        def updated_end_headers(request_handler):
            request_handler.send_header('ETag', 'W/"weaketag"')
            orig_end_headers(request_handler)

        self.http_server.RequestHandlerClass.end_headers = updated_end_headers

    def runTest(self):
        self.provide_response()
        self.write_firmware_and_wait_for_download(self.get_firmware_uri())

        # the weak ETag must not end up in the persistence marker file
        with open(self.ANJAY_MARKER_FILE, 'rb') as f:
            marker_data = f.read()
        self.assertNotIn(b'weaketag', marker_data)

        # Execute /5/0/2 (Update)
        req = Lwm2mExecute(ResPath.FirmwareUpdate.Update)
        self.serv.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.serv.recv())
|
Binance Detect Moonings.py | """
Omaha Group Fork
Version: Wendy 1.0
Disclaimer
All investment strategies and investments involve risk of loss.
Nothing contained in this program, scripts, code or repository should be
construed as investment advice. Any reference to an investment's past or
potential performance is not, and should not be construed as, a recommendation
or as a guarantee of any specific outcome or profit.
By using this program you accept all liabilities,
and that no claims can be made against the developers,
or others connected with the program.
See requirements.txt for versions of modules needed:
Notes:
- Requires Python version 3.9.x to run
Functionality:
- Changed way profit % is calculated to be based on ROI
- More details provided on screen on state of bot (i.e. unrealised session profit, session profit, all time profit, bot paused or not etc)
- Totally reworked external signals. NOTE: you CANNOT use the default signals anymore with my bot unless you modify them to work with it
- Sell all coins on stopping bot functionality
- Stop bot on session profit / session stop loss trigger
- Discord support
- Better reporting in trades.txt
- A history.txt that records state of bot every minute (useful for past analysis /charting)
- Better error trapping on certain exceptions
"""
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used for math functions
import math
# used to create threads & dynamic loading of modules
import threading
import multiprocessing
import importlib
# used for directory handling
import glob
#discord needs import request
import requests
# Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key,
load_discord_creds
)
# for colourful logging to the console
class txcolors:
    """ANSI escape sequences for colour-coded console output."""
    BUY = '\033[92m'          # bright green
    WARNING = '\033[93m'      # bright yellow
    SELL_LOSS = '\033[91m'    # bright red
    SELL_PROFIT = '\033[32m'  # green
    DIM = '\033[2m\033[35m'   # dim + magenta
    DEFAULT = '\033[39m'      # reset foreground colour
# tracks profit/loss each session
# NOTE(review): `global` statements at module level are no-ops in Python;
# they are kept verbatim but only document which names are module globals.
global session_profit_incfees_perc, session_profit_incfees_total, session_tpsl_override_msg, is_bot_running
session_profit_incfees_perc = 0    # realised session profit as % of budget (incl. fees)
session_profit_incfees_total = 0   # realised session profit in quote currency (incl. fees)
session_tpsl_override_msg = ""     # reason text set when session TP/SL stops the bot
is_bot_running = True              # cleared to stop the main trading loop

global historic_profit_incfees_perc, historic_profit_incfees_total, trade_wins, trade_losses
global sell_all_coins, bot_started_datetime

# Initialise the all-time counters only when they do not already exist, so a
# re-execution of this module (e.g. restart-in-place) keeps accumulated history.
try:
    historic_profit_incfees_perc
except NameError:
    historic_profit_incfees_perc = 0 # or some other default value.

try:
    historic_profit_incfees_total
except NameError:
    historic_profit_incfees_total = 0 # or some other default value.

try:
    trade_wins
except NameError:
    trade_wins = 0 # or some other default value.

try:
    trade_losses
except NameError:
    trade_losses = 0 # or some other default value.

# replaced with the actual start timestamp when the bot launches
bot_started_datetime = ""
# print with timestamps
# Replaces sys.stdout with a thin wrapper so that every print() in the bot is
# automatically prefixed with a dimmed timestamp at the start of each line.
old_out = sys.stdout  # keep the real stream; the wrapper delegates all writes to it

class St_ampe_dOut:
    """Stamped stdout."""
    nl = True  # True when the next write starts a fresh line (class-level flag)

    def write(self, x):
        """Write function overloaded."""
        if x == '\n':
            # bare newline: pass through and remember we are at line start
            old_out.write(x)
            self.nl = True
        elif self.nl:
            # first chunk of a new line: prepend the timestamp prefix
            old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
            self.nl = False
        else:
            # continuation of the current line: no prefix
            old_out.write(x)

    def flush(self):
        # file-like API compatibility; buffering is handled by old_out
        pass

sys.stdout = St_ampe_dOut()
def is_fiat():
    """Return True when the configured base pair is a fiat or stablecoin currency."""
    global hsp_head
    PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
    # list below is in the order that Binance displays them, apologies for not using ASC order
    fiats = ['USDT', 'BUSD', 'AUD', 'BRL', 'EUR', 'GBP', 'RUB', 'TRY', 'TUSD', 'USDC', 'PAX', 'BIDR', 'DAI', 'IDRT', 'UAH', 'NGN', 'VAI', 'BVND']
    return PAIR_WITH in fiats
def decimals():
    """Number of decimal places used when reporting fractional amounts."""
    # fiat pairs are conventionally quoted to 4 places, crypto pairs to 8
    return 4 if is_fiat() else 8
def get_price(add_to_historical=True):
    '''Return the current price for all coins on binance'''
    global historical_prices, hsp_head

    snapshot = {}
    for ticker in client.get_all_tickers():
        symbol = ticker['symbol']
        # restrict to the configured quote pair, excluding fiat cross-pairs
        if CUSTOM_LIST:
            wanted = any(item + PAIR_WITH == symbol for item in tickers)
        else:
            wanted = PAIR_WITH in symbol
        if wanted and all(fiat not in symbol for fiat in FIATS):
            snapshot[symbol] = {'price': ticker['price'], 'time': datetime.now()}

    if add_to_historical:
        # advance the circular-buffer head and store this snapshot
        hsp_head += 1
        if hsp_head == RECHECK_INTERVAL:
            hsp_head = 0
        historical_prices[hsp_head] = snapshot

    return snapshot
def wait_for_price():
    '''calls the initial price and ensures the correct amount of time has passed
    before reading the current price again'''

    global historical_prices, hsp_head, volatility_cooloff

    volatile_coins = {}
    externals = {}
    coins_up = 0
    coins_down = 0
    coins_unchanged = 0

    pause_bot()

    # 'BNB' + PAIR_WITH is used as a reference pair to read the timestamp of the
    # newest snapshot -- assumes that pair is always present; TODO confirm
    if historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
        # sleep for exactly the amount of time required
        time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'])).total_seconds())

    # retrieve latest prices
    last_price = get_price()

    # Moved to the end of this method
    # balance_report(last_price)

    # calculate the difference in prices
    for coin in historical_prices[hsp_head]:

        # minimum and maximum prices over time period
        min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
        max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))

        # signed percentage change; negative when the max preceded the min
        # (i.e. the coin has been falling over the window)
        threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100

        # each coin with higher gains than our CHANGE_IN_PRICE is added to the volatile_coins dict if less than TRADE_SLOTS is not reached.
        if threshold_check > CHANGE_IN_PRICE:
            coins_up +=1

            if coin not in volatility_cooloff:
                # first sighting: backdate so the coin is immediately eligible
                volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE)
                # volatility_cooloff[coin] = datetime.now() - timedelta(minutes=COOLOFF_PERIOD)

            # only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
            if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE):
                #if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=COOLOFF_PERIOD):
                volatility_cooloff[coin] = datetime.now()

                # TRADE_SLOTS == 0 means "unlimited slots"
                if len(coins_bought) + len(volatile_coins) < TRADE_SLOTS or TRADE_SLOTS == 0:
                    volatile_coins[coin] = round(threshold_check, 3)
                    print(f'{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, purchasing ${TRADE_TOTAL} {PAIR_WITH} of {coin}!')
                else:
                    print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are using all available trade slots!{txcolors.DEFAULT}')
            #else:
                #if len(coins_bought) == TRADE_SLOTS:
                #    print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are using all available trade slots!{txcolors.DEFAULT}')
                #else:
                #    print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but failed cool off period of {COOLOFF_PERIOD} minutes! Curr COP is {volatility_cooloff[coin] + timedelta(minutes=COOLOFF_PERIOD)}{txcolors.DEFAULT}')
        elif threshold_check < CHANGE_IN_PRICE:
            coins_down +=1

        else:
            coins_unchanged +=1

    # Disabled until fix
    #print(f'Up: {coins_up} Down: {coins_down} Unchanged: {coins_unchanged}')

    # Here goes new code for external signalling
    externals = buy_external_signals()
    exnumber = 0

    for excoin in externals:
        # externally-signalled coins bypass the volatility check but still
        # respect the trade-slot limit
        if excoin not in volatile_coins and excoin not in coins_bought and \
                (len(coins_bought) + len(volatile_coins)) < TRADE_SLOTS:
            #(len(coins_bought) + exnumber + len(volatile_coins)) < TRADE_SLOTS:
            volatile_coins[excoin] = 1
            exnumber +=1
            print(f"External signal received on {excoin}, purchasing ${TRADE_TOTAL} {PAIR_WITH} value of {excoin}!")

    balance_report(last_price)

    return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
def buy_external_signals():
    """Collect coin symbols from signals/*.buy files, then delete the files.

    Each signal file holds one pair symbol per line. Returns a dict mapping
    every symbol to itself (callers use it like a set).
    """
    external_list = {}

    for filename in glob.glob("signals/*.buy"):
        # bugfix: use a context manager -- the original iterated over open(...)
        # without closing, which leaks the handle and can break os.remove on
        # platforms that lock open files
        with open(filename) as signal_file:
            for line in signal_file:
                symbol = line.strip()
                if symbol:  # robustness: skip blank lines
                    external_list[symbol] = symbol
        try:
            os.remove(filename)
        except OSError:  # narrowed from a bare except: only removal errors expected
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')

    return external_list
def sell_external_signals():
    """Collect coin symbols from signals/*.sell files, then delete the files.

    Each signal file holds one pair symbol per line. Returns a dict mapping
    every symbol to itself (callers use it like a set).
    """
    external_list = {}

    for filename in glob.glob("signals/*.sell"):
        # bugfix: use a context manager -- the original iterated over open(...)
        # without closing, which leaks the handle and can break os.remove on
        # platforms that lock open files
        with open(filename) as signal_file:
            for line in signal_file:
                symbol = line.strip()
                if symbol:  # robustness: skip blank lines
                    external_list[symbol] = symbol
                    if DEBUG: print(f'{symbol} added to sell_external_signals() list')
        try:
            os.remove(filename)
        except OSError:  # narrowed from a bare except: only removal errors expected
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external SELL signalling file{txcolors.DEFAULT}')

    return external_list
def balance_report(last_price):
    """Print a session/all-time P&L summary, push it to Discord and append it
    to the history log.

    :param last_price: dict of {symbol: {'price': ..., 'time': ...}} as
                       returned by get_price()
    :return: the concatenated one-line report string
    """
    global trade_wins, trade_losses, session_profit_incfees_perc, session_profit_incfees_total

    unrealised_session_profit_incfees_perc = 0
    unrealised_session_profit_incfees_total = 0

    # total budget the bot is allowed to deploy across all slots
    BUDGET = TRADE_SLOTS * TRADE_TOTAL
    exposure_calcuated = 0

    # accumulate unrealised P&L (incl. buy+sell fees) over every open position
    for coin in list(coins_bought):
        LastPrice = float(last_price[coin]['price'])
        sellFee = (LastPrice * (TRADING_FEE/100))
        BuyPrice = float(coins_bought[coin]['bought_at'])
        buyFee = (BuyPrice * (TRADING_FEE/100))
        exposure_calcuated = exposure_calcuated + round(float(coins_bought[coin]['bought_at']) * float(coins_bought[coin]['volume']),0)
        #PriceChangeIncFees_Perc = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) / (BuyPrice+buyFee) * 100)
        PriceChangeIncFees_Total = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
        # unrealised_session_profit_incfees_perc = float(unrealised_session_profit_incfees_perc + PriceChangeIncFees_Perc)
        unrealised_session_profit_incfees_total = float(unrealised_session_profit_incfees_total + PriceChangeIncFees_Total)

    # express unrealised profit as percentage of the full budget (ROI-style)
    unrealised_session_profit_incfees_perc = (unrealised_session_profit_incfees_total / BUDGET) * 100

    DECIMALS = int(decimals())
    # CURRENT_EXPOSURE = round((TRADE_TOTAL * len(coins_bought)), DECIMALS)
    CURRENT_EXPOSURE = round(exposure_calcuated, 0)
    INVESTMENT_TOTAL = round((TRADE_TOTAL * TRADE_SLOTS), DECIMALS)

    # truncating some of the above values to the correct decimal places before printing
    WIN_LOSS_PERCENT = 0
    if (trade_wins > 0) and (trade_losses > 0):
        WIN_LOSS_PERCENT = round((trade_wins / (trade_wins+trade_losses)) * 100, 2)
    if (trade_wins > 0) and (trade_losses == 0):
        WIN_LOSS_PERCENT = 100

    # console summary block
    print(f'')
    print(f'--------')
    print(f'STARTED : {bot_started_datetime} | Running for: {datetime.now() - bot_started_datetime}')
    print(f'CURRENT HOLDS : {len(coins_bought)}/{TRADE_SLOTS} ({float(CURRENT_EXPOSURE):g}/{float(INVESTMENT_TOTAL):g} {PAIR_WITH})')
    print(f'Buying Paused : {bot_paused}')
    print(f'')
    print(f'SESSION PROFIT (Inc Fees)')
    print(f'Realised : {txcolors.SELL_PROFIT if session_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{session_profit_incfees_perc:.4f}% Est:${session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
    print(f'Unrealised : {txcolors.SELL_PROFIT if unrealised_session_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{unrealised_session_profit_incfees_perc:.4f}% Est:${unrealised_session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
    print(f' Total : {txcolors.SELL_PROFIT if (session_profit_incfees_perc + unrealised_session_profit_incfees_perc) > 0. else txcolors.SELL_LOSS}{session_profit_incfees_perc + unrealised_session_profit_incfees_perc:.4f}% Est:${session_profit_incfees_total+unrealised_session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
    print(f'')
    print(f'ALL TIME DATA :')
    print(f'Profit : {txcolors.SELL_PROFIT if historic_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{historic_profit_incfees_perc:.4f}% Est:${historic_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
    print(f'Completed Trades: {trade_wins+trade_losses} (Wins:{trade_wins} Losses:{trade_losses})')
    print(f'Win Ratio : {float(WIN_LOSS_PERCENT):g}%')
    print(f'--------')
    print(f'')

    # one-line variant of the same report for Discord and the history log
    #msg1 = str(bot_started_datetime) + " | " + str(datetime.now() - bot_started_datetime)
    msg1 = str(datetime.now())
    msg2 = " | Trade Slots - " + str(len(coins_bought)) + "/" + str(TRADE_SLOTS) + " | Bot Paused: " + str(bot_paused) + ' '
    msg2 = msg2 + ' | Profit $:' + str(round(session_profit_incfees_total,4)) + ' (' + str(round(session_profit_incfees_perc,2)) + '%) '
    msg2 = msg2 + ' | Unrealised $:' + str(round(unrealised_session_profit_incfees_total,4)) + ' (' + str(round(unrealised_session_profit_incfees_perc,2)) + '%) '
    msg2 = msg2 + ' | Session Profit $:' + str(round(session_profit_incfees_total+unrealised_session_profit_incfees_total,4)) + ' (' + str(round(session_profit_incfees_perc + unrealised_session_profit_incfees_perc,2)) + '%) '
    msg2 = msg2 + ' | All Time Profit $: ' + str(round(historic_profit_incfees_total,4)) + ' (' + str(round(historic_profit_incfees_perc,2)) + '%) '
    msg2 = msg2 + ' | Total Trades: ' + str(trade_wins+trade_losses) + ' (Won: ' + str(trade_wins) + ' Lost: ' + str(trade_losses) + ') - Win %: ' + str(round(WIN_LOSS_PERCENT,2))

    msg_discord_balance(msg1, msg2)
    history_log(session_profit_incfees_perc, session_profit_incfees_total, unrealised_session_profit_incfees_perc, unrealised_session_profit_incfees_total, session_profit_incfees_perc + unrealised_session_profit_incfees_perc, session_profit_incfees_total+unrealised_session_profit_incfees_total, historic_profit_incfees_perc, historic_profit_incfees_total, trade_wins+trade_losses, trade_wins, trade_losses, WIN_LOSS_PERCENT)

    return msg1 + msg2
def history_log(sess_profit_perc, sess_profit, sess_profit_perc_unreal, sess_profit_unreal, sess_profit_perc_total, sess_profit_total, alltime_profit_perc, alltime_profit, total_trades, won_trades, lost_trades, winloss_ratio):
    """Append one tab-separated balance snapshot to HISTORY_LOG_FILE.

    Throttled to at most one entry per 60 seconds via the module-level
    last_history_log_date. Creates the file with a header row on first use.
    All profit figures are pre-rounded for readability.
    """
    global last_history_log_date

    time_between_insertion = datetime.now() - last_history_log_date

    # only log balance to log file once every 60 seconds
    # BUG FIX: was `.seconds`, which wraps at 24h — after a day-plus gap the
    # elapsed time looked tiny and the snapshot was silently skipped.
    if time_between_insertion.total_seconds() > 60:
        last_history_log_date = datetime.now()

        timestamp = datetime.now().strftime("%y-%m-%d %H:%M:%S")
        # write the header row the first time the log file is created
        if not os.path.exists(HISTORY_LOG_FILE):
            with open(HISTORY_LOG_FILE,'a+') as f:
                f.write('Datetime\tCoins Holding\tTrade Slots\tPausebot Active\tSession Profit %\tSession Profit $\tSession Profit Unrealised %\tSession Profit Unrealised $\tSession Profit Total %\tSession Profit Total $\tAll Time Profit %\tAll Time Profit $\tTotal Trades\tWon Trades\tLost Trades\tWin Loss Ratio\n')

        with open(HISTORY_LOG_FILE,'a+') as f:
            f.write(f'{timestamp}\t{len(coins_bought)}\t{TRADE_SLOTS}\t{str(bot_paused)}\t{str(round(sess_profit_perc,2))}\t{str(round(sess_profit,4))}\t{str(round(sess_profit_perc_unreal,2))}\t{str(round(sess_profit_unreal,4))}\t{str(round(sess_profit_perc_total,2))}\t{str(round(sess_profit_total,4))}\t{str(round(alltime_profit_perc,2))}\t{str(round(alltime_profit,4))}\t{str(total_trades)}\t{str(won_trades)}\t{str(lost_trades)}\t{str(winloss_ratio)}\n')
def msg_discord_balance(msg1, msg2):
    """Push the balance report to Discord, throttled to once per 60 seconds.

    Sends the full report only when the balance payload (msg2) changed since
    the previous push; otherwise sends a "..." ping so the channel still shows
    the bot is alive.
    """
    global last_msg_discord_balance_date, discord_msg_balance_data

    time_between_insertion = datetime.now() - last_msg_discord_balance_date

    # only put the balance message to discord once every 60 seconds and if the balance information has changed since last time
    # BUG FIX: use total_seconds() (`.seconds` wraps at 24h), and reset the
    # timestamp — the original never updated last_msg_discord_balance_date, so
    # after the first minute every single call was sent to Discord.
    if time_between_insertion.total_seconds() > 60:
        last_msg_discord_balance_date = datetime.now()
        if msg2 != discord_msg_balance_data:
            msg_discord(msg1 + msg2)
            discord_msg_balance_data = msg2
        else:
            # ping msg to know the bot is still running
            msg_discord("...")
def msg_discord(msg):
    """Post a single message to the configured Discord webhook.

    No-op (message is still built, matching prior behaviour) when the
    MSG_DISCORD feature flag is off.
    """
    message = msg + '\n\n'
    if not MSG_DISCORD:
        return
    # Webhook of the channel: edit channel --> Webhooks --> create webhook
    mUrl = "https://discordapp.com/api/webhooks/"+DISCORD_WEBHOOK
    response = requests.post(mUrl, json={"content": message})
    # print(response.content)
def pause_bot():
    '''Pause the script when external indicators detect a bearish trend in the market'''
    # Buying is suspended while signals/pausebot.pause exists on disk; selling
    # (stop loss / take profit) keeps running. Blocks until the file is removed.
    global bot_paused, session_profit_incfees_perc, hsp_head, session_profit_incfees_total

    # start counting for how long the bot has been paused
    start_time = time.perf_counter()

    while os.path.exists("signals/pausebot.pause"):
        # do NOT accept any external signals to buy while in pausebot mode
        remove_external_signals('buy')

        if bot_paused == False:
            # announce the transition into the paused state exactly once
            print(f'{txcolors.WARNING}Buying paused due to negative market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
            msg = str(datetime.now()) + ' | PAUSEBOT. Buying paused due to negative market conditions, stop loss and take profit will continue to work.'
            msg_discord(msg)
            bot_paused = True

        # Sell function needs to work even while paused
        coins_sold = sell_coins()
        remove_from_portfolio(coins_sold)
        last_price = get_price(True)

        # pausing here; print the balance once per TIME_DIFFERENCE cycle
        if hsp_head == 1:
            # print(f'Paused...Session profit: {session_profit_incfees_perc:.2f}% Est: ${session_profit_incfees_total:.{decimals()}f} {PAIR_WITH}')
            balance_report(last_price)

        time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL)

    else:
        # while/else: runs when the pause file disappears (loop exits normally)
        # stop counting the pause time
        stop_time = time.perf_counter()
        time_elapsed = timedelta(seconds=int(stop_time-start_time))

        # resume the bot and set bot_paused back to False
        if bot_paused == True:
            print(f'{txcolors.WARNING}Resuming buying due to positive market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}')
            msg = str(datetime.now()) + ' | PAUSEBOT. Resuming buying due to positive market conditions, total sleep time: ' + str(time_elapsed)
            msg_discord(msg)
            bot_paused = False

    return
def convert_volume():
    '''Converts the volume given in TRADE_TOTAL from USDT to the each coin's volume'''
    # Returns (volume, last_price): volume maps symbol -> order quantity sized
    # to the exchange's LOT_SIZE step, last_price is the snapshot from
    # wait_for_price().
    volatile_coins, number_of_coins, last_price = wait_for_price()
    lot_size = {}
    volume = {}

    for coin in volatile_coins:
        # Find the correct step size for each coin: max accuracy for BTC for
        # example is 6 decimal points while XRP is only 1.
        try:
            info = client.get_symbol_info(coin)
            # FIX: look the LOT_SIZE filter up by filterType (consistent with
            # update_portfolio) instead of assuming it is filters[2] — Binance
            # does not guarantee filter ordering.
            step_size = next(f for f in info['filters'] if f['filterType'] == 'LOT_SIZE')['stepSize']
            lot_size[coin] = step_size.index('1') - 1

            if lot_size[coin] < 0:
                lot_size[coin] = 0
        except Exception:
            # best effort: leave the coin out of lot_size and fall back to an
            # integer volume below (original behaviour)
            pass

        # calculate the volume in coin from TRADE_TOTAL in PAIR_WITH (default)
        volume[coin] = float(TRADE_TOTAL / float(last_price[coin]['price']))

        # define the volume with the correct step size
        if coin not in lot_size:
            volume[coin] = int(volume[coin])
        else:
            # if lot size has 0 decimal points, make the volume an integer
            if lot_size[coin] == 0:
                volume[coin] = int(volume[coin])
            else:
                volume[coin] = truncate(volume[coin], lot_size[coin])

    return volume, last_price
def buy():
    '''Place Buy market orders for each volatile coin found'''
    # Returns (orders, last_price, volume); orders maps symbol -> order data
    # (a fake single-entry list in TEST_MODE, exchange data when live).
    volume, last_price = convert_volume()
    orders = {}

    for coin in volume:
        if coin not in coins_bought:
            print(f"{txcolors.BUY}Preparing to buy {volume[coin]} of {coin} @ ${last_price[coin]['price']}{txcolors.DEFAULT}")
            msg1 = str(datetime.now()) + ' | BUY: ' + coin + '. V:' + str(volume[coin]) + ' P$:' + str(last_price[coin]['price'])
            msg_discord(msg1)

            if TEST_MODE:
                # fabricate a minimal order record instead of hitting the API
                orders[coin] = [{
                    'symbol': coin,
                    'orderId': 0,
                    'time': datetime.now().timestamp()
                }]

                # Log trade
                #if LOG_TRADES:
                write_log(f"\tBuy\t{coin}\t{volume[coin]}\t{last_price[coin]['price']}\t{PAIR_WITH}")
                write_signallsell(coin.removesuffix(PAIR_WITH))
                continue

            # try to create a real order if the test orders did not raise an exception
            try:
                order_details = client.create_order(
                    symbol = coin,
                    side = 'BUY',
                    type = 'MARKET',
                    quantity = volume[coin]
                )

            # error handling here in case position cannot be placed
            except Exception as e:
                print(f'buy() exception: {e}')

            # run the else block if the position has been placed and return order info
            else:
                orders[coin] = client.get_all_orders(symbol=coin, limit=1)

                # binance sometimes returns an empty list, the code will wait here until binance returns the order
                while orders[coin] == []:
                    print('Binance is being slow in returning the order, calling the API again...')

                    orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                    time.sleep(1)

                else:
                    # while/else: runs once Binance has returned the order
                    print('Order returned, saving order to file')

                    # NOTE(review): TEST_MODE already `continue`d above, so this
                    # branch is always the live path here — verify.
                    if not TEST_MODE:
                        orders[coin] = extract_order_data(order_details)
                        write_log(f"\tBuy\t{coin}\t{orders[coin]['volume']}\t{orders[coin]['avgPrice']}\t{PAIR_WITH}")
                    else:
                        write_log(f"\tBuy\t{coin}\t{volume[coin]}\t{last_price[coin]['price']}\t{PAIR_WITH}")

                    write_signallsell(coin)

        else:
            print(f'Signal detected, but there is already an active trade on {coin}')

    return orders, last_price, volume
def sell_coins(tpsl_override = False):
    '''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
    # Returns a dict (symbol -> order data) of the coins sold this pass.
    # Also updates the session/all-time profit totals and win/loss counters.
    # tpsl_override: force-sell everything because the session TP/SL was hit.
    global hsp_head, session_profit_incfees_perc, session_profit_incfees_total, coin_order_id, trade_wins, trade_losses, historic_profit_incfees_perc, historic_profit_incfees_total, sell_all_coins

    externals = sell_external_signals()
    last_price = get_price(False) # don't populate rolling window
    #last_price = get_price(add_to_historical=True) # don't populate rolling window
    coins_sold = {}
    # total configured capital; used to express each trade's profit as a %
    BUDGET = TRADE_TOTAL * TRADE_SLOTS

    for coin in list(coins_bought):
        LastPrice = float(last_price[coin]['price'])
        # per-unit and per-position fees for both sides of the round trip
        sellFee = (LastPrice * (TRADING_FEE/100))
        sellFeeTotal = (coins_bought[coin]['volume'] * LastPrice) * (TRADING_FEE/100)
        BuyPrice = float(coins_bought[coin]['bought_at'])
        buyFee = (BuyPrice * (TRADING_FEE/100))
        buyFeeTotal = (coins_bought[coin]['volume'] * BuyPrice) * (TRADING_FEE/100)
        PriceChange_Perc = float((LastPrice - BuyPrice) / BuyPrice * 100)
        PriceChangeIncFees_Perc = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) / (BuyPrice+buyFee) * 100)
        PriceChangeIncFees_Unit = float((LastPrice+sellFee) - (BuyPrice+buyFee))

        # define stop loss and take profit as absolute prices from the
        # per-position percentage thresholds stored in the portfolio
        TP = float(coins_bought[coin]['bought_at']) + ((float(coins_bought[coin]['bought_at']) * (coins_bought[coin]['take_profit']) / 100))
        SL = float(coins_bought[coin]['bought_at']) + ((float(coins_bought[coin]['bought_at']) * (coins_bought[coin]['stop_loss']) / 100))

        # check that the price is above the take profit and readjust SL and TP accordingly if trialing stop loss used
        if LastPrice > TP and USE_TRAILING_STOP_LOSS and not sell_all_coins and not tpsl_override:
            # increasing TP by TRAILING_TAKE_PROFIT (essentially next time to readjust SL)
            if PriceChange_Perc >= 0.8:
                # price has changed by 0.8% or greater, a big change. Make the STOP LOSS trail closely to the TAKE PROFIT
                # so you don't lose this increase in price if it falls back
                coins_bought[coin]['take_profit'] = PriceChange_Perc + TRAILING_TAKE_PROFIT
                coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
            else:
                # price has changed by less than 0.8%, a small change. Make the STOP LOSS trail loosely to the TAKE PROFIT
                # so you don't get stopped out of the trade prematurely
                coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
                coins_bought[coin]['take_profit'] = PriceChange_Perc + TRAILING_TAKE_PROFIT

            # if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.2f} and SL {coins_bought[coin]['stop_loss']:.2f} accordingly to lock-in profit")
            if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.{decimals()}f} and SL {coins_bought[coin]['stop_loss']:.{decimals()}f} accordingly to lock-in profit")
            continue

        # check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
        sellCoin = False
        sell_reason = ""

        if SELL_ON_SIGNAL_ONLY:
            # only sell if told to by external signal
            if coin in externals:
                sellCoin = True
                sell_reason = 'External Sell Signal'
        else:
            if LastPrice < SL:
                sellCoin = True
                if USE_TRAILING_STOP_LOSS:
                    if PriceChange_Perc >= 0:
                        sell_reason = "TTP "
                    else:
                        sell_reason = "TSL "
                else:
                    sell_reason = "SL "
                # NOTE(review): an SL-triggered sell reports the TP value here
                # (and the TP branch below reports SL) — looks swapped; verify.
                sell_reason = sell_reason + str(TP) + " reached"

            if LastPrice > TP:
                sellCoin = True
                sell_reason = "TP " + str(SL) + " reached"

            if coin in externals:
                sellCoin = True
                sell_reason = 'External Sell Signal'

        if sell_all_coins:
            sellCoin = True
            sell_reason = 'Sell All Coins'

        if tpsl_override:
            sellCoin = True
            sell_reason = 'Session TPSL Override reached'

        if sellCoin:
            print(f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.SELL_LOSS}Sell: {coins_bought[coin]['volume']} of {coin} | {sell_reason} | ${float(LastPrice):g} - ${float(BuyPrice):g} | Profit: {PriceChangeIncFees_Perc:.2f}% Est: {((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.{decimals()}f} {PAIR_WITH} (Inc Fees){txcolors.DEFAULT}")
            msg1 = str(datetime.now()) + '| SELL: ' + coin + '. R:' + sell_reason + ' P%:' + str(round(PriceChangeIncFees_Perc,2)) + ' P$:' + str(round(((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100,4))
            msg_discord(msg1)

            # try to create a real order
            try:
                if not TEST_MODE:
                    #lot_size = coins_bought[coin]['step_size']
                    #if lot_size == 0:
                    #    lot_size = 1
                    #lot_size = lot_size.index('1') - 1
                    #if lot_size < 0:
                    #    lot_size = 0
                    order_details = client.create_order(
                        symbol = coin,
                        side = 'SELL',
                        type = 'MARKET',
                        quantity = coins_bought[coin]['volume']
                    )

            # error handling here in case position cannot be placed
            except Exception as e:
                #if repr(e).upper() == "APIERROR(CODE=-1111): PRECISION IS OVER THE MAXIMUM DEFINED FOR THIS ASSET.":
                print(f"sell_coins() Exception occured on selling the coin! Coin: {coin}\nSell Volume coins_bought: {coins_bought[coin]['volume']}\nPrice:{LastPrice}\nException: {e}")

            # run the else block if coin has been sold and create a dict for each coin sold
            else:
                if not TEST_MODE:
                    # re-read the real fill price/fee from the exchange response
                    coins_sold[coin] = extract_order_data(order_details)
                    LastPrice = coins_sold[coin]['avgPrice']
                    sellFee = coins_sold[coin]['tradeFeeUnit']
                    coins_sold[coin]['orderid'] = coins_bought[coin]['orderid']
                    priceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
                    # update this from the actual Binance sale information
                    PriceChangeIncFees_Unit = float((LastPrice+sellFee) - (BuyPrice+buyFee))
                else:
                    coins_sold[coin] = coins_bought[coin]

                # prevent system from buying this coin for the next TIME_DIFFERENCE minutes
                volatility_cooloff[coin] = datetime.now()

                if DEBUG:
                    print(f"sell_coins() | Coin: {coin} | Sell Volume: {coins_bought[coin]['volume']} | Price:{LastPrice}")

                # Log trade
                #BB profit = ((LastPrice - BuyPrice) * coins_sold[coin]['volume']) * (1-(buyFee + sellFeeTotal))
                profit_incfees_total = coins_sold[coin]['volume'] * PriceChangeIncFees_Unit
                #write_log(f"Sell: {coins_sold[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit_incfees_total:.{decimals()}f} {PAIR_WITH} ({PriceChange_Perc:.2f}%)")
                write_log(f"\tSell\t{coin}\t{coins_sold[coin]['volume']}\t{BuyPrice}\t{PAIR_WITH}\t{LastPrice}\t{profit_incfees_total:.{decimals()}f}\t{PriceChange_Perc:.2f}\t{sell_reason}")

                # roll the realised profit into the session and all-time totals
                session_profit_incfees_total = session_profit_incfees_total + profit_incfees_total
                session_profit_incfees_perc = session_profit_incfees_perc + ((profit_incfees_total/BUDGET) * 100)
                historic_profit_incfees_total = historic_profit_incfees_total + profit_incfees_total
                historic_profit_incfees_perc = historic_profit_incfees_perc + ((profit_incfees_total/BUDGET) * 100)
                #TRADE_TOTAL*PriceChangeIncFees_Perc)/100

                if (LastPrice+sellFee) >= (BuyPrice+buyFee):
                    trade_wins += 1
                else:
                    trade_losses += 1

                update_bot_stats()

                if not sell_all_coins:
                    # within sell_all_coins, it will print display to screen
                    balance_report(last_price)

                # sometimes get "rate limited" errors from Binance if we try to sell too many coins at once
                # so wait 1 second in between sells
                time.sleep(1)

            continue

        # no action; print once every TIME_DIFFERENCE
        if hsp_head == 1:
            if len(coins_bought) > 0:
                #print(f"Holding: {coins_bought[coin]['volume']} of {coin} | {LastPrice} - {BuyPrice} | Profit: {txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.SELL_LOSS}{PriceChangeIncFees_Perc:.4f}% Est: ({(TRADE_TOTAL*PriceChangeIncFees_Perc)/100:.{decimals()}f} {PAIR_WITH}){txcolors.DEFAULT}")
                print(f"Holding: {coins_bought[coin]['volume']} of {coin} | {LastPrice} - {BuyPrice} | Profit: {txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.SELL_LOSS}{PriceChangeIncFees_Perc:.4f}% Est: ({((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.{decimals()}f} {PAIR_WITH}){txcolors.DEFAULT}")

    if hsp_head == 1 and len(coins_bought) == 0: print(f"No trade slots are currently in use")

    # if tpsl_override: is_bot_running = False

    return coins_sold
def extract_order_data(order_details):
    """Normalise a Binance market-order response into a flat dict.

    Market orders are not always filled at one price, so we combine all the
    'fills' into a volume-weighted average price and a total fee.
    (Fill-extraction logic originally contributed by GoranJovic — thank you!)

    :param order_details: raw order dict from the Binance API; must contain
        'fills', 'symbol', 'orderId' and 'transactTime'
    :return: dict with symbol, orderId, timestamp, avgPrice, volume,
        tradeFeeBNB (summed commissions) and tradeFeeUnit (approx per-unit fee)
    """
    global TRADING_FEE

    # running accumulators over the fills
    FILLS_TOTAL = 0
    FILLS_QTY = 0
    FILLS_FEE = 0
    BNB_WARNING = 0

    # loop through each 'fill':
    for fills in order_details['fills']:
        FILL_PRICE = float(fills['price'])
        FILL_QTY = float(fills['qty'])
        FILLS_FEE += float(fills['commission'])

        # warn (once) if the fee was not paid in BNB while the discounted
        # 0.075% BNB fee rate is configured
        if (fills['commissionAsset'] != 'BNB') and (TRADING_FEE == 0.075) and (BNB_WARNING == 0):
            # FIX: the original warning string was truncated mid-sentence
            print(f"WARNING: BNB not used for trading fee, please enable paying fees in BNB (and hold BNB) or set TRADING_FEE to your actual fee rate")
            BNB_WARNING += 1

        # quantity of fills * price
        FILLS_TOTAL += (FILL_PRICE * FILL_QTY)
        # add to running total of fills quantity
        FILLS_QTY += FILL_QTY

    # calculate average fill price:
    FILL_AVG = (FILLS_TOTAL / FILLS_QTY)

    #tradeFeeApprox = (float(FILLS_QTY) * float(FILL_AVG)) * (TRADING_FEE/100)
    # Olorin Sledge: I only want fee at the unit level, not the total level
    tradeFeeApprox = float(FILL_AVG) * (TRADING_FEE/100)

    # the volume size is sometimes outside of precision, correct it
    try:
        info = client.get_symbol_info(order_details['symbol'])
        # FIX: look the LOT_SIZE filter up by filterType instead of assuming
        # it is filters[2] — Binance does not guarantee filter ordering.
        step_size = next(f for f in info['filters'] if f['filterType'] == 'LOT_SIZE')['stepSize']
        lot_size = step_size.index('1') - 1

        if lot_size <= 0:
            FILLS_QTY = int(FILLS_QTY)
        else:
            FILLS_QTY = truncate(FILLS_QTY, lot_size)
    except Exception as e:
        # best effort: keep the raw quantity if the lookup fails
        print(f"extract_order_data(): Exception getting coin {order_details['symbol']} step size! Exception: {e}")

    # create object with received data from Binance
    transactionInfo = {
        'symbol': order_details['symbol'],
        'orderId': order_details['orderId'],
        'timestamp': order_details['transactTime'],
        'avgPrice': float(FILL_AVG),
        'volume': float(FILLS_QTY),
        'tradeFeeBNB': float(FILLS_FEE),
        'tradeFeeUnit': tradeFeeApprox,
    }
    return transactionInfo
def check_total_session_profit(coins_bought, last_price):
    """Stop the bot once overall session profit (realised + unrealised, fees
    included) crosses SESSION_TAKE_PROFIT or SESSION_STOP_LOSS.

    Sets is_bot_running to False and records the reason in
    session_tpsl_override_msg; the main loop performs the actual sell-off.
    """
    global is_bot_running, session_tpsl_override_msg

    budget = TRADE_SLOTS * TRADE_TOTAL
    unrealised_total = 0

    # accumulate the fee-inclusive unrealised profit of every open position
    for held in list(coins_bought):
        sell_price = float(last_price[held]['price'])
        sell_fee = (sell_price * (TRADING_FEE/100))
        buy_price = float(coins_bought[held]['bought_at'])
        buy_fee = (buy_price * (TRADING_FEE/100))
        position_change = float(((sell_price+sell_fee) - (buy_price+buy_fee)) * coins_bought[held]['volume'])
        unrealised_total = float(unrealised_total + position_change)

    allsession_profits_perc = session_profit_incfees_perc + ((unrealised_total / budget) * 100)

    if DEBUG: print(f'Session Override SL Feature: ASPP={allsession_profits_perc} STP {SESSION_TAKE_PROFIT} SSL {SESSION_STOP_LOSS}')

    if allsession_profits_perc >= float(SESSION_TAKE_PROFIT):
        session_tpsl_override_msg = "Session TP Override target of " + str(SESSION_TAKE_PROFIT) + "% met. Sell all coins now!"
        is_bot_running = False

    if allsession_profits_perc <= float(SESSION_STOP_LOSS):
        session_tpsl_override_msg = "Session SL Override target of " + str(SESSION_STOP_LOSS) + "% met. Sell all coins now!"
        is_bot_running = False
def update_portfolio(orders, last_price, volume):
    '''add every coin bought to our portfolio for tracking/selling later'''
    # Writes the updated portfolio to coins_bought_file_path after each coin.
    # print(orders)
    for coin in orders:
        try:
            # NOTE(review): in live mode orders[coin] is a dict (from
            # extract_order_data), so orders[coin][0] raises and this always
            # falls back to 0.1 — verify whether that is intended.
            coin_step_size = float(next(
                filter(lambda f: f['filterType'] == 'LOT_SIZE', client.get_symbol_info(orders[coin][0]['symbol'])['filters'])
            )['stepSize'])
        except Exception as ExStepSize:
            coin_step_size = .1

        if not TEST_MODE:
            # live orders carry real fill data from extract_order_data()
            coins_bought[coin] = {
                'symbol': orders[coin]['symbol'],
                'orderid': orders[coin]['orderId'],
                'timestamp': orders[coin]['timestamp'],
                'bought_at': orders[coin]['avgPrice'],
                'volume': orders[coin]['volume'],
                'volume_debug': volume[coin],
                'buyFeeBNB': orders[coin]['tradeFeeBNB'],
                'buyFee': orders[coin]['tradeFeeUnit'] * orders[coin]['volume'],
                'stop_loss': -STOP_LOSS,
                'take_profit': TAKE_PROFIT,
                'step_size': float(coin_step_size),
            }
            print(f'Order for {orders[coin]["symbol"]} with ID {orders[coin]["orderId"]} placed and saved to file.')
        else:
            # test orders are single-entry lists fabricated by buy()
            coins_bought[coin] = {
                'symbol': orders[coin][0]['symbol'],
                'orderid': orders[coin][0]['orderId'],
                'timestamp': orders[coin][0]['time'],
                'bought_at': last_price[coin]['price'],
                'volume': volume[coin],
                'stop_loss': -STOP_LOSS,
                'take_profit': TAKE_PROFIT,
                'step_size': float(coin_step_size),
            }
            print(f'Order for {orders[coin][0]["symbol"]} with ID {orders[coin][0]["orderId"]} placed and saved to file.')

        # save the coins in a json file in the same directory
        with open(coins_bought_file_path, 'w') as file:
            json.dump(coins_bought, file, indent=4)
def update_bot_stats():
    """Persist the all-time bot statistics to bot_stats_file_path as JSON.

    Saved so profit, win/loss counts and the session start survive restarts.
    """
    global trade_wins, trade_losses, historic_profit_incfees_perc, historic_profit_incfees_total

    snapshot = {
        'total_capital' : str(TRADE_SLOTS * TRADE_TOTAL),
        'botstart_datetime' : str(bot_started_datetime),
        'historicProfitIncFees_Percent': historic_profit_incfees_perc,
        'historicProfitIncFees_Total': historic_profit_incfees_total,
        'tradeWins': trade_wins,
        'tradeLosses': trade_losses,
    }

    # save session info for through-session portability
    with open(bot_stats_file_path, 'w') as stats_fh:
        json.dump(snapshot, stats_fh, indent=4)
def remove_from_portfolio(coins_sold):
    '''Remove coins sold due to SL or TP from portfolio'''
    # drop every sold symbol from the in-memory portfolio
    # (pop logic originally by getsec <3)
    for sold_coin in coins_sold:
        coins_bought.pop(sold_coin)

    # persist the shrunken portfolio
    with open(coins_bought_file_path, 'w') as portfolio_fh:
        json.dump(coins_bought, portfolio_fh, indent=4)

    # rebuild the signal-sell ticker list from what we still hold
    if os.path.exists('signalsell_tickers.txt'):
        os.remove('signalsell_tickers.txt')
    for held_coin in coins_bought:
        write_signallsell(held_coin.removesuffix(PAIR_WITH))
def write_log(logline):
    """Append one timestamped, tab-separated trade line to LOG_FILE.

    Writes the column header first when the file does not exist yet.
    """
    stamp = datetime.now().strftime("%y-%m-%d %H:%M:%S")

    if not os.path.exists(LOG_FILE):
        with open(LOG_FILE,'a+') as log_fh:
            log_fh.write('Datetime\tType\tCoin\tVolume\tBuy Price\tCurrency\tSell Price\tProfit $\tProfit %\tSell Reason\n')

    with open(LOG_FILE,'a+') as log_fh:
        log_fh.write(stamp + ' ' + logline + '\n')
def write_signallsell(symbol):
    """Append one ticker symbol to the signal-sell list file."""
    with open('signalsell_tickers.txt','a+') as tickers_fh:
        tickers_fh.write(symbol + '\n')
def remove_external_signals(fileext):
    """Delete every external signal file matching signals/*.<fileext>.

    :param fileext: signal type / extension ('buy', 'sell' or 'pause')
    """
    # BUG FIX: the original pattern was 'signals/*.{fileext}' WITHOUT the
    # f-prefix, so the placeholder was matched literally and no signal file
    # was ever removed. Also dropped the pointless per-line loop that opened
    # each file (leaking the handle) just to call os.remove repeatedly.
    for filename in glob.glob(f'signals/*.{fileext}'):
        try:
            os.remove(filename)
        except OSError:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file (unknown){txcolors.DEFAULT}')
def sell_all(msgreason, session_tspl_ovr = False):
    """Liquidate every held coin immediately and report the final balance.

    :param msgreason: human-readable reason included in the Discord message
    :param session_tspl_ovr: passed through to sell_coins() as tpsl_override
    """
    global sell_all_coins

    msg_discord(f'{str(datetime.now())} | SELL ALL COINS: {msgreason}')

    # stop external signals so no buying/selling/pausing etc can occur
    stop_signal_threads()

    # flag sell_coins() to liquidate regardless of TP/SL, then do it NOW
    sell_all_coins = True
    remove_from_portfolio(sell_coins(session_tspl_ovr))

    # display final info to screen and push it to discord
    final_prices = get_price()
    discordmsg = balance_report(final_prices)
    msg_discord(discordmsg)
def stop_signal_threads():
    """Terminate all external signalling processes.

    FIX: the original wrapped the whole loop in one bare try/except, so a
    failure terminating one process silently skipped all the remaining ones
    (and hid every other error). Terminate each process individually and
    keep going on failure.
    """
    try:
        threads = list(signalthreads)
    except NameError:
        # signalthreads not created yet (bot interrupted before module loading)
        return

    for signalthread in threads:
        try:
            print(f'Terminating thread {str(signalthread.name)}')
            signalthread.terminate()
        except Exception as e:
            print(f'Could not terminate {str(signalthread.name)}: {e}')
def truncate(number, decimals=0):
    """Return *number* truncated (not rounded) to *decimals* decimal places.

    :param number: value to truncate
    :param decimals: non-negative number of decimal places to keep
    :raises TypeError: if decimals is not an int
    :raises ValueError: if decimals is negative
    """
    if not isinstance(decimals, int):
        raise TypeError("decimal places must be an integer.")
    if decimals < 0:
        raise ValueError("decimal places has to be 0 or more.")
    if decimals == 0:
        return math.trunc(number)

    shift = 10.0 ** decimals
    return math.trunc(number * shift) / shift
if __name__ == '__main__':
    # Require Python 3.9+ (str.removesuffix and other 3.9 features are used).
    req_version = (3,9)
    if sys.version_info[:2] < req_version:
        print(f'This bot requires Python version 3.9 or higher/newer. You are running version {sys.version_info[:2]} - please upgrade your Python version!!')
        sys.exit()

    # Load arguments then parse settings
    args = parse_args()
    mymodule = {}

    # state used to throttle Discord balance messages and history-log writes
    discord_msg_balance_data = ""
    last_msg_discord_balance_date = datetime.now()
    last_history_log_date = datetime.now()

    # set to false at Start
    global bot_paused
    bot_paused = False

    DEFAULT_CONFIG_FILE = 'config.yml'
    DEFAULT_CREDS_FILE = 'creds.yml'

    config_file = args.config if args.config else DEFAULT_CONFIG_FILE
    creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
    parsed_config = load_config(config_file)
    parsed_creds = load_config(creds_file)

    # Default no debugging
    DEBUG = False

    # Load system vars
    TEST_MODE = parsed_config['script_options']['TEST_MODE']
    # LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
    LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
    HISTORY_LOG_FILE = "history.txt"
    DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
    AMERICAN_USER = parsed_config['script_options'].get('AMERICAN_USER')

    # Load trading vars
    PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
    TRADE_TOTAL = parsed_config['trading_options']['TRADE_TOTAL']
    TRADE_SLOTS = parsed_config['trading_options']['TRADE_SLOTS']
    FIATS = parsed_config['trading_options']['FIATS']
    TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
    RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
    CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
    STOP_LOSS = parsed_config['trading_options']['STOP_LOSS']
    TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
    #COOLOFF_PERIOD = parsed_config['trading_options']['COOLOFF_PERIOD']
    CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
    TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST']
    USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS']
    TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS']
    TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT']

    # Code modified from DJCommie fork
    # Load Session OVERRIDE values - used to STOP the bot when current session meets a certain STP or SSL value
    SESSION_TPSL_OVERRIDE = parsed_config['trading_options']['SESSION_TPSL_OVERRIDE']
    SESSION_TAKE_PROFIT = parsed_config['trading_options']['SESSION_TAKE_PROFIT']
    SESSION_STOP_LOSS = parsed_config['trading_options']['SESSION_STOP_LOSS']

    # Borrowed from DJCommie fork
    # If TRUE, coin will only sell based on an external SELL signal
    SELL_ON_SIGNAL_ONLY = parsed_config['trading_options']['SELL_ON_SIGNAL_ONLY']

    # Discord integration
    # Used to push alerts, messages etc to a discord channel
    MSG_DISCORD = parsed_config['trading_options']['MSG_DISCORD']

    TRADING_FEE = parsed_config['trading_options']['TRADING_FEE']
    SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']

    if DEBUG_SETTING or args.debug:
        DEBUG = True

    # Load creds for correct environment
    access_key, secret_key = load_correct_creds(parsed_creds)

    if DEBUG:
        print(f'Loaded config below\n{json.dumps(parsed_config, indent=4)}')
        print(f'Your credentials have been loaded from {creds_file}')

    if MSG_DISCORD:
        DISCORD_WEBHOOK = load_discord_creds(parsed_creds)

    sell_all_coins = False

    # Authenticate with the client, Ensure API key is good before continuing
    if AMERICAN_USER:
        client = Client(access_key, secret_key, tld='us')
    else:
        client = Client(access_key, secret_key)

    # If the users has a bad / incorrect API key.
    # this will stop the script from starting, and display a helpful error.
    api_ready, msg = test_api_key(client, BinanceAPIException)
    if api_ready is not True:
        exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}')

    # Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
    if CUSTOM_LIST: tickers=[line.strip() for line in open(TICKERS_LIST)]

    # try to load all the coins bought by the bot if the file exists and is not empty
    coins_bought = {}

    # test and live sessions keep completely separate state files
    if TEST_MODE:
        file_prefix = 'test_'
    else:
        file_prefix = 'live_'

    # path to the saved coins_bought file
    coins_bought_file_path = file_prefix + 'coins_bought.json'

    # The below mod was stolen and altered from GoGo's fork, a nice addition for keeping a historical history of profit across multiple bot sessions.
    # path to the saved bot_stats file
    bot_stats_file_path = file_prefix + 'bot_stats.json'

    # use separate files for testing and live trading
    LOG_FILE = file_prefix + LOG_FILE
    HISTORY_LOG_FILE = file_prefix + HISTORY_LOG_FILE

    bot_started_datetime = datetime.now()
    total_capital_config = TRADE_SLOTS * TRADE_TOTAL

    # restore all-time stats from a previous session, if any
    if os.path.isfile(bot_stats_file_path) and os.stat(bot_stats_file_path).st_size!= 0:
        with open(bot_stats_file_path) as file:
            bot_stats = json.load(file)
        # load bot stats:
        try:
            bot_started_datetime = datetime.strptime(bot_stats['botstart_datetime'], '%Y-%m-%d %H:%M:%S.%f')
        except Exception as e:
            print (f'Exception on reading botstart_datetime from {bot_stats_file_path}. Exception: {e}')
            bot_started_datetime = datetime.now()
        try:
            total_capital = bot_stats['total_capital']
        except Exception as e:
            print (f'Exception on reading total_capital from {bot_stats_file_path}. Exception: {e}')
            total_capital = TRADE_SLOTS * TRADE_TOTAL
        historic_profit_incfees_perc = bot_stats['historicProfitIncFees_Percent']
        historic_profit_incfees_total = bot_stats['historicProfitIncFees_Total']
        trade_wins = bot_stats['tradeWins']
        trade_losses = bot_stats['tradeLosses']
        # re-base the historic % if the configured capital has changed
        if total_capital != total_capital_config:
            historic_profit_incfees_perc = (historic_profit_incfees_total / total_capital_config) * 100

    # rolling window of prices; cyclical queue
    historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
    hsp_head = -1

    # prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
    volatility_cooloff = {}

    # if saved coins_bought json file exists and it's not empty then load it
    if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size!= 0:
        with open(coins_bought_file_path) as file:
            coins_bought = json.load(file)

    print('Press Ctrl-C to stop the script')

    if not TEST_MODE:
        if not args.notimeout: # if notimeout skip this (fast for dev tests)
            print('WARNING: Test mode is disabled in the configuration, you are using _LIVE_ funds.')
            print('WARNING: Waiting 10 seconds before live trading as a security measure!')
            time.sleep(10)

    # clear out any stale external signal files before starting
    remove_external_signals('buy')
    remove_external_signals('sell')
    remove_external_signals('pause')

    # load signalling modules
    signalthreads = []
    try:
        if len(SIGNALLING_MODULES) > 0:
            for module in SIGNALLING_MODULES:
                print(f'Starting {module}')
                mymodule[module] = importlib.import_module(module)
                # t = threading.Thread(target=mymodule[module].do_work, args=())
                t = multiprocessing.Process(target=mymodule[module].do_work, args=())
                t.name = module
                t.daemon = True
                t.start()
                # add process to a list. This is so the thread can be terminated at a later time
                signalthreads.append(t)
                time.sleep(2)
        else:
            print(f'No modules to load {SIGNALLING_MODULES}')
    except Exception as e:
        print(f'Loading external signals exception: {e}')

    # seed initial prices
    get_price()

    # counters for transient Binance errors, reported on each re-loop
    TIMEOUT_COUNT=0
    READ_CONNECTERR_COUNT=0
    BINANCE_API_EXCEPTION=0

    # main trade loop: buy volatile coins, then sell anything hitting TP/SL
    while is_bot_running:
        try:
            orders, last_price, volume = buy()
            update_portfolio(orders, last_price, volume)
            if SESSION_TPSL_OVERRIDE:
                check_total_session_profit(coins_bought, last_price)
            coins_sold = sell_coins()
            remove_from_portfolio(coins_sold)
            update_bot_stats()
        except ReadTimeout as rt:
            TIMEOUT_COUNT += 1
            print(f'We got a timeout error from Binance. Re-loop. Connection Timeouts so far: {TIMEOUT_COUNT}')
        except ConnectionError as ce:
            READ_CONNECTERR_COUNT += 1
            print(f'We got a connection error from Binance. Re-loop. Connection Errors so far: {READ_CONNECTERR_COUNT}')
        except BinanceAPIException as bapie:
            BINANCE_API_EXCEPTION += 1
            print(f'We got an API error from Binance. Re-loop. API Errors so far: {BINANCE_API_EXCEPTION}.\nException:\n{bapie}')
        except KeyboardInterrupt as ki:
            # stop external signal threads
            stop_signal_threads()
            # ask user if they want to sell all coins
            print(f'\n\n\n')
            sellall = input(f'{txcolors.WARNING}Program execution ended by user!\n\nDo you want to sell all coins (y/N)?{txcolors.DEFAULT}')
            if sellall.upper() == "Y":
                # sell all coins
                sell_all('Program execution ended by user!')
            sys.exit(0)

        # is_bot_running is cleared by check_total_session_profit when the
        # session-level TP/SL override has been hit
        if not is_bot_running:
            if SESSION_TPSL_OVERRIDE:
                print(f'')
                print(f'')
                print(f'{txcolors.WARNING}{session_tpsl_override_msg}{txcolors.DEFAULT}')
                sell_all(session_tpsl_override_msg, True)
                sys.exit(0)
            else:
                print(f'')
                print(f'')
                print(f'Bot terminated for some reason.')
main.py | # -*- coding: utf-8 -*-
import threading
import time
import os
import random
from tools.box.downloadBook.spyder import aszwDownloader, aszwParser, aszwWriter
from tools.box.downloadBook.db import dbController
# Novel table-of-contents page (translated from the original Chinese
# comment; not referenced by the code visible in this file).
home = "http://www.23zw.me/olread/68/68913/"
# Polling intervals (seconds) for the book-level and section-level thread pools.
SLEEP_TIME = 0.5
SLEEP_TIME2 = 0.1
class SpiderMain(object):
    """Crawler that downloads novels section-by-section and writes them to disk."""

    def __init__(self):
        self.downloader = aszwDownloader.Downloader()
        self.parser = aszwParser.Parser()
        self.cookies = dbController.dbc('bookwarehouse').getCookies()

    def process_book(self, section_urls, title):
        """Download every section of one book using up to 20 worker threads."""
        print('section num is:' + str(len(section_urls)))
        self.outputer = aszwWriter.Writer(len(section_urls))

        def process_section(url, title):
            # Fetch one section with a randomly chosen cookie and hand the
            # parsed result to the writer; failures are logged, not raised.
            try:
                cookie = self.cookies[random.randint(0, 9)]
                html_cont = self.downloader.m_download(url=url, cookie=cookie)
                new_data = self.parser.parser_Section(html_cont)
                self.outputer.collect_data(new_data, title)
                print("第" + "%d" % new_data['section_title'] + "章" + "下载中...")
            except Exception as e:
                print(e)
                print("下载失败!!!")

        threads = []
        while threads or section_urls:
            # Prune finished workers. Rebuild the list instead of calling
            # remove() while iterating, which skips elements.
            threads = [t for t in threads if t.is_alive()]
            while len(threads) < 20 and section_urls:
                url = section_urls.pop()
                # BUG FIX: pass the callable and its arguments separately.
                # The original used target=process_section(url, title), which
                # ran the download synchronously and gave Thread a None target.
                thread = threading.Thread(target=process_section, args=(url, title))
                thread.daemon = True  # setDaemon() is deprecated
                thread.start()
                threads.append(thread)
            time.sleep(SLEEP_TIME2)
        self.outputer.output_html()

    def craw(self, max_threads):
        """Walk every list page and spawn one download thread per book."""
        list_urls = self.parser.find_list_urls()
        for list_url in list_urls:
            book_urls = self.parser.find_books_urls(list_url)
            threads = []
            print("共有" + str(len(book_urls)) + "本书")
            while threads or book_urls:
                threads = [t for t in threads if t.is_alive()]
                while len(threads) < max_threads and book_urls:
                    print('进程数为:' + str(len(threads)))
                    book_url = book_urls.pop()
                    section_urls, title = self.parser.find_section_urls(book_url)
                    if os.path.exists("C:/book/" + title + ".txt"):
                        print(title + '已下载')
                    else:
                        # BUG FIX: same Thread(target=...) misuse as above.
                        thread = threading.Thread(target=self.process_book,
                                                  args=(section_urls, title))
                        thread.daemon = True
                        thread.start()
                        threads.append(thread)
                time.sleep(SLEEP_TIME)
if __name__ == '__main__':
    # Entry point: crawl with at most 20 concurrent book threads.
    spider = SpiderMain()
    spider.craw(20)
|
fake.py | # This file is Copyright (c) 2010 by the GPSD project
# BSD terms apply: see the file COPYING in the distribution root for details.
"""
gpsfake.py -- classes for creating a controlled test environment around gpsd.
The gpsfake(1) regression tester shipped with gpsd is a trivial wrapper
around this code. For a more interesting usage example, see the
valgrind-audit script shipped with the gpsd code.
To use this code, start by instantiating a TestSession class. Use the
prefix argument if you want to run the daemon under some kind of run-time
monitor like valgrind or gdb. Here are some particularly useful possibilities:
valgrind --tool=memcheck --gen-suppressions=yes --leak-check=yes
Run under Valgrind, checking for malloc errors and memory leaks.
xterm -e gdb -tui --args
Run under gdb, controlled from a new xterm.
You can use the options argument to pass in daemon options; normally you will
use this to set the debug-logging level.
On initialization, the test object spawns an instance of gpsd with no
devices or clients attached, connected to a control socket.
TestSession has methods to attach and detach fake GPSes. The
TestSession class simulates GPS devices for you with objects composed
from a pty and a class instance that cycles sentences into the master side
from some specified logfile; gpsd reads the slave side. A fake GPS is
identified by the string naming its slave device.
TestSession also has methods to start and end client sessions. Daemon
responses to a client are fed to a hook function which, by default,
discards them. You can change the hook to sys.stdout.write() to dump
responses to standard output (this is what the gpsfake executable
does) or do something more exotic. A client session is identified by a
small integer that counts the number of client session starts.
There are a couple of convenience methods. TestSession.wait() does nothing,
allowing a specified number of seconds to elapse. TestSession.send()
ships commands to an open client session.
TestSession does not currently capture the daemon's log output. It is
run with -N, so the output will go to stderr (along with, for example,
Valgrind notifications).
Each FakeGPS instance tries to packetize the data from the logfile it
is initialized with. It uses the same packet-getter as the daemon.
The TestSession code maintains a run queue of FakeGPS and gps.gps (client-
session) objects. It repeatedly cycles through the run queue. For each
client session object in the queue, it tries to read data from gpsd. For
each fake GPS, it sends one line of stored data. When a fake-GPS's
go predicate becomes false, the fake GPS is removed from the run queue.
There are two ways to use this code. The more deterministic is
non-threaded mode: set up your client sessions and fake GPS devices,
then call the run() method. The run() method will terminate when
there are no more objects in the run queue. Note, you must have
created at least one fake client or fake GPS before calling run(),
otherwise it will terminate immediately.
To allow for adding and removing clients while the test is running,
run in threaded mode by calling the start() method. This simply calls
the run method in a subthread, with locking of critical regions.
"""
import sys, os, time, signal, pty, termios # fcntl, array, struct
import exceptions, threading, socket
import gps
import packet as sniffer
# The two magic numbers below have to be derived from observation. If
# they're too high you'll slow the tests down a lot. If they're too low
# you'll get random spurious regression failures that usually look
# like lines missing from the end of the test output relative to the
# check file. These numbers might have to be adjusted upward on faster
# machines. The need for them may be symptomatic of race conditions
# in the pty layer or elsewhere.
# Define a per-line delay on writes so we won't spam the buffers in
# the pty layer or gpsd itself. Removing this entirely was tried but
# caused failures under NetBSD. Values smaller than the system timer
# tick don't make any difference here.
WRITE_PAD = 0.001
# We delay briefly after a GPS source is exhausted before removing it.
# This should give its subscribers time to get gpsd's response before
# we call the cleanup code. Note that using fractional seconds in
# CLOSE_DELAY may have no effect; Python time.time() returns a float
# value, but it is not guaranteed by Python that the C implementation
# underneath will return with precision finer than 1 second. (Linux
# and *BSD return full precision.)
CLOSE_DELAY = 1
class TestLoadError(exceptions.Exception):
    "Error raised when a logfile cannot be digested into test sentences."
    def __init__(self, msg):
        self.msg = msg
class TestLoad:
"Digest a logfile into a list of sentences we can cycle through."
def __init__(self, logfp, predump=False):
self.sentences = [] # This is the interesting part
if type(logfp) == type(""):
logfp = open(logfp, "r");
self.name = logfp.name
self.logfp = logfp
self.predump = predump
self.logfile = logfp.name
self.type = None
self.sourcetype = "pty"
self.serial = None
# Grab the packets
getter = sniffer.new()
#gps.packet.register_report(reporter)
type_latch = None
while True:
(len, ptype, packet) = getter.get(logfp.fileno())
if len <= 0:
break
elif ptype == sniffer.COMMENT_PACKET:
# Some comments are magic
if "Serial:" in packet:
# Change serial parameters
packet = packet[1:].strip()
try:
(xx, baud, params) = packet.split()
baud = int(baud)
if params[0] in ('7', '8'):
databits = int(params[0])
else:
raise ValueError
if params[1] in ('N', 'O', 'E'):
parity = params[1]
else:
raise ValueError
if params[2] in ('1', '2'):
stopbits = int(params[2])
else:
raise ValueError
except (ValueError, IndexError):
raise TestLoadError("bad serial-parameter spec in %s"%\
logfp.name)
self.serial = (baud, databits, parity, stopbits)
elif "UDP" in packet:
self.sourcetype = "UDP"
elif "%" in packet:
# Pass through for later interpretation
self.sentences.append(packet)
else:
if type_latch is None:
type_latch = ptype
if self.predump:
print `packet`
if not packet:
raise TestLoadError("zero-length packet from %s"%\
logfp.name)
self.sentences.append(packet)
# Look at the first packet to grok the GPS type
self.textual = (type_latch == sniffer.NMEA_PACKET)
if self.textual:
self.legend = "gpsfake: line %d: "
else:
self.legend = "gpsfake: packet %d"
class PacketError(exceptions.Exception):
    "Error raised on a malformed or unusable packet."
    def __init__(self, msg):
        self.msg = msg
class FakeGPS:
    "Base class for simulated GPS devices fed from a canned logfile."
    def __init__(self, testload, progress=None):
        self.testload = testload
        self.progress = progress
        self.go_predicate = lambda: True
        self.readers = 0
        self.index = 0
        # BUG FIX: progress defaults to None, so guard before calling it
        # (feed() below already guards the same way).
        if self.progress:
            self.progress("gpsfake: %s provides %d sentences\n" % (self.testload.name, len(self.testload.sentences)))
    def feed(self):
        "Feed a line from the contents of the GPS log to the daemon."
        line = self.testload.sentences[self.index % len(self.testload.sentences)]
        if "%Delay:" in line:
            # Delay specified number of seconds
            delay = line.split()[1]
            time.sleep(int(delay))
        # self.write has to be set by the derived class
        self.write(line)
        if self.progress:
            self.progress("gpsfake: %s feeds %d=%s\n" % (self.testload.name, len(line), repr(line)))
        time.sleep(WRITE_PAD)
        self.index += 1
class FakePTY(FakeGPS):
    "A FakePTY is a pty with a test log ready to be cycled to it."
    def __init__(self, testload,
                 speed=4800, databits=8, parity='N', stopbits=1,
                 progress=None):
        FakeGPS.__init__(self, testload, progress)
        # Allow Serial: header to be overridden by explicit speed.
        if self.testload.serial:
            (speed, databits, parity, stopbits) = self.testload.serial
        self.speed = speed
        # Map numeric baud rates to termios speed constants.
        baudrates = {
            0: termios.B0,
            50: termios.B50,
            75: termios.B75,
            110: termios.B110,
            134: termios.B134,
            150: termios.B150,
            200: termios.B200,
            300: termios.B300,
            600: termios.B600,
            1200: termios.B1200,
            1800: termios.B1800,
            2400: termios.B2400,
            4800: termios.B4800,
            9600: termios.B9600,
            19200: termios.B19200,
            38400: termios.B38400,
            57600: termios.B57600,
            115200: termios.B115200,
            230400: termios.B230400,
        }
        speed = baudrates[speed] # Throw an error if the speed isn't legal
        (self.fd, self.slave_fd) = pty.openpty()
        self.byname = os.ttyname(self.slave_fd)
        # Configure the slave side to the requested serial parameters;
        # gpsd will read from this end.
        (iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self.slave_fd)
        cc[termios.VMIN] = 1
        cflag &= ~(termios.PARENB | termios.PARODD | termios.CRTSCTS)
        cflag |= termios.CREAD | termios.CLOCAL
        iflag = oflag = lflag = 0
        iflag &=~ (termios.PARMRK | termios.INPCK)
        cflag &=~ (termios.CSIZE | termios.CSTOPB | termios.PARENB | termios.PARODD)
        if databits == 7:
            cflag |= termios.CS7
        else:
            cflag |= termios.CS8
        if stopbits == 2:
            cflag |= termios.CSTOPB
        if parity == 'E':
            iflag |= termios.INPCK
            cflag |= termios.PARENB
        elif parity == 'O':
            iflag |= termios.INPCK
            cflag |= termios.PARENB | termios.PARODD
        ispeed = ospeed = speed
        termios.tcsetattr(self.slave_fd, termios.TCSANOW,
                          [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
    def read(self):
        "Discard control strings written by gpsd."
        # A tcflush implementation works on Linux but fails on OpenBSD 4.
        termios.tcflush(self.fd, termios.TCIFLUSH)
        # Alas, the FIONREAD version also works on Linux and fails on OpenBSD.
        #try:
        #    buf = array.array('i', [0])
        #    fcntl.ioctl(self.master_fd, termios.FIONREAD, buf, True)
        #    n = struct.unpack('i', buf)[0]
        #    os.read(self.master_fd, n)
        #except IOError:
        #    pass
    def write(self, line):
        # Push one sentence into the master side of the pty.
        os.write(self.fd, line)
    def drain(self):
        "Wait for the associated device to drain (e.g. before closing)."
        termios.tcdrain(self.fd)
class FakeUDP(FakeGPS):
    "A UDP broadcaster with a test log ready to be cycled to it."
    def __init__(self, testload,
                 ipaddr, port,
                 progress=None):
        FakeGPS.__init__(self, testload, progress)
        self.ipaddr = ipaddr
        self.port = port
        # gpsd identifies UDP sources by this URL-style name.
        self.byname = "udp://" + ipaddr + ":" + port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def read(self):
        "Discard control strings written by gpsd."
        pass
    def write(self, line):
        # Broadcast one sentence as a single datagram.
        destination = (self.ipaddr, int(self.port))
        self.sock.sendto(line, destination)
    def drain(self):
        "Wait for the associated device to drain (e.g. before closing)."
        pass # shutdown() fails on UDP
class DaemonError(exceptions.Exception):
    "Error raised when the gpsd daemon cannot be located or fails to start."
    def __init__(self, msg):
        self.msg = msg
    def __str__(self):
        return repr(self.msg)
class DaemonInstance:
    "Control a gpsd instance."
    def __init__(self, control_socket=None):
        self.sockfile = None
        self.pid = None
        if control_socket:
            self.control_socket = control_socket
        else:
            # Per-process default paths so parallel test runs don't collide.
            self.control_socket = "/tmp/gpsfake-%d.sock" % os.getpid()
        self.pidfile = "/tmp/gpsfake_pid-%s" % os.getpid()
    def spawn(self, options, port, background=False, prefix=""):
        "Spawn a daemon instance."
        self.spawncmd = None
        # Look for gpsd in GPSD_HOME env variable
        if os.environ.get('GPSD_HOME'):
            for path in os.environ['GPSD_HOME'].split(':'):
                _spawncmd = "%s/gpsd" % path
                if os.path.isfile(_spawncmd) and os.access(_spawncmd, os.X_OK):
                    self.spawncmd = _spawncmd
                    break
        # if we could not find it yet try PATH env variable for it
        if not self.spawncmd:
            if not '/usr/sbin' in os.environ['PATH']:
                os.environ['PATH']=os.environ['PATH'] + ":/usr/sbin"
            for path in os.environ['PATH'].split(':'):
                _spawncmd = "%s/gpsd" % path
                if os.path.isfile(_spawncmd) and os.access(_spawncmd, os.X_OK):
                    self.spawncmd = _spawncmd
                    break
        if not self.spawncmd:
            raise DaemonError("Cannot execute gpsd: executable not found. Set GPSD_HOME env variable")
        # The -b option to suppress hanging on probe returns is needed to cope
        # with OpenBSD (and possibly other non-Linux systems) that don't support
        # anything we can use to implement the FakeGPS.read() method
        self.spawncmd += " -b -N -S %s -F %s -P %s %s" % (port, self.control_socket, self.pidfile, options)
        if prefix:
            self.spawncmd = prefix + " " + self.spawncmd.strip()
        if background:
            self.spawncmd += " &"
        status = os.system(self.spawncmd)
        if os.WIFSIGNALED(status) or os.WEXITSTATUS(status):
            raise DaemonError("daemon exited with status %d" % status)
    def wait_pid(self):
        "Wait for the daemon, get its PID and a control-socket connection."
        while True:
            try:
                fp = open(self.pidfile)
            except IOError:
                # Daemon hasn't created the pidfile yet; poll.
                time.sleep(0.1)
                continue
            try:
                fp.seek(0)
                pidstr = fp.read()
                self.pid = int(pidstr)
            except ValueError:
                time.sleep(0.5)
                continue # Avoid race condition -- PID not yet written
            fp.close()
            break
    def __get_control_socket(self):
        # Now we know it's running, get a connection to the control socket.
        if not os.path.exists(self.control_socket):
            return None
        try:
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
            self.sock.connect(self.control_socket)
        except socket.error:
            # NOTE(review): if socket.socket() itself raised, self.sock may
            # be unset here and this guard would AttributeError — confirm.
            if self.sock:
                self.sock.close()
            self.sock = None
        return self.sock
    def is_alive(self):
        "Is the daemon still alive?"
        try:
            # Signal 0 only probes for process existence.
            os.kill(self.pid, 0)
            return True
        except OSError:
            return False
    def add_device(self, path):
        "Add a device to the daemon's internal search list."
        if self.__get_control_socket():
            self.sock.sendall("+%s\r\n\x00" % path)
            self.sock.recv(12)
            self.sock.close()
    def remove_device(self, path):
        "Remove a device from the daemon's internal search list."
        if self.__get_control_socket():
            self.sock.sendall("-%s\r\n\x00" % path)
            self.sock.recv(12)
            self.sock.close()
    def kill(self):
        "Kill the daemon instance."
        if self.pid:
            try:
                os.kill(self.pid, signal.SIGTERM)
                # Raises an OSError for ESRCH when we've killed it.
                while True:
                    os.kill(self.pid, signal.SIGTERM)
                    time.sleep(0.01)
            except OSError:
                pass
            self.pid = None
class TestSessionError(exceptions.Exception):
    "Error raised for faults in the test-session run queue."
    def __init__(self, msg):
        self.msg = msg
class TestSession:
    "Manage a session including a daemon with fake GPSes and clients."
    def __init__(self, prefix=None, port=None, options=None, verbose=0, predump=False, udp=False):
        "Initialize the test session by launching the daemon."
        self.prefix = prefix
        self.port = port
        self.options = options
        self.verbose = verbose
        self.predump = predump
        self.udp = udp
        self.daemon = DaemonInstance()
        self.fakegpslist = {}
        self.client_id = 0
        self.readers = 0
        self.writers = 0
        self.runqueue = []
        self.index = 0
        if port:
            self.port = port
        else:
            self.port = gps.GPSD_PORT
        self.progress = lambda x: None
        self.reporter = lambda x: None
        self.default_predicate = None
        self.fd_set = []
        self.threadlock = None
    def spawn(self):
        "Spawn the daemon, cleaning it up on fatal signals."
        for sig in (signal.SIGQUIT, signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, lambda signal, frame: self.cleanup())
        self.daemon.spawn(background=True, prefix=self.prefix, port=self.port, options=self.options)
        self.daemon.wait_pid()
    def set_predicate(self, pred):
        "Set a default go predicate for the session."
        self.default_predicate = pred
    def gps_add(self, logfile, speed=19200, pred=None):
        "Add a simulated GPS being fed by the specified logfile."
        self.progress("gpsfake: gps_add(%s, %d)\n" % (logfile, speed))
        if logfile not in self.fakegpslist:
            testload = TestLoad(logfile, predump=self.predump)
            if testload.sourcetype == "UDP" or self.udp:
                newgps = FakeUDP(testload, ipaddr="127.0.0.1", port="5000",
                                 progress=self.progress)
            else:
                newgps = FakePTY(testload, speed=speed,
                                 progress=self.progress)
            if pred:
                newgps.go_predicate = pred
            elif self.default_predicate:
                newgps.go_predicate = self.default_predicate
            self.fakegpslist[newgps.byname] = newgps
            self.append(newgps)
            newgps.exhausted = 0
        self.daemon.add_device(newgps.byname)
        return newgps.byname
    def gps_remove(self, name):
        "Remove a simulated GPS from the daemon's search list."
        self.progress("gpsfake: gps_remove(%s)\n" % name)
        self.fakegpslist[name].drain()
        self.remove(self.fakegpslist[name])
        self.daemon.remove_device(name)
        del self.fakegpslist[name]
    def client_add(self, commands):
        "Initiate a client session and force connection to a fake GPS."
        self.progress("gpsfake: client_add()\n")
        newclient = gps.gps(port=self.port, verbose=self.verbose)
        self.append(newclient)
        newclient.id = self.client_id + 1
        self.client_id += 1
        self.progress("gpsfake: client %d has %s\n" % (self.client_id,newclient.device))
        if commands:
            self.initialize(newclient, commands)
        return self.client_id
    def client_remove(self, cid):
        "Terminate a client session."
        self.progress("gpsfake: client_remove(%d)\n" % cid)
        for obj in self.runqueue:
            if isinstance(obj, gps.gps) and obj.id == cid:
                self.remove(obj)
                return True
        else:
            # for/else: only reached when no client matched cid.
            return False
    def wait(self, seconds):
        "Wait, doing nothing."
        self.progress("gpsfake: wait(%d)\n" % seconds)
        time.sleep(seconds)
    def gather(self, seconds):
        "Wait, doing nothing but watching for sentences."
        self.progress("gpsfake: gather(%d)\n" % seconds)
        #mark = time.time()
        time.sleep(seconds)
        #if self.timings.c_recv_time <= mark:
        #    TestSessionError("no sentences received\n")
    def cleanup(self):
        "We're done, kill the daemon."
        self.progress("gpsfake: cleanup()\n")
        if self.daemon:
            self.daemon.kill()
            self.daemon = None
    def run(self):
        "Run the tests."
        try:
            self.progress("gpsfake: test loop begins\n")
            while self.daemon:
                # We have to read anything that gpsd might have tried
                # to send to the GPS here -- under OpenBSD the
                # TIOCDRAIN will hang, otherwise.
                for device in self.runqueue:
                    if isinstance(device, FakeGPS):
                        device.read()
                had_output = False
                chosen = self.choose()
                if isinstance(chosen, FakeGPS):
                    # Exhausted sources linger for CLOSE_DELAY seconds so
                    # subscribers can see gpsd's final response.
                    if chosen.exhausted and (time.time() - chosen.exhausted > CLOSE_DELAY):
                        self.gps_remove(chosen.byname)
                        self.progress("gpsfake: GPS %s removed\n" % chosen.byname)
                    elif not chosen.go_predicate(chosen.index, chosen):
                        if chosen.exhausted == 0:
                            chosen.exhausted = time.time()
                            self.progress("gpsfake: GPS %s ran out of input\n" % chosen.byname)
                    else:
                        chosen.feed()
                elif isinstance(chosen, gps.gps):
                    if chosen.enqueued:
                        chosen.send(chosen.enqueued)
                        chosen.enqueued = ""
                    while chosen.waiting():
                        chosen.poll()
                        if chosen.valid & gps.PACKET_SET:
                            self.reporter(chosen.response)
                        had_output = True
                else:
                    raise TestSessionError("test object of unknown type")
                if not self.writers and not had_output:
                    self.progress("gpsfake: no writers and no output\n")
                    break
            self.progress("gpsfake: test loop ends\n")
        finally:
            self.cleanup()

    # All knowledge about locks and threading is below this line,
    # except for the bare fact that self.threadlock is set to None
    # in the class init method.

    def append(self, obj):
        "Add a producer or consumer to the object list."
        if self.threadlock:
            self.threadlock.acquire()
        self.runqueue.append(obj)
        if isinstance(obj, FakeGPS):
            self.writers += 1
        elif isinstance(obj, gps.gps):
            self.readers += 1
        if self.threadlock:
            self.threadlock.release()
    def remove(self, obj):
        "Remove a producer or consumer from the object list."
        if self.threadlock:
            self.threadlock.acquire()
        self.runqueue.remove(obj)
        if isinstance(obj, FakeGPS):
            self.writers -= 1
        elif isinstance(obj, gps.gps):
            self.readers -= 1
        self.index = min(len(self.runqueue)-1, self.index)
        if self.threadlock:
            self.threadlock.release()
    def choose(self):
        "Atomically get the next object scheduled to do something."
        if self.threadlock:
            self.threadlock.acquire()
        chosen = self.index
        self.index += 1
        self.index %= len(self.runqueue)
        if self.threadlock:
            self.threadlock.release()
        return self.runqueue[chosen]
    def initialize(self, client, commands):
        "Arrange for client to ship specified commands when it goes active."
        client.enqueued = ""
        if not self.threadlock:
            client.send(commands)
        else:
            client.enqueued = commands
    def start(self):
        "Run the test session in a subthread, with locking of critical regions."
        self.threadlock = threading.Lock()
        # BUG FIX: the Thread object was created but never started, so
        # threaded mode silently did nothing. Start it and return it so
        # callers can join().
        thread = threading.Thread(target=self.run)
        thread.start()
        return thread
# End
|
main.py | import sublime, sublime_plugin
import re, urllib, shutil, traceback, threading, time, os, hashlib, json, multiprocessing, shlex
class Util(object) :
multiprocessing_list = []
@staticmethod
def download_and_save(url, where_to_save) :
if where_to_save :
try :
request = urllib.request.Request(url)
request.add_header('User-agent', r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1')
with urllib.request.urlopen(request) as response :
with open(where_to_save, 'wb+') as out_file :
shutil.copyfileobj(response, out_file)
return True
except Exception as e:
traceback.print_exc()
return False
@staticmethod
def open_json(path):
with open(path) as json_file :
try :
return json.load(json_file)
except Exception as e :
print("Error: "+traceback.format_exc())
return None
@staticmethod
def check_thread_is_alive(thread_name) :
for thread in threading.enumerate() :
if thread.getName() == thread_name and thread.is_alive() :
return True
return False
@staticmethod
def create_and_start_thread(target, thread_name="", args=[], kwargs={}, daemon=True) :
if not Util.check_thread_is_alive(thread_name) :
thread = threading.Thread(target=target, name=thread_name, args=args, kwargs=kwargs, daemon=daemon)
thread.start()
return thread
return None
@staticmethod
def check_process_is_alive(process_name) :
Util.multiprocessing_list
for process in Util.multiprocessing_list :
if process.name == process_name :
if process.is_alive() :
return True
else :
Util.multiprocessing_list.remove(process)
return False
@staticmethod
def create_and_start_process(target, process_name="", args=[], kwargs={}, daemon=True) :
Util.multiprocessing_list
if not Util.check_process_is_alive(process_name) :
process = multiprocessing.Process(target=target, name=process_name, args=args, kwargs=kwargs, daemon=daemon)
process.start()
Util.multiprocessing_list.append(process)
return process
return None
@staticmethod
def setTimeout(time, func):
timer = threading.Timer(time, func)
timer.start()
return timer
@staticmethod
def checksum_sha1(fname):
hash_sha1 = hashlib.sha1()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha1.update(chunk)
return hash_sha1.hexdigest()
@staticmethod
def checksum_sha1_equalcompare(fname1, fname2):
return Util.checksum_sha1(fname1) == Util.checksum_sha1(fname2)
@staticmethod
def split_string_and_find(string_to_split, search_value, split_delimiter=" ") :
string_splitted = string_to_split.split(split_delimiter)
return Util.indexOf(string_splitted, search_value)
@staticmethod
def split_string_and_find_on_multiple(string_to_split, search_values, split_delimiter=" ") :
string_splitted = string_to_split.split(split_delimiter)
for search_value in search_values :
index = Util.indexOf(string_splitted, search_value)
if index >= 0 :
return index
return -1
@staticmethod
def split_string_and_findLast(string_to_split, search_value, split_delimiter=" ") :
string_splitted = string_to_split.split(split_delimiter)
return Util.lastIndexOf(string_splitted, search_value)
@staticmethod
def indexOf(list_to_search, search_value) :
index = -1
try :
index = list_to_search.index(search_value)
except Exception as e:
pass
return index
@staticmethod
def lastIndexOf(list_to_search, search_value) :
index = -1
list_to_search_reversed = reversed(list_to_search)
list_length = len(list_to_search)
try :
index = next(i for i,v in zip(range(list_length-1, 0, -1), list_to_search_reversed) if v == search_value)
except Exception as e:
pass
return index
@staticmethod
def firstIndexOfMultiple(list_to_search, search_values) :
index = -1
string = ""
for search_value in search_values :
index_search = Util.indexOf(list_to_search, search_value)
if index_search >= 0 and index == -1 :
index = index_search
string = search_value
elif index_search >= 0 :
index = min(index, index_search)
string = search_value
return {
"index": index,
"string": string
}
@staticmethod
def find_and_get_pre_string_and_first_match(string, search_value) :
result = None
index = Util.indexOf(string, search_value)
if index >= 0 :
result = string[:index+len(search_value)]
return result
@staticmethod
def find_and_get_pre_string_and_matches(string, search_value) :
result = None
index = Util.indexOf(string, search_value)
if index >= 0 :
result = string[:index+len(search_value)]
string = string[index+len(search_value):]
count_occ = string.count(search_value)
i = 0
while i < count_occ :
result += " "+search_value
i = i + 1
return result
    @staticmethod
    def get_region_scope_first_match(view, scope, selection, selector) :
        """Cut *scope* at the first *selector* and return an info dict for
        the region of that scope containing *selection*, or None.

        NOTE: mutates the caller's *selection*, collapsing it to the start
        of the matched region.
        """
        scope = Util.find_and_get_pre_string_and_first_match(scope, selector)
        if scope :
            for region in view.find_by_selector(scope) :
                if region.contains(selection):
                    selection.a = region.begin()
                    selection.b = selection.a
                    return {
                        "scope": scope,
                        "region": region,
                        "region_string": view.substr(region),
                        "region_string_stripped": view.substr(region).strip(),
                        "selection": selection
                    }
        return None
    @staticmethod
    def get_region_scope_last_match(view, scope, selection, selector) :
        """Like get_region_scope_first_match, but the scope string keeps a
        repeated-selector suffix (see find_and_get_pre_string_and_matches).

        NOTE: mutates the caller's *selection* on a match.
        """
        scope = Util.find_and_get_pre_string_and_matches(scope, selector)
        if scope :
            for region in view.find_by_selector(scope) :
                if region.contains(selection):
                    selection.a = region.begin()
                    selection.b = selection.a
                    return {
                        "scope": scope,
                        "region": region,
                        "region_string": view.substr(region),
                        "region_string_stripped": view.substr(region).strip(),
                        "selection": selection
                    }
        return None
    @staticmethod
    def find_regions_on_same_depth_level(view, scope, selection, selectors, depth_level, forward) :
        """Walk the caret one character at a time (forward or backward)
        until each selector appears in the scope at *depth_level*, and
        collect an info dict per selector.

        NOTE: mutates the caller's *selection* while scanning; returns an
        empty list when the scan hits the buffer start or leaves the depth.
        """
        scope_splitted = scope.split(" ")
        regions = list()
        # Direction of the one-character caret steps.
        add_unit = 1 if forward else -1
        if len(scope_splitted) >= depth_level :
            for selector in selectors :
                while Util.indexOf(scope_splitted, selector) == -1 :
                    if selection.a == 0 or len(scope_splitted) < depth_level :
                        return list()
                    selection.a = selection.a + add_unit
                    selection.b = selection.a
                    scope = view.scope_name(selection.begin()).strip()
                    scope_splitted = scope.split(" ")
                region = view.extract_scope(selection.begin())
                regions.append({
                    "scope": scope,
                    "region": region,
                    "region_string": view.substr(region),
                    "region_string_stripped": view.substr(region).strip(),
                    "selection": selection
                })
        return regions
    @staticmethod
    def get_current_region_scope(view, selection) :
        """Return an info dict for the full-scope region containing
        *selection*, or None.

        NOTE: mutates the caller's *selection* on a match.
        """
        scope = view.scope_name(selection.begin()).strip()
        for region in view.find_by_selector(scope) :
            if region.contains(selection):
                selection.a = region.begin()
                selection.b = selection.a
                return {
                    "scope": scope,
                    "region": region,
                    "region_string": view.substr(region),
                    "region_string_stripped": view.substr(region).strip(),
                    "selection": selection
                }
        return None
    @staticmethod
    def get_parent_region_scope(view, selection) :
        """Return an info dict for the region one scope level above the
        selection's scope, or None.

        NOTE: mutates the caller's *selection* on a match.
        """
        scope = view.scope_name(selection.begin()).strip()
        # Drop the innermost scope component to get the parent scope.
        scope = " ".join(scope.split(" ")[:-1])
        for region in view.find_by_selector(scope) :
            if region.contains(selection):
                selection.a = region.begin()
                selection.b = selection.a
                return {
                    "scope": scope,
                    "region": region,
                    "region_string": view.substr(region),
                    "region_string_stripped": view.substr(region).strip(),
                    "selection": selection
                }
        return None
    @staticmethod
    def get_specified_parent_region_scope(view, selection, parent) :
        """Return an info dict for the enclosing region whose scope ends at
        the last occurrence of *parent*, or None.

        NOTE: mutates the caller's *selection* on a match.
        """
        scope = view.scope_name(selection.begin()).strip()
        scope = scope.split(" ")
        index_parent = Util.lastIndexOf(scope, parent)
        # Keep everything up to and including the requested parent scope.
        scope = " ".join(scope[:index_parent+1])
        for region in view.find_by_selector(scope) :
            if region.contains(selection):
                selection.a = region.begin()
                selection.b = selection.a
                return {
                    "scope": scope,
                    "region": region,
                    "region_string": view.substr(region),
                    "region_string_stripped": view.substr(region).strip(),
                    "selection": selection
                }
        return None
@staticmethod
def cover_regions(regions) :
first_region = regions[0]
other_regions = regions[1:]
for region in other_regions :
first_region = first_region.cover(region)
return first_region
@staticmethod
def rowcol_to_region(view, row, endrow, col, endcol):
start = view.text_point(row, col)
end = view.text_point(endrow, endcol)
return sublime.Region(start, end)
@staticmethod
def trim_Region(view, region):
new_region = sublime.Region(region.begin(), region.end())
while(view.substr(new_region).startswith(" ") or view.substr(new_region).startswith("\n")):
new_region.a = new_region.a + 1
while(view.substr(new_region).endswith(" ") or view.substr(new_region).startswith("\n")):
new_region.b = new_region.b - 1
return new_region
@staticmethod
def selection_in_js_scope(view, point = -1, except_for = ""):
try :
sel_begin = view.sel()[0].begin() if point == -1 else point
return view.match_selector(
sel_begin,
'source.js ' + except_for
) or view.match_selector(
sel_begin,
'source.js.embedded.html ' + except_for
)
except IndexError as e:
return False
@staticmethod
def replace_with_tab(view, region, pre="", after="", add_to_each_line_before="", add_to_each_line_after="") :
lines = view.substr(region).split("\n")
body = list()
empty_line = 0
for line in lines :
if line.strip() == "" :
empty_line = empty_line + 1
if empty_line == 2 :
empty_line = 1 # leave at least one empty line
continue
else :
empty_line = 0
line = "\t"+add_to_each_line_before+line+add_to_each_line_after
body.append(line)
if body[len(body)-1].strip() == "" :
del body[len(body)-1]
body = "\n".join(body)
return pre+body+after
@staticmethod
def replace_without_tab(view, region, pre="", after="", add_to_each_line_before="", add_to_each_line_after="") :
lines = view.substr(region).split("\n")
body = list()
empty_line = 0
for line in lines :
if line.strip() == "" :
empty_line = empty_line + 1
if empty_line == 2 :
empty_line = 1 # leave at least one empty line
continue
else :
empty_line = 0
body.append(add_to_each_line_before+line+add_to_each_line_after)
if body[len(body)-1].strip() == "" :
del body[len(body)-1]
body = "\n".join(body)
return pre+body+after
@staticmethod
def get_whitespace_from_line_begin(view, region) :
return " " * ( region.begin() - view.line(region).begin() )
@staticmethod
def add_whitespace_indentation(view, region, string, replace="\t", add_whitespace_end=True) :
whitespace = Util.get_whitespace_from_line_begin(view, region)
if replace == "\n" :
lines = string.split("\n")
lines = [whitespace+line for line in lines]
lines[0] = lines[0].lstrip()
string = "\n".join(lines)
return string
if add_whitespace_end :
lines = string.split("\n")
lines[len(lines)-1] = whitespace + lines[-1:][0]
string = "\n".join(lines)
string = re.sub("(["+replace+"]+)", whitespace+r"\1", string)
return string
@staticmethod
def go_to_centered(view, row, col):
while view.is_loading() :
time.sleep(.1)
point = view.text_point(row, col)
view.sel().clear()
view.sel().add(point)
view.show_at_center(point)
@staticmethod
def wait_view(view, fun):
    """Block (busy-wait) until *view* finishes loading, then invoke *fun*."""
    while view.is_loading():
        time.sleep(.1)
    fun()
@staticmethod
def move_content_to_parent_folder(path):
    """Move every entry of directory *path* into its parent directory,
    then remove the (now empty) *path* itself."""
    parent = os.path.dirname(path)
    for entry in os.listdir(path):
        shutil.move(os.path.join(path, entry), parent)
    os.rmdir(path)
@staticmethod
def merge_dicts(*dict_args):
    """Merge the given dicts into a new dict; later arguments win on
    key collisions. The inputs are not modified."""
    merged = {}
    for one_dict in dict_args:
        merged.update(one_dict)
    return merged
@staticmethod
def removeItemIfExists(arr, item):
    """Remove the first occurrence of *item* from *arr*, if present."""
    # EAFP: lists raise ValueError, sets raise KeyError on a missing item.
    try:
        arr.remove(item)
    except (ValueError, KeyError):
        pass
@staticmethod
def getListItemIfExists(arr, item):
    """Return *item* when it is present in *arr*, otherwise None."""
    return item if item in arr else None
@staticmethod
def delItemIfExists(obj, key):
    """Delete obj[key] if the key exists; silently ignore a missing key.

    Only KeyError is swallowed, so other failures (e.g. IndexError from a
    list index) still propagate.
    """
    try:
        del obj[key]
    except KeyError as e:
        pass
@staticmethod
def getDictItemIfExists(obj, key):
    """Return obj[key], or None when the key is missing (KeyError only)."""
    try:
        return obj[key]
    except KeyError:
        return None
@staticmethod
def create_and_show_panel(output_panel_name, window=None, syntax=""):
    """Create a read-only output panel on *window* (the active window when
    omitted), optionally set its syntax, show it, and return the panel."""
    target_window = window if window else sublime.active_window()
    panel = target_window.create_output_panel(output_panel_name, False)
    panel.set_read_only(True)
    if syntax:
        panel.set_syntax_file(syntax)
    target_window.run_command("show_panel", {"panel": "output." + output_panel_name})
    return panel
@staticmethod
def execute(command, command_args, chdir="", wait_terminate=True, func_stdout=None, args_func_stdout=[]):
    """Run *command* with *command_args* through a shell.

    With wait_terminate=True: blocks and returns
    ``[success_bool, combined_stdout_and_stderr_text]``.
    With wait_terminate=False and a *func_stdout* callback: runs in a
    background thread (which is returned) and streams lines to the callback.

    NOTE(review): ``args_func_stdout=[]`` is a shared mutable default —
    callers visible here always pass their own list, but this is fragile.
    """
    if sublime.platform() == 'windows':
        # Windows: hand an argv list to the shell.
        args = [command] + command_args
    else:
        # POSIX: build a single shell-quoted command string for /bin/bash.
        command_args_list = list()
        for command_arg in command_args:
            command_args_list.append(shlex.quote(command_arg))
        command_args = " ".join(command_args_list)
        args = shlex.quote(command)+" "+command_args
    #print(args)
    if wait_terminate:
        env = os.environ.copy()
        # Extend PATH with the plugin-configured value (node/npm lookup).
        env["PATH"] = env["PATH"] + javascriptCompletions.get("PATH")
        shell = None if sublime.platform() == 'windows' else '/bin/bash'
        with subprocess.Popen(args, shell=True, executable=shell, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=(None if not chdir else chdir)) as p:
            lines_output = []
            lines_error = []
            # Drain stdout and stderr on separate threads to avoid a pipe deadlock.
            thread_output = Util.create_and_start_thread(Util._wrapper_func_stdout_listen_output, "", (p, None, [], lines_output))
            thread_error = Util.create_and_start_thread(Util._wrapper_func_stdout_listen_error, "", (p, None, [], lines_error))
            if thread_output:
                thread_output.join()
            if thread_error:
                thread_error.join()
            lines = "\n".join(lines_output) + "\n" + "\n".join(lines_error)
            return [True if p.wait() == 0 else False, lines]
    elif not wait_terminate and func_stdout:
        return Util.create_and_start_thread(Util._wrapper_func_stdout, "", (args, func_stdout, args_func_stdout, chdir))
@staticmethod
def _wrapper_func_stdout(args, func_stdout, args_func_stdout=[], chdir=""):
    """Background-thread body for execute(wait_terminate=False): spawn the
    process, stream its output lines to *func_stdout*, then emit the
    sentinel events "OUTPUT-SUCCESS"/"OUTPUT-ERROR" and "OUTPUT-DONE".
    """
    env = os.environ.copy()
    env["PATH"] = env["PATH"] + javascriptCompletions.get("PATH")
    shell = None if sublime.platform() == 'windows' else '/bin/bash'
    # preexec_fn=os.setsid puts the child in its own process group so the
    # whole group can be signalled later (POSIX only).
    with subprocess.Popen(args, shell=True, executable=shell, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, preexec_fn=os.setsid, cwd=(None if not chdir else chdir)) as p:
        # Initial callback with line=None signals "process started".
        func_stdout(None, p, *args_func_stdout)
        thread_output = Util.create_and_start_thread(Util._wrapper_func_stdout_listen_output, "", (p, func_stdout, args_func_stdout))
        thread_error = Util.create_and_start_thread(Util._wrapper_func_stdout_listen_error, "", (p, func_stdout, args_func_stdout))
        if thread_output:
            thread_output.join()
        if thread_error:
            thread_error.join()
        if p.wait() == 0:
            func_stdout("OUTPUT-SUCCESS", p, *args_func_stdout)
        else:
            func_stdout("OUTPUT-ERROR", p, *args_func_stdout)
        func_stdout("OUTPUT-DONE", p, *args_func_stdout)
@staticmethod
def _wrapper_func_stdout_listen_output(process, func_stdout=None, args_func_stdout=[], lines_output=[]):
char = b""
line = b""
while True :
char = process.stdout.read(1)
if not char :
break
if not char.endswith(b'\n') :
line = line + char
else :
line = line + char
line = codecs.decode(line, "utf-8", "ignore").strip()
line = re.sub(r'\x1b(\[.*?[@-~]|\].*?(\x07|\x1b\\))', '', line)
line = re.sub(r'[\n\r]', '\n', line)
lines_output.append(line)
line = line + ( b"\n" if type(line) is bytes else "\n" )
if func_stdout :
func_stdout(line, process, *args_func_stdout)
line = b""
char = b""
@staticmethod
def _wrapper_func_stdout_listen_error(process, func_stdout=None, args_func_stdout=[], lines_error=[]):
char = b""
line = b""
while True :
char = process.stderr.read(1)
if not char :
break
if not char.endswith(b'\n') :
line = line + char
else :
line = line + char
line = codecs.decode(line, "utf-8", "ignore").strip()
line = re.sub(r'\x1b(\[.*?[@-~]|\].*?(\x07|\x1b\\))', '', line)
line = re.sub(r'[\n\r]', '\n', line)
lines_error.append(line)
line = line + ( b"\n" if type(line) is bytes else "\n" )
if func_stdout :
func_stdout(line, process, *args_func_stdout)
line = b""
char = b""
@staticmethod
def nested_lookup(key, values, document, wild=False):
    """Lookup a key in a nested document, return a list of values"""
    # Thin list-materializing wrapper around the _nested_lookup generator.
    return list(Util._nested_lookup(key, values, document, wild=wild))
@staticmethod
def _nested_lookup(key, values, document, wild=False):
"""Lookup a key in a nested document, yield a value"""
if isinstance(document, list):
for d in document:
for result in Util._nested_lookup(key, values, d, wild=wild):
yield result
if isinstance(document, dict):
for k, v in document.items():
if values and v in values and (key == k or (wild and key.lower() in k.lower())):
yield document
elif not values and key == k or (wild and key.lower() in k.lower()):
yield document
elif isinstance(v, dict):
for result in Util._nested_lookup(key, values, v, wild=wild):
yield result
elif isinstance(v, list):
for d in v:
for result in Util._nested_lookup(key, values, d, wild=wild):
yield result |
time_utils.py | # coding:utf-8
'''
@author = super_fazai
@File : time_utils.py
@Time : 2018/7/13 18:02
@connect : superonesfazai@gmail.com
'''
import time
__all__ = [
'get_shanghai_time', # 时区处理,得到上海时间
'timestamp_to_regulartime', # 时间戳转规范的时间字符串
'string_to_datetime', # 将字符串转换成时间
'datetime_to_timestamp', # datetime转timestamp
'fz_timer', # 一个装饰器或者上下文管理器, 用于计算某函数耗时
'fz_set_timeout', # 可以给任意可能会hang住的函数添加超时功能[这个功能在编写外部API调用, 网络爬虫, 数据库查询的时候特别有用]
]
def get_shanghai_time():
    '''
    Return the current Asia/Shanghai wall-clock time.

    :return: naive datetime truncated to whole seconds (tzinfo dropped)
    '''
    import pytz
    import datetime

    # pytz can list a country's timezones:
    # country_timezones_list = pytz.country_timezones('cn')
    # print(country_timezones_list)
    tz = pytz.timezone('Asia/Shanghai')
    now_time = datetime.datetime.now(tz)
    # Truncate to seconds and drop tzinfo directly. The old
    # str()/regex/strptime round-trip stripped everything after the first
    # '.', and raised ValueError whenever microsecond happened to be
    # exactly 0 (no '.' in str(), so the UTC offset survived into strptime).
    return now_time.replace(microsecond=0, tzinfo=None)
def timestamp_to_regulartime(timestamp):
    '''
    Format a unix timestamp as a local-time "YYYY-MM-DD HH:MM:SS" string.
    '''
    import time
    # localtime() converts the (truncated) timestamp to a local struct_time,
    # strftime() renders it in the canonical format.
    local_tm = time.localtime(int(timestamp))
    return time.strftime("%Y-%m-%d %H:%M:%S", local_tm)
def string_to_datetime(string):
    '''
    Parse a "YYYY-MM-DD HH:MM:SS" string into a datetime.

    :param string: timestamp text in the canonical format
    :return: datetime
    '''
    import datetime
    fmt = "%Y-%m-%d %H:%M:%S"
    return datetime.datetime.strptime(string, fmt)
def datetime_to_timestamp(_dateTime):
    '''
    Convert a datetime into an integer unix timestamp (local time).

    :param _dateTime: datetime instance
    :return: int
    '''
    import time
    seconds = time.mktime(_dateTime.timetuple())
    return int(seconds)
class fz_timer(object):
    """
    A timer usable as a context manager or decorator that records how long
    a block / call takes.

    If ``print_func`` is assigned (e.g. ``sys.stdout.write`` or
    ``logger.info``) the elapsed time is reported through it on exit.

    Usage::

        import sys

        @fz_timer(print_func=sys.stdout.write)
        def tmp():
            get_shanghai_time()

        tmp()
    """
    def __init__(self, print_func=None):
        '''
        :param print_func: sys.stdout.write | logger.info
        '''
        self.elapsed = None  # seconds spent inside the block, set on exit
        self.print_func = print_func

    def __enter__(self):
        self.start = time.time()
        return self  # allow `with fz_timer() as t:`

    def __exit__(self, *_):
        self.elapsed = time.time() - self.start
        if self.print_func:
            self.print_func(self.__str__())

    def __call__(self, fun):
        import functools

        # functools.wraps preserves the wrapped function's __name__ and
        # docstring, which the previous wrapper lost.
        @functools.wraps(fun)
        def wrapper(*args, **kwargs):
            with self:
                return fun(*args, **kwargs)
        return wrapper

    def __str__(self):
        return 'Spent time: {}s'.format(self.elapsed)
class TimeoutError(Exception):
    """Raised by fz_set_timeout when the wrapped call exceeds its deadline.

    NOTE(review): shadows the builtin ``TimeoutError`` inside this module;
    kept as-is for backward compatibility with existing importers.
    """
    pass
def fz_set_timeout(seconds, error_message='函数执行超时!'):
    '''
    Add a timeout to any function that might hang (useful for external API
    calls, crawlers and database queries).

    Usage::

        from time import sleep

        @fz_set_timeout(seconds=2)
        def tmp():
            sleep(3)

        tmp()

    :param seconds: timeout in seconds
    :param error_message: message carried by the raised TimeoutError
    :return: the wrapped function's result, or raises TimeoutError on timeout
    '''
    import functools
    from threading import Thread

    def decorated(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Pre-load the slot with the timeout error; the worker thread
            # overwrites it with the real result (or its own exception).
            outcome = [TimeoutError(error_message)]

            def runner():
                try:
                    outcome[0] = func(*args, **kwargs)
                except Exception as exc:
                    outcome[0] = exc

            worker = Thread(target=runner)
            worker.daemon = True  # don't keep the process alive on timeout
            worker.start()
            worker.join(seconds)
            result = outcome[0]
            if isinstance(result, BaseException):
                raise result
            return result
        return wrapper
    return decorated
|
server.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import signal
import sys
import os
import time
from rpi_ws281x import *
from websocket_server import WebsocketServer
import logging
import threading
# Configuration
LED_COUNT = 16 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
WS2812TH = None
class WS2812Thread(object):
    """Background animation worker for a WS2812 LED strip.

    run() loops forever, replaying the animation selected by
    ``self.command`` (0 = solid colorWipe, 1 = rainbow, 2 = rainbowCycle,
    3 = theaterChaseRainbow). The websocket handler switches animations
    through changeVars().
    """
    def __init__(self, cmd, color, strip):
        # Shared state, mutated from the websocket thread via changeVars();
        # plain attribute writes, no locking.
        self.command = cmd
        self.new_color = color
        self.strip = strip
        # Daemon thread: dies automatically with the main process.
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True
        thread.start()

    def changeVars(self, cmd, color):
        # Called from the websocket thread; takes effect on the next
        # pass of the run() loop.
        self.command = cmd
        self.new_color = color

    # Define functions which animate LEDs in various ways.
    def colorWipe(self, color, wait_ms=50):
        """Wipe color across display a pixel at a time."""
        for i in range(self.strip.numPixels()):
            self.strip.setPixelColor(i, color)
            self.strip.show()
            time.sleep(wait_ms/1000.0)

    def theaterChase(self, color, wait_ms=50, iterations=10):
        """Movie theater light style chaser animation."""
        for j in range(iterations):
            for q in range(3):
                # Light every 3rd pixel starting at offset q ...
                for i in range(0, self.strip.numPixels(), 3):
                    self.strip.setPixelColor(i+q, color)
                self.strip.show()
                time.sleep(wait_ms/1000.0)
                # ... then turn them off before shifting the offset.
                for i in range(0, self.strip.numPixels(), 3):
                    self.strip.setPixelColor(i+q, 0)

    def wheel(self, pos):
        """Generate rainbow colors across 0-255 positions."""
        if pos < 85:
            return Color(pos * 3, 255 - pos * 3, 0)
        elif pos < 170:
            pos -= 85
            return Color(255 - pos * 3, 0, pos * 3)
        else:
            pos -= 170
            return Color(0, pos * 3, 255 - pos * 3)

    def rainbow(self, wait_ms=20, iterations=1):
        """Draw rainbow that fades across all pixels at once."""
        for j in range(256*iterations):
            for i in range(self.strip.numPixels()):
                self.strip.setPixelColor(i, self.wheel((i+j) & 255))
            self.strip.show()
            time.sleep(wait_ms/1000.0)

    def rainbowCycle(self, wait_ms=20, iterations=5):
        """Draw rainbow that uniformly distributes itself across all pixels."""
        for j in range(256*iterations):
            for i in range(self.strip.numPixels()):
                self.strip.setPixelColor(i, self.wheel((int(i * 256 / self.strip.numPixels()) + j) & 255))
            self.strip.show()
            time.sleep(wait_ms/1000.0)

    def theaterChaseRainbow(self, wait_ms=50):
        """Rainbow movie theater light style chaser animation."""
        for j in range(256):
            for q in range(3):
                for i in range(0, self.strip.numPixels(), 3):
                    self.strip.setPixelColor(i+q, self.wheel((i+j) % 255))
                self.strip.show()
                time.sleep(wait_ms/1000.0)
                for i in range(0, self.strip.numPixels(), 3):
                    self.strip.setPixelColor(i+q, 0)

    def run(self):
        # Animation loop: re-reads self.command each pass, so changeVars()
        # takes effect between animation cycles.
        while True:
            if self.command == 0:
                self.colorWipe(self.new_color)
            if self.command == 1:
                self.rainbow()
            if self.command == 2:
                self.rainbowCycle()
            if self.command == 3:
                self.theaterChaseRainbow()
                #time.sleep(10)
                # Blank the strip after a full chase cycle.
                self.colorWipe(Color(0,0,0), 10)
def messageReceived(client, server, message):
    """Websocket handler: parse "<cmd> [r g b]" and hand it to the LED thread.

    :param message: command index, optionally followed by three color
        components, space separated.
    """
    parts = message.split(" ")
    new_color = Color(0, 0, 0)
    # The old guard only checked len > 1 but then read parts[1..3],
    # raising IndexError on 2- or 3-field messages.
    if len(parts) >= 4:
        new_color = Color(int(parts[1]), int(parts[2]), int(parts[3]))
    WS2812TH.changeVars(int(parts[0]), new_color)
# Main program logic follows:
if __name__ == '__main__':
    # Create the NeoPixel object with the configured geometry/signal settings
    # and initialise the library (must be called once before use).
    strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
    strip.begin()
    # Start the animation worker (blank strip, command 0); messageReceived
    # mutates it via changeVars().
    WS2812TH = WS2812Thread(0, Color(0, 0, 0), strip)
    # Serve websocket commands on port 80; run_forever() blocks.
    server = WebsocketServer(80, host='0.0.0.0', loglevel=logging.INFO)
    server.set_fn_message_received(messageReceived)
    server.run_forever()
|
term.py | # -*- coding: utf-8 -*-
import os
import subprocess
import sys
import threading
import time
import uuid
import click
import six
from dagster import seven
CURSOR_UP_ONE = '\x1b[1A'
ERASE_LINE = '\x1b[2K'
class Spinner(object):
    '''Spinning CLI prompt, shown while long-running activity is in flight.

    From: https://stackoverflow.com/a/39504463/11295366
    '''

    busy = False
    delay = 0.08
    spinner = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']

    @staticmethod
    def spinning_cursor():
        """Yield spinner glyphs forever, cycling through the sequence."""
        while 1:
            for cursor in Spinner.spinner:
                yield cursor

    def __init__(self, delay=None):
        '''
        :param delay: seconds between glyph updates (default 0.08)
        '''
        self.spinner_generator = self.spinning_cursor()
        self._thread = None  # worker drawing the spinner while inside the context
        if delay and float(delay):
            self.delay = delay

    def spinner_task(self):
        """Worker loop: draw a glyph, wait, erase it — until busy is cleared."""
        while self.busy:
            sys.stdout.write(next(self.spinner_generator))
            sys.stdout.flush()
            time.sleep(self.delay)
            sys.stdout.write('\b')
            sys.stdout.flush()

    def __enter__(self):
        self.busy = True
        # Keep a handle on the worker so __exit__ can join it (the old code
        # discarded the Thread and slept for a fixed delay instead).
        self._thread = threading.Thread(target=self.spinner_task)
        self._thread.start()
        return self  # previously returned None; `with Spinner() as s:` now works

    def __exit__(self, exception, value, tb):
        self.busy = False
        # Join the worker so the spinner has definitely stopped drawing
        # before control returns to the caller.
        if self._thread is not None:
            self._thread.join()
            self._thread = None
        if exception is not None:
            return False
class Term(object):
    '''Colored, emoji-prefixed terminal output helpers built on click.'''

    ERROR_PREFIX = u'❌ '
    FATAL_PREFIX = u'💣 '
    INFO_PREFIX = u'ℹ️ '
    SUCCESS_PREFIX = u'✅ '
    WAITING_PREFIX = u'⌛ '
    WARNING_PREFIX = u'⚠️ '

    @staticmethod
    def fatal(msg):
        '''Print a fatal message to stderr and exit with status 1.'''
        styled = click.style(Term.FATAL_PREFIX + msg, fg='red')
        click.echo(styled, err=True)
        sys.exit(1)

    @staticmethod
    def error(msg):
        '''Print an error message in red.'''
        styled = click.style(Term.ERROR_PREFIX + msg, fg='red')
        click.echo(styled)

    @staticmethod
    def info(msg):
        '''Print an informational message in blue.'''
        styled = click.style(Term.INFO_PREFIX + msg, fg='blue')
        click.echo(styled)

    @staticmethod
    def success(msg):
        '''Print a success message in green.'''
        styled = click.style(Term.SUCCESS_PREFIX + msg, fg='green')
        click.echo(styled)

    @staticmethod
    def waiting(msg):
        '''Print a waiting/progress message in yellow.'''
        styled = click.style(Term.WAITING_PREFIX + msg, fg='yellow')
        click.echo(styled)

    @staticmethod
    def warning(msg):
        '''Print a warning message in yellow.'''
        styled = click.style(Term.WARNING_PREFIX + msg, fg='yellow')
        click.echo(styled)

    @staticmethod
    def rewind():
        '''Move the cursor up one line and erase it (to overwrite status lines).'''
        sys.stdout.write(CURSOR_UP_ONE)
        sys.stdout.write(ERASE_LINE)
def run_remote_cmd(key_file_path, host, cmd, quiet=False):
    """Run *cmd* on *host* over ssh as the ubuntu user.

    :param quiet: when True, capture and return the command's output
        (check_output — raises CalledProcessError on failure); otherwise
        stream to the terminal and return the exit status.

    NOTE(review): the command line is built by string interpolation and run
    with shell=True — only safe for trusted key_file_path/host/cmd values.
    """
    ssh_cmd = 'ssh -o StrictHostKeyChecking=no -i %s ubuntu@%s -t "%s"' % (key_file_path, host, cmd)
    if quiet:
        return subprocess.check_output(ssh_cmd, shell=True)
    else:
        return subprocess.call(ssh_cmd, shell=True)
def rsync_to_remote(key_file_path, local_path, remote_host, remote_path):
    """rsync *local_path* to ubuntu@remote_host:remote_path, excluding
    common VCS/build artifacts, and echo the command being run."""
    remote_user = 'ubuntu'
    # Exclude a few common paths
    exclude_patterns = [
        '\'.pytest_cache\'',
        '\'.git\'',
        '\'__pycache__\'',
        '\'*.pyc\'',
    ]
    rsync_command = ['rsync', '-avL', '--progress']
    for pattern in exclude_patterns:
        rsync_command.extend(['--exclude', pattern])
    rsync_command.extend([
        '-e',
        '"ssh -o StrictHostKeyChecking=no -i %s"' % key_file_path,
        local_path,
        '%s@%s:%s' % (remote_user, remote_host, remote_path),
    ])
    Term.info('rsyncing local path %s to %s:%s' % (local_path, remote_host, remote_path))
    click.echo('\n' + ' '.join(rsync_command) + '\n')
    subprocess.call(' '.join(rsync_command), shell=True)
def remove_ssh_key(key_file_path):
    """Remove the key at *key_file_path* from the ssh authentication agent.

    Returns True when there was nothing to remove or removal succeeded,
    False when the key was not loaded or could not be removed.
    """
    # We have to clean up after ourselves to avoid "Too many authentication failures" issue.
    Term.waiting('Removing SSH key from authentication agent...')

    # AWS only gives us the private key contents; ssh-add uses the private key for adding but the
    # public key for removing
    try:
        public_keys = six.ensure_str(subprocess.check_output(['ssh-add', '-L'])).strip().split('\n')
    except subprocess.CalledProcessError:
        # ssh-add -L exits non-zero when the agent holds no identities.
        Term.rewind()
        Term.info('No identities found, skipping')
        return True

    filtered_public_keys = [key for key in public_keys if key_file_path in key]
    public_key = filtered_public_keys[0] if filtered_public_keys else None
    if public_key:
        # ssh-add -d needs a public key *file*, so write one out temporarily.
        tmp_pub_file = os.path.join(
            seven.get_system_temp_directory(), uuid.uuid4().hex + '-tmp-pubkey'
        )
        with open(tmp_pub_file, 'wb') as f:
            f.write(six.ensure_binary(public_key))
        res = subprocess.Popen(
            ['ssh-add', '-d', tmp_pub_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        ).communicate()
        res = six.ensure_str(res[0])
        os.unlink(tmp_pub_file)
        if 'Identity removed' in res:
            Term.rewind()
            Term.success('key deleted successfully')
            return True
        else:
            Term.warning('Could not remove key, error: %s' % res)
            return False
    else:
        Term.rewind()
        Term.info('key not found, skipping')
        return False
    # NOTE(review): unreachable — every branch above returns.
    return True
|
workers.py | # curio/workers.py
#
# Functions for performing work outside of curio. This includes
# running functions in threads, processes, and executors from the
# concurrent.futures module.
__all__ = ['run_in_executor', 'run_in_thread', 'run_in_process', 'block_in_thread']
# -- Standard Library
import sys
import multiprocessing
import threading
import traceback
import signal
from collections import Counter, defaultdict
# -- Curio
from .errors import CancelledError
from .traps import _future_wait, _get_kernel
from . import sync
from .channel import Connection
# Code to embed a traceback in a remote exception. This is borrowed
# straight from multiprocessing.pool. Copied here to avoid possible
# confusion when reading the traceback message (it will identify itself
# as originating from curio as opposed to multiprocessing.pool).
class RemoteTraceback(Exception):
    """Carries a remote (worker-process) traceback as plain text so it can
    be attached as __cause__ on the exception rebuilt in the parent."""

    def __init__(self, tb):
        # tb: pre-formatted traceback text produced in the worker.
        self.tb = tb

    def __str__(self):
        return self.tb
class ExceptionWithTraceback:
def __init__(self, exc, tb):
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return rebuild_exc, (self.exc, self.tb)
def rebuild_exc(exc, tb):
    """Unpickle hook for ExceptionWithTraceback: re-attach the formatted
    remote traceback text as the exception's __cause__."""
    exc.__cause__ = RemoteTraceback(tb)
    return exc
async def run_in_executor(exc, callable, *args):
    '''
    Run callable(*args) in an executor such as
    ThreadPoolExecutor or ProcessPoolExecutor from the
    concurrent.futures module.  Be aware that on cancellation, any
    worker thread or process that was handling the request will
    continue to run to completion as a kind of zombie-- possibly
    rendering the executor unusable for subsequent work.

    This function is provided for compatibility with
    concurrent.futures, but is not the recommended approach for running
    blocking or cpu-bound work in curio.  Use the run_in_thread() or
    run_in_process() methods instead.
    '''
    future = exc.submit(callable, *args)
    # Trap into the kernel until the future completes (or we're cancelled).
    await _future_wait(future)
    return future.result()
MAX_WORKER_THREADS = 64
async def reserve_thread_worker():
    '''
    Reserve a thread pool worker
    '''
    kernel = await _get_kernel()
    # Lazily attach a shared thread pool to the kernel on first use, and
    # make sure it is torn down when the kernel shuts down.
    if not hasattr(kernel, 'thread_pool'):
        kernel.thread_pool = WorkerPool(ThreadWorker, MAX_WORKER_THREADS)
        kernel._call_at_shutdown(kernel.thread_pool.shutdown)
    return (await kernel.thread_pool.reserve())
async def run_in_thread(callable, *args, call_on_cancel=None):
    '''
    Run callable(*args) in a separate thread and return the result. If
    cancelled, be aware that the requested callable may or may not have
    executed.  If it started running, it will run fully to completion
    as a kind of zombie.
    '''
    worker = None
    try:
        worker = await reserve_thread_worker()
        return await worker.apply(callable, args, call_on_cancel)
    finally:
        # Always hand the worker back to the pool, even on cancellation.
        if worker:
            await worker.release()
# Support for blocking in threads.
#
# Discussion:
#
# The run_in_thread() function can be used to run any synchronous function
# in a separate thread. However, certain kinds of operations are
# inherently unsafe. For example, consider a worker task that wants
# to wait on a threading Event like this:
#
# evt = threading.Event() # Foreign Event...
#
# async def worker():
# await run_in_thread(evt.wait)
# print('Alive!')
#
# Now suppose Curio spins up a huge number of workers:
#
# for n in range(1000):
# await spawn(worker())
#
# At this point, you're in a bad situation. The worker tasks have all
# called run_in_thread() and are blocked indefinitely. Because the
# pool of worker threads is limited, you've exhausted all available
# resources. Nobody can now call run_in_thread() without blocking.
# There's a pretty good chance that your code is permanently
# deadlocked. There are dark clouds.
#
# This problem can be solved by wrapping run_in_thread() with a
# semaphore. Like this:
#
# _barrier = curio.Semaphore()
#
# async def worker():
# async with _barrier:
# await run_in_thread(evt.wait)
#
# However, to make it much more convenient, we can take care of
# a lot of fiddly details. We can cache the requested callable,
# build a set of semaphores and synchronize things in the background.
# That's what the block_in_thread() function is doing. For example:
#
# async def worker():
# await block_in_thread(evt.wait)
# print('Alive!')
#
# Unlike run_in_thread(), spawning up 1000 workers creates a
# situation where only 1 worker is actually blocked in a thread.
# The other 999 workers are blocked on a semaphore waiting for service.
_pending = Counter()
_barrier = defaultdict(sync.Semaphore)
async def block_in_thread(callable, *args, call_on_cancel=None):
    '''
    Run callable(*args) in a thread with the expectation that the
    operation is going to block for an indeterminate amount of time.
    Guarantees that at most only one background thread is used
    regardless of how many curio tasks are actually waiting on the
    same callable (e.g., if 1000 Curio tasks all decide to call
    block_in_thread on the same callable, they'll all be handled by a
    single thread).  Primary use of this function is on foreign locks,
    queues, and other synchronization primitives where you have to use
    a thread, but you just don't have any idea when the operation will
    complete.
    '''
    # Key requests by bound method (name + instance id) or by callable id,
    # so identical waits funnel through one semaphore/thread.
    if hasattr(callable, '__self__'):
        call_key = (callable.__name__, id(callable.__self__))
    else:
        call_key = id(callable)
    _pending[call_key] += 1
    async with _barrier[call_key]:
        try:
            return await run_in_thread(callable, *args, call_on_cancel=call_on_cancel)
        finally:
            # Drop the bookkeeping when the last waiter finishes so the
            # module-level defaultdicts don't grow without bound.
            _pending[call_key] -= 1
            if not _pending[call_key]:
                del _pending[call_key]
                del _barrier[call_key]
MAX_WORKER_PROCESSES = multiprocessing.cpu_count()
async def run_in_process(callable, *args):
    '''
    Run callable(*args) in a separate process and return the
    result.  In the event of cancellation, the worker process is
    immediately terminated.

    The worker process is created using multiprocessing.Process().
    Communication with the process uses multiprocessing.Pipe() and an
    asynchronous message passing channel.  All function arguments and
    return values are serialized using the pickle module.  When
    cancelled, the Process.terminate() method is used to kill the
    worker process.  This results in a SIGTERM signal being sent to
    the process.

    The worker process is a separate isolated Python interpreter.
    Nothing should be assumed about its global state including shared
    variables, files, or connections.
    '''
    kernel = await _get_kernel()
    # Lazily attach a shared process pool to the kernel on first use.
    if not hasattr(kernel, 'process_pool'):
        kernel.process_pool = WorkerPool(ProcessWorker, MAX_WORKER_PROCESSES)
        kernel._call_at_shutdown(kernel.process_pool.shutdown)

    worker = None
    try:
        worker = await kernel.process_pool.reserve()
        return await worker.apply(callable, args)
    finally:
        # Always return the worker slot, even on cancellation/error.
        if worker:
            await worker.release()
# The _FutureLess class is a custom "Future" implementation solely for
# use by curio. It is used by the ThreadWorker class below and
# provides only the minimal set of functionality needed to transmit a
# result back to the curio kernel. Unlike the normal Future class,
# this version doesn't require any thread synchronization or
# notification support. By eliminating that, the overhead associated
# with the handoff between curio tasks and threads is substantially
# faster.
class _FutureLess(object):
__slots__ = ('_callback', '_exception', '_result')
def set_result(self, result):
self._result = result
self._callback(self)
def set_exception(self, exc):
self._exception = exc
self._callback(self)
def result(self):
try:
return self._result
except AttributeError:
raise self._exception from None
def add_done_callback(self, func):
self._callback = func
def cancel(self):
pass
# A ThreadWorker represents a thread that performs work on behalf of a
# curio task. A curio task initiates work by executing the
# apply() method. This passes the request to a background thread that
# executes it. While this takes place, the curio task blocks, waiting
# for a result to be set on an internal Future.
class ThreadWorker(object):
    '''
    Worker that executes a callable on behalf of a curio task in a separate thread.
    '''

    def __init__(self, pool):
        self.thread = None        # lazily created backing thread
        self.start_evt = None     # wakes the worker: request ready (or terminate)
        self.lock = None
        self.request = None       # zero-arg callable the worker should run
        self.terminated = False
        self.pool = pool

    def _launch(self):
        # Start the daemonic backing thread on first use.
        self.start_evt = threading.Event()
        self.thread = threading.Thread(target=self.run_worker, daemon=True)
        self.thread.start()

    def run_worker(self):
        while True:
            self.start_evt.wait()
            self.start_evt.clear()
            # If there is no pending request, but we were signalled to
            # start, it means terminate.
            if not self.request:
                return
            # Run the request
            self.request()

    async def release(self):
        if self.pool:
            await self.pool.release(self)

    def shutdown(self):
        # Wake the worker with no request pending -> it exits (see run_worker).
        self.terminated = True
        self.request = None
        if self.start_evt:
            self.start_evt.set()

    async def apply(self, func, args=(), call_on_cancel=None):
        '''
        Run the callable func in a separate thread and return the result.
        '''
        if self.thread is None:
            self._launch()

        # Set up a request for the worker thread
        done_evt = threading.Event()
        done_evt.clear()
        cancelled = False
        future = _FutureLess()

        def run_callable():
            try:
                future.set_result(func(*args))
            except BaseException as err:
                future.set_exception(err)
            finally:
                # Hold the worker until the curio side has observed the
                # outcome; if we were cancelled, run the cleanup hook here.
                done_evt.wait()
                if cancelled and call_on_cancel:
                    call_on_cancel(future)

        self.request = run_callable
        try:
            # start_evt wakes the worker; _future_wait suspends this task
            # until the future is resolved (or the task is cancelled).
            await _future_wait(future, self.start_evt)
            return future.result()
        except CancelledError as e:
            cancelled = True
            # The zombie thread keeps running; retire this worker so it is
            # not handed out again.
            self.shutdown()
            raise
        finally:
            done_evt.set()
class ProcessWorker(object):
    '''
    Managed process worker for running CPU-intensive tasks. The main
    purpose of this class is to run workers with reliable
    cancellation/timeout semantics. Specifically, if a worker is
    cancelled, the underlying process is also killed. This, as
    opposed to having it linger on running until work is complete.
    '''

    def __init__(self, pool):
        self.process = None     # spawned child process (created lazily)
        self.client_ch = None   # async channel to the child
        self.terminated = False
        self.pool = pool

    def _launch(self):
        # 'spawn' context: child starts as a fresh interpreter with no
        # inherited interpreter state.
        context = multiprocessing.get_context('spawn')
        client_ch, server_ch = context.Pipe()
        self.process = context.Process(
            target=self.run_server, args=(server_ch, ), daemon=True)
        self.process.start()
        # The parent must close its copy of the server end of the pipe.
        server_ch.close()
        self.client_ch = Connection.from_Connection(client_ch)

    def shutdown(self):
        self.terminated = True
        if self.process:
            self.process.terminate()  # SIGTERM to the child
            self.process = None
        # NOTE(review): nrequests is assigned here but never read in this
        # class — looks vestigial; confirm before removing.
        self.nrequests = 0

    async def release(self):
        if self.pool:
            await self.pool.release(self)

    def run_server(self, ch):
        # Child-process loop: receive (func, args), send (ok, result).
        # Restore default SIGTERM so Process.terminate() kills us; ignore
        # Ctrl-C so the parent handles keyboard interrupts.
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        while True:
            func, args = ch.recv()
            try:
                result = func(*args)
                ch.send((True, result))
            except Exception as e:
                # Ship the traceback as text; traceback objects don't pickle.
                e = ExceptionWithTraceback(e, e.__traceback__)
                ch.send((False, e))
            del func, args

    async def apply(self, func, args=()):
        # (Re)launch the child if it was never started or has died.
        if self.process is None or not self.process.is_alive():
            self._launch()
        msg = (func, args)
        try:
            await self.client_ch.send(msg)
            success, result = await self.client_ch.recv()
            if success:
                return result
            else:
                raise result
        except CancelledError:
            # Kill the child immediately; a half-finished request can't be
            # reused safely.
            self.shutdown()
            raise
# Windows-compatible process worker. It differs from ProcessWorker in
# that client communication is handled synchronously by a thread.
class WinProcessWorker(ProcessWorker):
    """Windows variant of ProcessWorker: pipe I/O is performed
    synchronously in a helper thread instead of curio's async Connection."""

    def _launch(self):
        context = multiprocessing.get_context('spawn')
        client_ch, server_ch = context.Pipe()
        self.process = context.Process(
            target=self.run_server, args=(server_ch, ), daemon=True)
        self.process.start()
        server_ch.close()
        # Raw multiprocessing Connection, used synchronously from a thread.
        self.client_ch = client_ch

    def _client_communicate(self, msg):
        # Runs inside a worker thread; blocking send/recv is fine there.
        self.client_ch.send(msg)
        return self.client_ch.recv()

    async def apply(self, func, args=()):
        if self.process is None or not self.process.is_alive():
            self._launch()
        msg = (func, args)
        try:
            # Delegate the blocking pipe round-trip to a thread worker.
            success, result = await run_in_thread(self._client_communicate, msg)
            if success:
                return result
            else:
                raise result
        except CancelledError:
            self.shutdown()
            raise
if sys.platform.startswith('win'):
ProcessWorker = WinProcessWorker
# Pool of workers for carrying out jobs on behalf of curio tasks.
#
# This pool works a bit differently than a normal thread/process
# pool due to some of the different ways that threads get used in Curio.
# Instead of submitting work to the pool, you use the reserve() method
# to obtain a worker:
#
# worker = await pool.reserve()
#
# Once you have a worker, it is yours for as long as you want to have
# it. To submit work to it, use the apply() method:
#
# await worker.apply(callable, args)
#
# When you're done with it, release it back to the pool.
#
# await worker.release()
#
# Some rationale for this design: Sometimes when you're working with
# threads, you want to perform multiple steps and you need to make sure
# you're performing each step on the same thread for some reason. This
# is especially true if you're trying to manage work cancellation.
# For example, work started in a thread might need to be cleaned up
# on the same thread. By reserving/releasing workers, we get more
# control over the whole process of how workers get managed.
class WorkerPool(object):
    """Pool of reusable thread/process workers.

    Tasks reserve() a worker (blocking on a semaphore when *nworkers* are
    already out), use it via worker.apply(), and release() it back.
    """

    def __init__(self, workercls, nworkers):
        self.nworkers = sync.Semaphore(nworkers)  # caps concurrent reservations
        self.workercls = workercls
        self.workers = []                          # idle, reusable workers

    def shutdown(self):
        """Terminate every idle worker and forget them."""
        for idle_worker in self.workers:
            idle_worker.shutdown()
        self.workers = []

    async def reserve(self):
        """Acquire a worker: reuse an idle one if available, else create one."""
        await self.nworkers.acquire()
        return self.workers.pop() if self.workers else self.workercls(self)

    async def release(self, worker):
        """Return *worker* to the pool (unless it was terminated) and free a slot."""
        if not worker.terminated:
            self.workers.append(worker)
        await self.nworkers.release()
# Pool definitions should anyone want to use them directly
def ProcessPool(nworkers):
    """Return a WorkerPool of up to *nworkers* process workers."""
    # def instead of lambda assignment (PEP 8 E731); also gives a useful __name__.
    return WorkerPool(ProcessWorker, nworkers)

def ThreadPool(nworkers):
    """Return a WorkerPool of up to *nworkers* thread workers."""
    return WorkerPool(ThreadWorker, nworkers)
|
run_server3.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""run_server.py run the Clusterfuzz server locally."""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import os
import shutil
import threading
import time
import urllib.request
from local.butler import appengine
from local.butler import common
from local.butler import constants
from python.config import local_config
from python.tests.test_libs import test_utils
def bootstrap_db():
    """Bootstrap the DB."""

    def bootstrap():
        # Wait for the server to run.
        time.sleep(10)
        print('Bootstrapping datastore...')
        common.execute(
            ('python butler.py run setup '
             '--non-dry-run --local --config-dir={config_dir}'
            ).format(config_dir=constants.TEST_CONFIG_DIR),
            exit_on_error=False)

    # Run in the background so server startup is not blocked.
    # NOTE(review): non-daemon thread — it keeps the process alive until
    # the setup command finishes; presumably intentional, confirm.
    thread = threading.Thread(target=bootstrap)
    thread.start()
def create_local_bucket(local_gcs_buckets_path, name):
    """Create a local bucket directory under *local_gcs_buckets_path*.

    Idempotent: an existing bucket directory is left untouched.
    """
    bucket_path = os.path.join(local_gcs_buckets_path, name)
    # makedirs(exist_ok=True) replaces the racy exists()/mkdir() pair and
    # also tolerates a missing parent directory.
    os.makedirs(bucket_path, exist_ok=True)
def bootstrap_gcs(storage_path):
    """Bootstrap GCS: create every expected local bucket and symlink it."""
    local_gcs_buckets_path = os.path.join(storage_path, 'local_gcs')
    if not os.path.exists(local_gcs_buckets_path):
        os.mkdir(local_gcs_buckets_path)

    config = local_config.ProjectConfig()
    # Config keys of every bucket the application expects to exist.
    bucket_keys = (
        'blobs.bucket',
        'deployment.bucket',
        'bigquery.bucket',
        'backup.bucket',
        'logs.fuzzer.bucket',
        'env.CORPUS_BUCKET',
        'env.QUARANTINE_BUCKET',
        'env.SHARED_CORPUS_BUCKET',
        'env.FUZZ_LOGS_BUCKET',
        'env.MUTATOR_PLUGINS_BUCKET',
    )
    for key in bucket_keys:
        create_local_bucket(local_gcs_buckets_path, config.get(key))

    # Symlink local GCS bucket path to appengine src dir to bypass sandboxing
    # issues.
    common.symlink(
        src=local_gcs_buckets_path,
        target=os.path.join(appengine.SRC_DIR_PY, 'local_gcs'))
def start_cron_threads():
    """Start daemon threads that periodically trigger essential cron jobs."""
    request_timeout = 10 * 60  # 10 minutes.

    def trigger(interval_seconds, target):
        """Hit the cron endpoint for `target` every `interval_seconds`."""
        while True:
            time.sleep(interval_seconds)
            try:
                url = 'http://{host}/{target}'.format(
                    host=constants.CRON_SERVICE_HOST, target=target)
                request = urllib.request.Request(url)
                request.add_header('X-Appengine-Cron', 'true')
                response = urllib.request.urlopen(request, timeout=request_timeout)
                response.read(60)  # wait for request to finish.
            except Exception:
                # The server may not be up yet; retry on the next interval.
                continue

    schedule = ((90, 'cleanup'),
                (60, 'triage'),
                (6 * 3600, 'schedule-progression-tasks'),
                (12 * 3600, 'schedule-corpus-pruning'))
    for interval, cron in schedule:
        cron_thread = threading.Thread(target=trigger, args=(interval, cron))
        cron_thread.daemon = True
        cron_thread.start()
def execute(args):
    """Run the server.

    Brings up a fully local development environment: installs dependencies,
    prepares App Engine config files and templates, starts the pubsub,
    datastore, and local GCS emulators, optionally bootstraps the datastore,
    then serves the cron app and the main app with gunicorn until Ctrl-C.

    :param args: parsed CLI namespace; reads skip_install_deps, bootstrap,
        clean, and storage_path.
    """
    os.environ['LOCAL_DEVELOPMENT'] = 'True'
    common.kill_leftover_emulators()

    if not args.skip_install_deps:
        common.install_dependencies()

    # Do this everytime as a past deployment might have changed these.
    appengine.symlink_dirs()

    # Deploy all yaml files from test project for basic appengine deployment and
    # local testing to work. This needs to be called on every iteration as a past
    # deployment might have overwritten or deleted these config files.
    yaml_paths = local_config.GAEConfig().get_absolute_path('deployment.prod')
    appengine.copy_yamls_and_preprocess(yaml_paths)

    # Build templates.
    appengine.build_templates()

    # Clean storage directory if needed.
    if args.bootstrap or args.clean:
        if os.path.exists(args.storage_path):
            print('Clearing local datastore by removing %s.' % args.storage_path)
            shutil.rmtree(args.storage_path)
    if not os.path.exists(args.storage_path):
        os.makedirs(args.storage_path)

    # Set up local GCS buckets and symlinks.
    bootstrap_gcs(args.storage_path)

    # Start pubsub emulator.
    pubsub_emulator = test_utils.start_cloud_emulator(
        'pubsub',
        args=['--host-port=' + constants.PUBSUB_EMULATOR_HOST],
        data_dir=args.storage_path)
    test_utils.setup_pubsub(constants.TEST_APP_ID)

    # Start Datastore emulator
    datastore_emulator = test_utils.start_cloud_emulator(
        'datastore',
        args=['--host-port=' + constants.DATASTORE_EMULATOR_HOST],
        data_dir=args.storage_path)

    # Start our custom GCS emulator.
    local_gcs = common.execute_async(
        'bazel run //go/testing/gcs '
        '--sandbox_writable_path=$(pwd)/../local/storage/local_gcs '
        '-- -storage-path=$(pwd)/../local/storage/local_gcs',
        cwd='src')

    if args.bootstrap:
        bootstrap_db()

    start_cron_threads()

    # Environment consumed by the app and the client libraries started below.
    os.environ['APPLICATION_ID'] = constants.TEST_APP_ID
    os.environ['LOCAL_DEVELOPMENT'] = 'True'
    os.environ['LOCAL_GCS_BUCKETS_PATH'] = 'local_gcs'
    os.environ['LOCAL_GCS_SERVER_HOST'] = constants.LOCAL_GCS_SERVER_HOST
    os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST
    os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST
    os.environ['GAE_ENV'] = 'True'
    try:
        # Cron app runs in the background; the main app blocks until Ctrl-C.
        cron_server = common.execute_async(
            'gunicorn -b :{port} main:app'.format(port=constants.CRON_SERVICE_PORT),
            cwd=os.path.join('src', 'appengine'))

        common.execute(
            'gunicorn -b :{port} main:app'.format(
                port=constants.DEV_APPSERVER_PORT),
            cwd=os.path.join('src', 'appengine'))
    except KeyboardInterrupt:
        # Tear everything down that we started above.
        print('Server has been stopped. Exit.')
        cron_server.terminate()
        datastore_emulator.cleanup()
        pubsub_emulator.cleanup()
        local_gcs.terminate()
|
converters.py | import pdb
import pandas as pd
import numpy as np
import h5py
import os, json
from glob import glob
import warnings
import multiprocessing as mp
from itertools import repeat
from tqdm import tqdm
from threading import Thread
from copy import deepcopy
from functools import lru_cache, partial
from time import sleep
from ipaddress import ip_address
from numba import jit
##########
# Converting CSV to HDF5
def identity(x):
    """Return the argument unchanged (default no-op preprocessor)."""
    return x
class Converter:
    """Callable that coerces a raw CSV field to a fixed numpy dtype.

    On conversion failure the configured NA value is returned instead of
    raising, so one bad field cannot abort a whole-file conversion.

    :param dtype: target dtype (anything ``np.dtype()`` accepts)
    :param NAval: value returned for unparseable input; defaults depend on
        the dtype kind (see ``__init__``)
    :param preprocessor: callable applied to the raw value before casting
    :raises ValueError: if no default NA value exists for the dtype
    """

    def __init__(self, dtype, NAval=None, preprocessor=identity):
        self.dtype = np.dtype(dtype)
        self.preprocessor = preprocessor
        # Convert user-specified NA value to dtype
        if NAval is not None:
            self.NAval = self.dtype.type(NAval)
        # Default NA for signed integer is -1 in dtype
        elif self.dtype.kind == 'i':
            self.NAval = self.dtype.type(-1)
        # Default NA for unsigned integer is high-bit set, other bits 0
        elif self.dtype.kind == 'u':
            self.NAval = self.dtype.type(1 << (8*self.dtype.itemsize - 1))
        # Default NA for floats is NaN
        elif self.dtype.kind == 'f':
            self.NAval = self.dtype.type(np.nan)
        else:
            raise ValueError(f"Undefined NA value for {dtype}")

    def __call__(self, val):
        """Convert one value; return ``self.NAval`` if conversion fails."""
        try:
            return self.dtype.type(self.preprocessor(val))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed during long conversions.
            return self.NAval
def u2i(pyint):
    """Reinterpret a Series of unsigned 64-bit Python ints as np.int64.

    Values >= 2**63 wrap to negative numbers (two's complement), so the
    64-bit pattern is preserved exactly.
    """
    wrapped = pd.Series(data=np.zeros(pyint.shape[0], dtype=np.int64),
                        index=pyint.index)
    high_bit = pyint >= 2**63
    wrapped.loc[~high_bit] = pyint.loc[~high_bit].astype(np.int64)
    wrapped.loc[high_bit] = (pyint.loc[high_bit] - 2**64).astype(np.int64)
    return wrapped
def pyIP2np(intip):
    """Split a Series of integer IP addresses into two int64 Series.

    IPv4 values land entirely in the first Series; IPv6 values are split
    into their high and low 64-bit halves (wrapped through u2i). Zeros
    stay zero in both outputs.
    NOTE(review): the v4/v6 split is by magnitude, so a v6 address whose
    integer value is <= 2**32 (e.g. ::1) is treated as v4 — confirm inputs.
    """
    hi64 = pd.Series(data=np.zeros(intip.shape[0], dtype=np.int64), index=intip.index)
    lo64 = pd.Series(data=np.zeros(intip.shape[0], dtype=np.int64), index=intip.index)
    isv6 = intip > 2**32
    isv4 = (intip > 0) & (~isv6)
    hi64.loc[isv4] = intip.loc[isv4].astype(np.int64)
    hi64.loc[isv6] = u2i(intip.loc[isv6].apply(lambda v: v >> 64))
    lo64.loc[isv6] = u2i(intip.loc[isv6].apply(lambda v: v & (2**64 - 1)))
    return hi64, lo64
def split_IP(df, IPcolumns, suffix1='', suffix2='2'):
    """Replace string-IP columns with int64 high/low halves.

    For each column in IPcolumns, parses the (stripped) address text with
    ipaddress, writes the high half to col+suffix1 and the low half to
    col+suffix2, and returns the mutated frame plus the set of column
    names written.
    """
    written = set()
    for name in IPcolumns:
        as_int = df[name].apply(lambda s: int(ip_address(s.strip())))
        hi, lo = pyIP2np(as_int)
        df[name + suffix1] = hi
        df[name + suffix2] = lo
        written.update((name + suffix1, name + suffix2))
    return df, written
def to_bytes(string, encoding="utf-8"):
    """Encode *string* with *encoding* (UTF-8 default) for an accurate byte count."""
    encoded = string.encode(encoding=encoding)
    return encoded
def pack_strings(column, encoding="utf-8"):
    ''' Takes pandas column w/ data of type str, converts it into two arrays:
        an array of null-separated 'strings', and an array of associated
        offsets. Strings are encoded with `encoding` (utf-8 by default).

        :returns: (packed, offsets) — packed is a uint8 array holding every
            encoded string followed by one 0 separator byte; offsets[i] is
            the index in packed where string i begins.
    '''
    # Encode each string exactly once and reuse the bytes. The original
    # encoded twice: lengths used `encoding`, but the packing loop called
    # s.encode() with the DEFAULT encoding, producing corrupt output
    # whenever a non-utf-8 `encoding` was requested. It also wrapped the
    # whole body in a no-op `try: ... except: raise e`.
    encoded = column.apply(lambda s: s.encode(encoding))
    lengths = encoded.apply(len).values
    # Start of string i = sum of previous lengths + i separator bytes.
    offsets = lengths.cumsum() + np.arange(lengths.shape[0]) - lengths
    totalbytes = lengths.sum() + lengths.shape[0]
    packed = np.zeros(shape=(totalbytes,), dtype=np.uint8)
    for start, raw in zip(offsets, encoded.values):
        # Bulk copy instead of a per-byte Python loop.
        packed[start:start + len(raw)] = np.frombuffer(raw, dtype=np.uint8)
    return packed, offsets
def write_strings(f, group, packed, offsets, compression=None, mode='w'):
    ''' Stores strings as encoded in packed, offsets in hdf5 group.

        Creates <group> with a 'values' dataset (uint8 bytes) and a
        'segments' dataset (int64 start offsets), plus the dtype attributes
        the reader side expects. Errors are reported, not raised.
    '''
    try:
        g = f.create_group(group)
        # np.bool_ / np.bytes_ replace the np.bool and np.string_ aliases,
        # which were removed in numpy 1.24 and 2.0 respectively.
        g.attrs["segmented_string"] = np.bool_(True)
        g.create_dataset('values', data=packed, compression=compression)
        g.create_dataset('segments', data=offsets, compression=compression)
        g['values'].attrs['dtype'] = np.bytes_(np.dtype(np.uint8).str)
        g['segments'].attrs['dtype'] = np.bytes_(np.dtype(np.int64).str)
        # There's not really an easy NA value to encode, so passing on that.
    except TypeError as e:
        print(f"Error creating group {group} in file {f.filename}")
        print(e)
def read_strings(group):
    """Decode a segmented-string HDF5 group back to a list of str.

    Inverse of write_strings/pack_strings: 'values' holds null-separated
    bytes, 'segments' holds each string's start offset. Each byte is
    mapped to one character via chr().
    """
    if not isinstance(group, h5py.Group):
        raise TypeError(f"String array must be an HDF5 group; got {type(group)}")
    if 'segments' not in group or 'values' not in group:
        raise ValueError(f"String group must contain 'segments' and 'values' datasets; got {group.keys()}")
    values = group['values']
    # Append the total byte count so the last string gets a length too.
    starts = np.hstack((group['segments'], values.shape[0]))
    lengths = np.diff(starts) - 1  # -1 drops the separator byte
    decoded = []
    for start, length in zip(starts, lengths):
        decoded.append(''.join(chr(byte) for byte in values[start:start + length]))
    return decoded
def _normalize_dtype(col, dtype, typeonly=False):
    ''' Transforms data to <dtype> specified. If dtype is a Converter,
        transforms data to match desired type (as described in Converter).
        If data is string data, then packs strings into custom format before
        returning.

        :param col: pandas Series to normalize
        :param dtype: target — a Converter, np.str_, a numpy dtype/type, or
            None to infer from the column
        :param typeonly: if True, only compute the resulting dtype(s) and
            skip converting the data
        :returns: (dtypes-or-data, pandas-dtype-string, NA-value). The first
            element is a tuple of dtypes when typeonly, otherwise the
            converted data (for strings, the (packed, offsets) pair).
    '''
    # Exactly one of `dtypes` / `data` is set below, depending on typeonly.
    dtypes, data = None, None
    # Case 1: explicit Converter — it supplies dtype and NA value itself.
    if isinstance(dtype, Converter):
        if typeonly:
            dtypes = (dtype.dtype,)
        else:
            data = col.astype(dtype.dtype).values
        pdstr = dtype.dtype.str
        NAvalue = dtype.NAval
    # Case 2: string data (requested, or inferred from object dtype).
    # pack_strings returns uint8s and int64s.
    elif dtype == np.str_ or (dtype is None and col.dtype == 'O'):
        if typeonly:
            dtypes = (np.uint8().dtype, np.int64().dtype)
        else:
            data = pack_strings(col)
        pdstr = 'str'
        NAvalue = ""
    # Case 3: any other explicit dtype — delegate to pandas astype.
    elif dtype is not None:
        try:
            if typeonly:
                dtypes = (col.astype(dtype).values.dtype,)
            else:
                data = col.astype(dtype).values
        except Exception as e:
            print(f"Could not coerce {col.name} to {dtype}\n{col}")
            raise e
        # A type object (e.g. np.float32) must be instantiated to get .str;
        # a dtype instance already has it.
        if callable(dtype):
            pdstr = dtype().dtype.str
        else:
            pdstr = dtype.str
        NAvalue = np.nan
    # Case 4: no dtype given, datetime column.
    # Convert datetime64 to int64 values, but preserve the original
    # dtype so that it can survive a round-trip
    elif col.dtype.kind == 'M':
        if typeonly:
            dtypes = (np.int64().dtype,)
        else:
            data = col.astype(np.int64).values
        pdstr = col.dtype.str
        NAvalue = pd.NaT.value
    # Case 5: no dtype given — pass the column through unchanged.
    else:
        if typeonly:
            dtypes = (col.values.dtype,)
        else:
            data = col.values
        pdstr = col.dtype.str
        NAvalue = np.nan
    # `dtypes or data`: returns whichever of the two was populated above.
    return dtypes or data, pdstr, NAvalue
def col2dset(name, col, f, dtype=None, compression=None):
    '''Write a pandas Series <col> to dataset <name> in HDF5 file <f>.
    Optionally, specify a dtype to convert to before writing. Compression
    with gzip is also supported. Errors are reported, not raised.'''
    data, pdstr, NAvalue = _normalize_dtype(col, dtype)
    # Write the dataset to the file
    if dtype == np.str_ or (dtype is None and col.dtype == 'O'):
        # String columns become a (values, segments) group, not a dataset.
        write_strings(f, name, data[0], data[1], compression=compression)
    else:
        try:
            dset = f.create_dataset(name, data=data, compression=compression)
            # Store the normalized dtype as an attribute of the dataset.
            # np.bytes_ replaces np.string_, which was removed in numpy 2.0.
            dset.attrs['dtype'] = np.bytes_(pdstr)
            dset.attrs['NAvalue'] = NAvalue
        except TypeError as e:
            print(f"Error creating dataset {name} in file {f.filename}")
            print(e)
def df2hdf(filename, df, internal_dtypes={}, compression=None, attempts=10, interval=30, raise_errors=False):
    '''Write a pandas DataFrame <df> to a HDF5 file <filename>. Optionally,
    specify internal_dtypes for converting the columns and a compression to use.

    Retries up to <attempts> times, sleeping <interval> seconds between
    tries (useful for flaky network filesystems); if raise_errors is True
    the first failure is re-raised instead. On final failure any partial
    output file is removed.

    NOTE(review): the mutable default `internal_dtypes={}` is shared across
    calls — harmless while it is only read here, but worth confirming.
    '''
    for _ in range(attempts):
        try:
            with h5py.File(filename, 'w') as f:
                for colname in df.columns:
                    col2dset(colname, df[colname], f, dtype=internal_dtypes.get(colname, None), compression=compression)
            return
        except Exception as e:
            print(e)
            if raise_errors:
                raise e
            else:
                # Back off before retrying (also sleeps after the final try).
                sleep(interval)
    # If here, then saving to HDF5 failed
    if os.path.exists(filename):
        os.remove(filename)
    print(f"Error saving converted file to HDF5 (unknown):")
def convert_file(args):
    '''Convert one file from CSV to HDF5.

    args is a single tuple (for pool.map compatibility) of: filename,
    output directory, output extension, read_csv options, dtype mapping,
    compression, and the list of column transforms.
    '''
    (filename, outdir, extension, options,
     internal_dtypes, compression, transforms) = args
    # Read the CSV, applying the optional per-source transformer.
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            transformer = options.get('transformer', lambda frame: frame)
            df = transformer(pd.read_csv(filename, **options))
    except Exception as e:
        print(f"Error converting (unknown):")
        print(e)
        return
    # Each transform returns (frame, added-column-names); names unused here.
    for transform in transforms:
        df, _ = transform(df)
    basename = os.path.splitext(os.path.basename(filename))[0]
    df2hdf(os.path.join(outdir, basename + extension), df, internal_dtypes, compression)
def _get_valid_columns(filename, options, internal_dtypes, transforms):
    '''Sample a CSV file and decide which columns can be stored as HDF5.

    Reads up to 1000 rows, applies <transforms>, normalizes each column's
    dtype, and keeps only columns whose dtype has an HDF5 equivalent.
    Columns produced by a transform are required; other incompatible
    columns are dropped with a warning.

    :returns: (usecols, validnames) — the read_csv `usecols` list
        restricted to convertible columns, and the set of valid names.
    :raises TypeError: if a required (transform-produced) column has no
        HDF5-compatible dtype.
    '''
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=pd.errors.ParserWarning)
            predf = pd.read_csv(filename, nrows=1000, **options)
    except Exception as e:
        print(f"Error reading sample DataFrame:")
        print(e)
        # Bug fix: return a (usecols, validnames) pair like the success
        # path does; the bare `return []` broke tuple unpacking in callers
        # (`usecols, validnames = _get_valid_columns(...)`).
        return [], set()
    # Columns added/altered by transforms are mandatory in the output.
    required = set()
    df = predf.copy()
    for transform in transforms:
        # Transforms must 1) append columns to end, and 2) return names of columns added/altered
        df, outcols = transform(df)
        required |= outcols
    validcols = []
    pdstrs = []
    hdftypes = []
    NAvalues = []
    for i, col in enumerate(df.columns):
        dt = internal_dtypes.get(col, None)
        dtypes, pdstr, NAvalue = _normalize_dtype(df[col], dt, typeonly=True)
        # Probe h5py: TypeError means the dtype has no HDF5 equivalent.
        try:
            for dtype in dtypes:
                h5py.h5t.py_create(dtype, logical=True)
        except TypeError as e:
            if col in required:
                raise TypeError(f'Column {col} is required but dtype "{df[col].values.dtype}" has no HDF5 equivalent.') from e
            else:
                print(f'Ignoring column {col} because dtype "{df[col].values.dtype}" has no HDF5 equivalent.')
                continue
        validcols.append(i)
        pdstrs.append(pdstr)
        for dtype in dtypes:
            hdftypes.append(dtype.str)
        NAvalues.append(NAvalue)
    validnames = df.columns[validcols]
    # Report the extraction plan as an aligned table.
    print("Columns to be extracted:")
    maxname = max(map(len, validnames))
    maxnorm = max(map(len, pdstrs))
    maxhdf = max(map(len, hdftypes))
    rowfmt = f"{{:<{maxname}}}  {{:<{maxnorm}}}  {{:<{maxhdf}}}  {{}}"
    print(rowfmt.format('Column', 'pd type', 'hdf5 type', 'NA val'))
    for name, norm, hdf, NA in zip(validnames, pdstrs, hdftypes, NAvalues):
        print(rowfmt.format(name, norm, hdf, NA))
    # Translate positional usecols to names, then keep only valid ones.
    # NOTE(review): validcols[:predf.shape[1]] silently assumes transform
    # columns come last — confirm against the transform contract above.
    used = options.get('usecols', list(range(predf.shape[1])))
    if len(used) > 0 and isinstance(used[0], int):
        used = [predf.columns[u] for u in used]
    new_usecols = [used[v] for v in validcols[:predf.shape[1]]]
    return new_usecols, set(validnames)
def convert_files(filenames, outdir, extension, options, compression, pool):
    '''Convert all CSV files in <filenames> to HDF5 files, replacing their
    extensions with <extension> and saving them in <outdir>. Formatting
    and dtype information for the data source is in <options>.
    Conversion runs in parallel unless pool=None.

    NOTE(review): <options> is mutated in place ('transforms' is removed,
    'usecols' overwritten) — callers should not reuse the dict.
    '''
    # NOTE(review): this also fires when outdir simply does not exist, not
    # only on an actual permission problem.
    if not os.path.isdir(outdir) or not os.access(outdir, os.W_OK):
        raise OSError(f"Permission denied: {outdir}")
    internal_dtypes = options.get('dtype', {})
    converters = options.get('converters', {})
    # Transforms must not be forwarded to pd.read_csv, so pop them out.
    if 'transforms' in options:
        transforms = options['transforms']
        del options['transforms']
    else:
        transforms = []
    # Converter instances double as dtype specs for the HDF5 writer.
    for field, conv in converters.items():
        if isinstance(conv, Converter):
            internal_dtypes[field] = conv
    # Sample the first file to decide which columns survive conversion.
    usecols, validnames = _get_valid_columns(filenames[0], options, internal_dtypes, transforms)
    if len(usecols) == 0:
        raise TypeError("No columns found with HDF5-compatible dtype")
    options['usecols'] = usecols
    # LTL: Seems like we're preserving internal_dtypes, but, unfortunately, not the data we just created. This might need to be figured out.
    arglist = zip(filenames, repeat(outdir), repeat(extension), repeat(options), repeat(internal_dtypes), repeat(compression), repeat(transforms))
    # Prefer imap_unordered (multiprocessing.Pool) so tqdm can show live
    # progress; fall back to a generic pool.map, or serial map if pool=None.
    if pool is None:
        _ = list(tqdm(map(convert_file, arglist), total=len(filenames)))
    elif hasattr(pool, 'imap_unordered'):
        _ = list(tqdm(pool.imap_unordered(convert_file, arglist), total=len(filenames)))
    else:
        _ = list(pool.map(convert_file, arglist))
#############
# Reading HDF5 data into a DataFrame
def read_hdf(filenames, columns=None, progbar=True):
    '''Read many HDF5 files into one DataFrame. All HDF5 files must
    have the same schema: i.e. the number, names, and dtypes of the
    constituent datasets must match. Each HDF5 dataset will create
    a column of the same name in the DataFrame. If the HDF5 dataset
    has a string attribute named 'dtype', it will be used as the
    column dtype. Otherwise, the dtype will be inferred from the
    native HDF5 type.
    This function is faster and more memory efficient than reading
    each HDF5 file into its own DataFrame and calling pandas.concat,
    because this function does not incur any copies.'''
    # A string/bytes argument is treated as a glob pattern.
    if isinstance(filenames, (str, bytes)):
        filenames = glob(filenames)
    # Allocate one empty frame up front and compute each file's row offset.
    df, offsets, lengths = _hdf_alloc(filenames, columns)
    pairs = zip(filenames, offsets)
    if progbar:
        pairs = tqdm(pairs, total=len(filenames))
    # Read each file's data directly into its slice of the frame.
    for fname, start in pairs:
        _hdf_insert(fname, df, start)
    return df[columns] if columns is not None else df
# Threading does not get any speedup, so this function is not exposed
def _read_hdf_multithreaded(df, filenames, offsets):
    """Run one _hdf_insert per file on its own thread; join with a progress bar."""
    workers = [
        Thread(target=_hdf_insert, args=(name, df, start))
        for name, start in zip(filenames, offsets)
    ]
    for worker in workers:
        worker.start()
    for worker in tqdm(workers):
        worker.join()
def _hdf_alloc(filenames, columns):
    """Allocate one uninitialized DataFrame large enough for all files.

    Validates that every file matches the first file's column names and
    dtypes, then returns (df, offsets, lengths) where offsets[i] is the
    starting row for file i.

    :raises ValueError: on empty input or inhomogeneous schemas.
    """
    if len(filenames) == 0:
        raise ValueError("Need at least one file to allocate a DataFrame")
    # Get the reference dtypes from the first file
    dtypes, _, NAvalues = _get_column_metadata(filenames[0], columns)
    offsets = [0]
    lengths = []
    for fn in filenames:
        # Ensure each file has same number of columns and same dtypes as reference
        thisdtypes, thislength, _ = _get_column_metadata(fn, columns)
        if len(thisdtypes) != len(dtypes):
            raise ValueError("Number of columns must be constant across all files")
        if thisdtypes != dtypes:
            raise ValueError("Columns have inhomogenous names or dtypes across files")
        offsets.append(offsets[-1] + thislength)
        lengths.append(thislength)
    # last entry is total length, remove it so it won't be used as offset
    total = offsets.pop()
    # Allocate uninitialized memory for columns
    column_dict = {col:pd.Series(np.empty(shape=(total,), dtype=dtypes[col])) for col in dtypes}
    df = pd.DataFrame(column_dict)
    # NOTE(review): NAvalues is stashed as an ad-hoc DataFrame attribute;
    # it will not survive copies and newer pandas warns about it — confirm
    # consumers.
    df.NAvalues = NAvalues
    return df, offsets, lengths
def _get_column_metadata(filename, columns):
    '''Collect ({column: dtype}, row count, {column: NA value}) for one file.

    String columns (HDF5 groups with values/segments) get dtype 'str';
    other datasets use their stored 'dtype' attribute when present, else
    the native HDF5 dtype. Asserts that every column has the same length.
    '''
    dtypes = {}
    length = -1
    NAvalues = {}
    # Bug fix: `codemaps` was referenced below without ever being defined,
    # so any 'category' column raised NameError. Collected locally for now
    # (not returned, to keep the call signature unchanged).
    codemaps = {}
    with h5py.File(filename, 'r') as f:
        if columns is None:
            # Use HDF5 dataset names as column names
            columns = set(f.keys())
        else:
            columns = set(columns) & set(f.keys())
        for colname in columns:
            dset = f[colname]
            if isinstance(dset, h5py.Group):
                dtypes[colname] = 'str'
                assert 'segments' in dset and 'values' in dset, "Column is HDF5 Group but does not have segments and values like a string"
                thislen = dset['segments'].shape[0]
            else:
                # Check for user-specified dtype in dset attribute,
                # otherwise use native HDF5 dtype.
                # NOTE(review): .decode() assumes the attribute (or dtype
                # fallback) is bytes — confirm files written without the
                # 'dtype' attribute never reach this path.
                dt = dset.attrs.get('dtype', dset.dtype).decode()
                if dt == 'category':
                    dtypes[colname] = dset.dtype
                    relpath = os.path.join(os.path.dirname(filename), dset.attrs['codemap_relpath'].decode())
                    codemaps[colname] = os.path.realpath(relpath)
                else:
                    dtypes[colname] = dt
                thislen = dset.shape[0]
            # Set length on the first column, and test that
            # all other columns have the same length
            if length == -1:
                length = thislen
            else:
                # Restored the filename placeholder in the message.
                assert length == thislen, f'Columns of unequal length in {filename}'
            NAvalues[colname] = dset.attrs.get('NAvalue', np.nan)
    return dtypes, length, NAvalues
def _hdf_insert(filename, df, index):
    '''Read one HDF5 file's columns into <df> starting at row <index>.

    Assumes a default RangeIndex on <df>, as built by _hdf_alloc.
    '''
    with h5py.File(filename, 'r') as f:
        # _hdf_alloc has already guaranteed that file has correct column names
        for colname in df.columns:
            if df[colname].dtype.name == 'object':
                size = f[colname]['segments'].shape[0]
                # Bug fix: .loc slices are END-INCLUSIVE, so the upper bound
                # must be index+size-1; index+size selected size+1 rows for
                # the `size` strings returned by read_strings.
                df.loc[index:index + size - 1, colname] = read_strings(f[colname])
            else:
                dset = f[colname]
                size = dset.shape[0]
                # Read the dataset directly into the uninitialized column
                df[colname].values[index:index + size] = dset[:]
|
neon_skill_tests.py | # NEON AI (TM) SOFTWARE, Software Development Kit & Application Framework
# All trademark and other rights reserved by their respective owners
# Copyright 2008-2022 Neongecko.com Inc.
# Contributors: Daniel McKnight, Guy Daniels, Elon Gasper, Richard Leeds,
# Regina Bloomstine, Casimiro Ferreira, Andrii Pernatii, Kirill Hrymailo
# BSD-3 License
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import sys
import unittest
from multiprocessing import Event
from threading import Thread
from time import sleep
from mycroft_bus_client import Message
from ovos_utils.messagebus import FakeBus
from mock import Mock
from mycroft.skills.fallback_skill import FallbackSkill
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
# from neon_utils.language_utils import LanguageDetector, LanguageTranslator
from neon_utils.cache_utils import LRUCache
from neon_utils.configuration_utils import NGIConfig
from neon_utils import check_for_signal
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from skills import *
MycroftSkill = PatchedMycroftSkill
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
SKILL_PATH = os.path.join(ROOT_DIR, "skills")
def get_test_mycroft_skill(bus_events: dict):
    """Build a MycroftSkill bound to a FakeBus with the given handlers.

    :param bus_events: mapping of bus event name -> callback
    :returns: the bound skill instance
    """
    bus = FakeBus()
    for event_name, handler in bus_events.items():
        bus.on(event_name, handler)
    bus.run_in_thread()
    skill = MycroftSkill()
    skill.bind(bus)
    return skill
def get_test_neon_skill(bus_events: dict):
    """Build a NeonSkill bound to a FakeBus with the given handlers.

    :param bus_events: mapping of bus event name -> callback
    :returns: the bound skill instance
    """
    bus = FakeBus()
    for event_name, handler in bus_events.items():
        bus.on(event_name, handler)
    bus.run_in_thread()
    skill = NeonSkill()
    skill.bind(bus)
    return skill
class SkillObjectTests(unittest.TestCase):
    """Smoke tests: each skill base class constructs with the expected MRO,
    name, and (for NeonSkill) default attribute types."""

    def test_common_message_skill_init(self):
        """CommonMessageSkill subclass builds and reports its name."""
        skill = TestCMS()
        self.assertIsInstance(skill, MycroftSkill)
        self.assertIsInstance(skill, NeonSkill)
        self.assertIsInstance(skill, CommonMessageSkill)
        self.assertEqual(skill.name, "Test Common Message Skill")

    def test_common_play_skill_init(self):
        """CommonPlaySkill subclass builds and reports its name."""
        skill = TestCPS()
        self.assertIsInstance(skill, MycroftSkill)
        self.assertIsInstance(skill, NeonSkill)
        self.assertIsInstance(skill, CommonPlaySkill)
        self.assertEqual(skill.name, "Test Common Play Skill")

    def test_common_query_skill_init(self):
        """CommonQuerySkill subclass builds and reports its name."""
        skill = TestCQS()
        self.assertIsInstance(skill, MycroftSkill)
        self.assertIsInstance(skill, NeonSkill)
        self.assertIsInstance(skill, CommonQuerySkill)
        self.assertEqual(skill.name, "Test Common Query Skill")

    def test_fallback_skill_init(self):
        """NeonFallbackSkill subclass builds with the FallbackSkill MRO."""
        skill = TestFBS()
        self.assertIsInstance(skill, MycroftSkill)
        self.assertIsInstance(skill, NeonSkill)
        self.assertIsInstance(skill, NeonFallbackSkill)
        self.assertIsInstance(skill, FallbackSkill)
        self.assertEqual(skill.name, "Test Fallback Skill")

    def test_neon_skill_init(self):
        """NeonSkill exposes its full default attribute surface with the
        expected types."""
        skill = TestNeonSkill()
        self.assertIsInstance(skill, MycroftSkill)
        self.assertIsInstance(skill, NeonSkill)
        self.assertEqual(skill.name, "Test Neon Skill")
        self.assertIsInstance(skill.user_config, NGIConfig)
        self.assertIsInstance(skill.local_config, NGIConfig)
        self.assertIsInstance(skill.lru_cache, LRUCache)
        self.assertIsInstance(skill.sys_tz, datetime.tzinfo)
        self.assertIsInstance(skill.gui_enabled, bool)
        self.assertIsInstance(skill.scheduled_repeats, list)  # TODO: What is this param for?
        self.assertIsInstance(skill.server, bool)
        self.assertIsInstance(skill.default_intent_timeout, int)
        self.assertFalse(skill.neon_core)  # TODO: Test against NeonCore too DM
        self.assertIsInstance(skill.actions_to_confirm, dict)
        self.assertIsInstance(skill.skill_mode, str)
        self.assertIsInstance(skill.extension_time, int)
        self.assertIsInstance(skill.language_config, dict)
        # Language modules are not configured in this environment.
        self.assertIsNone(skill.lang_detector)
        self.assertIsNone(skill.translator)
        # self.assertIsInstance(skill.lang_detector, LanguageDetector)
        # self.assertIsInstance(skill.translator, LanguageTranslator)
        self.assertIsInstance(skill.settings, dict)
        self.assertIsInstance(skill.location_timezone, str)
        self.assertIsInstance(skill.preference_brands(), dict)
        self.assertIsInstance(skill.preference_user(), dict)
        self.assertIsInstance(skill.preference_location(), dict)
        self.assertIsInstance(skill.preference_unit(), dict)
        self.assertIsInstance(skill.preference_speech(), dict)
        self.assertIsInstance(skill.preference_skill(), dict)
        self.assertIsInstance(skill.build_user_dict(), dict)
        self.assertEqual(skill.file_system.path, skill.settings_write_path)
        self.assertNotEqual(os.path.basename(skill.file_system.path), skill.name)

    def test_patched_mycroft_skill_init(self):
        """PatchedMycroftSkill subclass builds with consistent paths."""
        skill = TestPatchedSkill()
        self.assertIsInstance(skill, MycroftSkill)
        self.assertEqual(skill.name, "Test Mycroft Skill")
        self.assertEqual(skill.file_system.path, skill.settings_write_path)
        self.assertNotEqual(os.path.basename(skill.file_system.path), skill.name)
class PatchedMycroftSkillTests(unittest.TestCase):
def test_get_response_simple(self):
def handle_speak(_):
check_for_signal("isSpeaking")
spoken.set()
def skill_response_thread(s: MycroftSkill, idx: str):
resp = s.get_response(test_dialog)
test_results[idx] = resp
test_results = dict()
spoken = Event()
test_dialog = "testing get response."
message = Message("recognizer_loop:utterance", {"utterances": ["testing one", "testing 1", "resting one"]},
{"timing": {},
"username": "local"})
skill = get_test_mycroft_skill({"speak": handle_speak})
t = Thread(target=skill_response_thread, args=(skill, message.context["username"]), daemon=True)
t.start()
spoken.wait(30)
sleep(1)
skill.converse(message)
t.join(5)
self.assertEqual(test_results[message.context["username"]], message.data["utterances"][0])
def test_get_response_no_username(self):
def handle_speak(_):
check_for_signal("isSpeaking")
spoken.set()
def skill_response_thread(s: MycroftSkill, idx: str):
resp = s.get_response(test_dialog)
test_results[idx] = resp
test_results = dict()
spoken = Event()
test_dialog = "testing get response."
message = Message("recognizer_loop:utterance", {"utterances": ["testing one", "testing 1", "resting one"]},
{"timing": {}})
skill = get_test_mycroft_skill({"speak": handle_speak})
t = Thread(target=skill_response_thread, args=(skill, "0"), daemon=True)
t.start()
spoken.wait(30)
sleep(1)
skill.converse(message)
t.join(5)
self.assertEqual(test_results["0"], message.data["utterances"][0])
def test_get_response_multi_user(self):
def handle_speak(_):
check_for_signal("isSpeaking")
spoken.set()
def skill_response_thread(s: MycroftSkill, idx: str):
resp = s.get_response(test_dialog, message=Message("converse_message", {},
{"username": "valid_converse_user"}))
test_results[idx] = resp
test_results = dict()
spoken = Event()
test_dialog = "testing get response multi user."
valid_message = Message("recognizer_loop:utterance",
{"utterances": ["testing one", "testing 1", "resting one"]},
{"timing": {},
"username": "valid_converse_user"})
invalid_message = Message("recognizer_loop:utterance",
{"utterances": ["invalid return"]},
{"timing": {},
"username": "invalid_converse_user"})
skill = get_test_mycroft_skill({"speak": handle_speak})
t = Thread(target=skill_response_thread, args=(skill, valid_message.context["username"]), daemon=True)
t.start()
spoken.wait(30)
sleep(1)
skill.converse(invalid_message)
skill.converse(valid_message)
skill.converse(invalid_message)
t.join(5)
self.assertEqual(test_results[valid_message.context["username"]], valid_message.data["utterances"][0])
def test_get_response_dig_for_message(self):
def handle_speak(_):
check_for_signal("isSpeaking")
spoken.set()
def skill_response_thread(s: MycroftSkill, idx: str):
def intent_handler(message):
resp = s.get_response(test_dialog)
test_results[idx] = resp
intent_handler(Message("converse_message", {}, {"username": "valid_converse_user"}))
test_results = dict()
spoken = Event()
test_dialog = "testing get response multi user."
valid_message = Message("recognizer_loop:utterance",
{"utterances": ["testing one", "testing 1", "resting one"]},
{"timing": {},
"username": "valid_converse_user"})
invalid_message = Message("recognizer_loop:utterance",
{"utterances": ["invalid return"]},
{"timing": {},
"username": "invalid_converse_user"})
skill = get_test_mycroft_skill({"speak": handle_speak})
t = Thread(target=skill_response_thread, args=(skill, valid_message.context["username"]), daemon=True)
t.start()
spoken.wait(30)
sleep(1)
skill.converse(invalid_message)
skill.converse(valid_message)
skill.converse(invalid_message)
t.join(5)
self.assertEqual(test_results[valid_message.context["username"]], valid_message.data["utterances"][0])
def test_get_response_no_response(self):
def handle_speak(_):
check_for_signal("isSpeaking")
spoken.set()
def skill_response_thread(s: MycroftSkill, idx: str):
resp = s.get_response(test_dialog, message=Message("converse_message", {},
{"username": "valid_converse_user"}))
test_results[idx] = resp
test_results = dict()
spoken = Event()
test_dialog = "testing get response multi user."
valid_message = Message("recognizer_loop:utterance",
{"utterances": ["testing one", "testing 1", "resting one"]},
{"timing": {},
"username": "valid_converse_user"})
invalid_message = Message("recognizer_loop:utterance",
{"utterances": ["invalid return"]},
{"timing": {},
"username": "invalid_converse_user"})
skill = get_test_mycroft_skill({"speak": handle_speak})
t = Thread(target=skill_response_thread, args=(skill, valid_message.context["username"]), daemon=True)
t.start()
spoken.wait(30)
sleep(1)
skill.converse(invalid_message)
t.join(30)
self.assertIsNone(test_results[valid_message.context["username"]])
def test_get_response_validator_pass(self):
def handle_speak(_):
check_for_signal("isSpeaking")
spoken.set()
def is_valid(_):
test_results["validator"] = True
return True
def skill_response_thread(s: MycroftSkill, idx: str):
resp = s.get_response(test_dialog, validator=is_valid, message=Message("converse_message", {},
{"username": "valid_converse_user"}))
test_results[idx] = resp
test_results = dict()
spoken = Event()
test_dialog = "testing get response multi user."
valid_message = Message("recognizer_loop:utterance",
{"utterances": ["testing one", "testing 1", "resting one"]},
{"timing": {},
"username": "valid_converse_user"})
skill = get_test_mycroft_skill({"speak": handle_speak})
t = Thread(target=skill_response_thread, args=(skill, valid_message.context["username"]), daemon=True)
t.start()
spoken.wait(30)
sleep(1)
skill.converse(valid_message)
t.join(30)
self.assertTrue(test_results["validator"])
self.assertEqual(test_results[valid_message.context["username"]], valid_message.data["utterances"][0])
def test_get_response_validator_fail(self):
    """get_response() with a validator that rejects every response.

    The on_fail callback should be invoked with the rejected utterance.
    """
    def handle_speak(_):
        # Clear the speaking signal and unblock the main test thread.
        check_for_signal("isSpeaking")
        spoken.set()

    def is_valid(_):
        # Record that the validator ran; reject every utterance.
        test_results["validator"] = True
        return False

    on_fail = Mock()

    def skill_response_thread(s: MycroftSkill, idx: str):
        # Blocks in a daemon thread until get_response resolves.
        resp = s.get_response(test_dialog, validator=is_valid, on_fail=on_fail,
                              message=Message("converse_message", {},
                                              {"username": "valid_converse_user"}))
        test_results[idx] = resp

    test_results = dict()
    spoken = Event()
    test_dialog = "testing get response multi user."
    valid_message = Message("recognizer_loop:utterance",
                            {"utterances": ["testing one", "testing 1", "resting one"]},
                            {"timing": {},
                             "username": "valid_converse_user"})
    skill = get_test_mycroft_skill({"speak": handle_speak})
    t = Thread(target=skill_response_thread, args=(skill, valid_message.context["username"]), daemon=True)
    t.start()
    spoken.wait(30)
    sleep(1)
    skill.converse(valid_message)
    t.join(30)
    self.assertTrue(test_results["validator"])
    # Validation failed, so on_fail gets the first (rejected) utterance.
    on_fail.assert_called_once()
    on_fail.assert_called_with("testing one")
def test_speak_simple_valid(self):
    """speak() with only an utterance emits a well-formed `speak` message."""
    speak_handler = Mock()
    spoken_text = "test to speak"
    test_skill = get_test_mycroft_skill({"speak": speak_handler})
    test_skill.speak(spoken_text)
    speak_handler.assert_called_once()
    emitted = speak_handler.call_args.args[0]
    self.assertIsInstance(emitted, Message)
    payload = emitted.data
    self.assertEqual(payload["utterance"], spoken_text)
    self.assertEqual(payload["expect_response"], False)
    self.assertIsInstance(payload["meta"], dict)
    self.assertIsNone(payload["speaker"])
def test_speak_speaker_valid(self):
    """speak() with an explicit speaker dict passes it through unchanged."""
    speak_handler = Mock()
    spoken_text = "test to speak"
    speaker_spec = {"speaker": "Test Speaker",
                    "language": "en-au",
                    "gender": "female"}
    test_skill = get_test_mycroft_skill({"speak": speak_handler})
    test_skill.speak(spoken_text, speaker=speaker_spec)
    speak_handler.assert_called_once()
    emitted = speak_handler.call_args.args[0]
    self.assertIsInstance(emitted, Message)
    payload = emitted.data
    self.assertEqual(payload["utterance"], spoken_text)
    self.assertEqual(payload["expect_response"], False)
    self.assertIsInstance(payload["meta"], dict)
    self.assertEqual(payload["speaker"], speaker_spec)
def test_speak_simple_with_message_valid(self):
    """speak() with an originating message: the emitted context mirrors the
    source message with source/destination swapped so the reply routes back
    to the caller."""
    # Representative intent message as produced by the CLI client.
    message = Message("date-time.neon:handle_query_time", {'intent_type': 'date-time.neon:handle_query_time',
                                                           'target': None,
                                                           'confidence': 0.6666666666666666,
                                                           'utterance': 'what time is it neon'},
                      {'client_name': 'mycroft_cli',
                       'source': ['skills'],
                       'destination': 'debug_cli',
                       'client': 'local',
                       'neon_should_respond': False,
                       'timing': {'transcribed': 1631062887.5719671,
                                  'text_parsers': 0.34954047203063965,
                                  'speech_start': 1631062888.1001909},
                       'audio_file': '',
                       'skill_id': 'date-time.neon'})
    handle_speak = Mock()
    utterance = "test to speak"
    skill = get_test_mycroft_skill({"speak": handle_speak})
    skill.speak(utterance, message=message)
    handle_speak.assert_called_once()
    msg = handle_speak.call_args.args[0]
    self.assertIsInstance(msg, Message)
    self.assertEqual(msg.data["utterance"], utterance)
    self.assertEqual(msg.data["expect_response"], False)
    self.assertIsInstance(msg.data["meta"], dict)
    self.assertIsNone(msg.data["speaker"])
    # Routing context is swapped relative to the incoming message; pop()
    # removes the routing keys so the final equality compares the rest.
    self.assertEqual(message.context.pop("source"), msg.context.pop("destination"))
    self.assertEqual(message.context.pop("destination"), msg.context.pop("source"))
    self.assertEqual(message.context, msg.context)
def test_speak_speaker_with_message_override_valid(self):
    """speak() with both a message-embedded speaker and an explicit speaker
    kwarg: the kwarg must win over the speaker found in message data."""
    # The message carries a decoy 'speaker' entry that must be overridden.
    message = Message("date-time.neon:handle_query_time", {'intent_type': 'date-time.neon:handle_query_time',
                                                           'target': None,
                                                           'confidence': 0.6666666666666666,
                                                           'utterance': 'what time is it neon',
                                                           'speaker': {"speaker": "invalid speaker",
                                                                       "language": "es-es"}},
                      {'client_name': 'mycroft_cli',
                       'source': ['skills'],
                       'destination': 'debug_cli',
                       'client': 'local',
                       'neon_should_respond': False,
                       'timing': {'transcribed': 1631062887.5719671,
                                  'text_parsers': 0.34954047203063965,
                                  'speech_start': 1631062888.1001909},
                       'audio_file': '',
                       'skill_id': 'date-time.neon'})
    handle_speak = Mock()
    utterance = "test to speak"
    speaker = {"speaker": "Test Speaker",
               "language": "en-au",
               "gender": "female"}
    skill = get_test_mycroft_skill({"speak": handle_speak})
    skill.speak(utterance, speaker=speaker, message=message)
    handle_speak.assert_called_once()
    msg = handle_speak.call_args.args[0]
    self.assertIsInstance(msg, Message)
    self.assertEqual(msg.data["utterance"], utterance)
    self.assertEqual(msg.data["expect_response"], False)
    self.assertIsInstance(msg.data["meta"], dict)
    # The explicit kwarg speaker, not the message's 'invalid speaker'.
    self.assertEqual(msg.data["speaker"], speaker)
    # Routing context is swapped relative to the incoming message.
    self.assertEqual(message.context.pop("source"), msg.context.pop("destination"))
    self.assertEqual(message.context.pop("destination"), msg.context.pop("source"))
    self.assertEqual(message.context, msg.context)
def test_speak_speaker_with_message_valid(self):
    """speak() with no speaker kwarg: the speaker embedded in the incoming
    message data should be used for the emitted speak message."""
    speaker = {"speaker": "Test Speaker",
               "language": "en-au",
               "gender": "female"}
    message = Message("date-time.neon:handle_query_time", {'intent_type': 'date-time.neon:handle_query_time',
                                                           'target': None,
                                                           'confidence': 0.6666666666666666,
                                                           'utterance': 'what time is it neon',
                                                           'speaker': speaker},
                      {'client_name': 'mycroft_cli',
                       'source': ['skills'],
                       'destination': 'debug_cli',
                       'client': 'local',
                       'neon_should_respond': False,
                       'timing': {'transcribed': 1631062887.5719671,
                                  'text_parsers': 0.34954047203063965,
                                  'speech_start': 1631062888.1001909},
                       'audio_file': '',
                       'skill_id': 'date-time.neon'})
    handle_speak = Mock()
    utterance = "test to speak"
    skill = get_test_mycroft_skill({"speak": handle_speak})
    skill.speak(utterance, message=message)
    handle_speak.assert_called_once()
    msg = handle_speak.call_args.args[0]
    self.assertIsInstance(msg, Message)
    self.assertEqual(msg.data["utterance"], utterance)
    self.assertEqual(msg.data["expect_response"], False)
    self.assertIsInstance(msg.data["meta"], dict)
    # Speaker was taken from the message data.
    self.assertEqual(msg.data["speaker"], speaker)
    # Routing context is swapped relative to the incoming message.
    self.assertEqual(message.context.pop("source"), msg.context.pop("destination"))
    self.assertEqual(message.context.pop("destination"), msg.context.pop("source"))
    self.assertEqual(message.context, msg.context)
def test_speak_emit_response_valid(self):
    """speak() with cc_data.emit_response set: the response is emitted on
    'skills:execute.response' instead of the normal 'speak' message type."""
    message = Message("date-time.neon:handle_query_time", {'intent_type': 'date-time.neon:handle_query_time',
                                                           'target': None,
                                                           'confidence': 0.6666666666666666,
                                                           'utterance': 'what time is it neon'},
                      {'client_name': 'mycroft_cli',
                       'source': ['skills'],
                       'destination': 'debug_cli',
                       'client': 'local',
                       'neon_should_respond': False,
                       'timing': {'transcribed': 1631062887.5719671,
                                  'text_parsers': 0.34954047203063965,
                                  'speech_start': 1631062888.1001909},
                       'audio_file': '',
                       'skill_id': 'date-time.neon',
                       # This flag redirects the output message type.
                       "cc_data": {"emit_response": True}})
    handle_execute_response = Mock()
    utterance = "test to speak"
    skill = get_test_mycroft_skill({"skills:execute.response": handle_execute_response})
    skill.speak(utterance, message=message)
    handle_execute_response.assert_called_once()
    msg = handle_execute_response.call_args.args[0]
    self.assertIsInstance(msg, Message)
    self.assertEqual(msg.data["utterance"], utterance)
    self.assertEqual(msg.data["expect_response"], False)
    self.assertIsInstance(msg.data["meta"], dict)
    # Routing context is swapped relative to the incoming message.
    self.assertEqual(message.context.pop("source"), msg.context.pop("destination"))
    self.assertEqual(message.context.pop("destination"), msg.context.pop("source"))
    self.assertEqual(message.context, msg.context)
# TODO: Test settings load
class NeonSkillTests(unittest.TestCase):
    """Tests for NeonSkill-specific helpers."""

    def test_send_email_valid(self):
        # NOTE(review): this sends a real email through the configured
        # backend -- it requires network access and external service state,
        # so it can fail for environmental reasons.
        skill = get_test_neon_skill(dict())
        self.assertTrue(skill.send_email("Test Message",
                                         "This is a test\ncalled from neon_skill_tests.py in neon-utils",
                                         email_addr="test@neongecko.com"))
    # TODO: NeonSkill Tests
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
internal_api.py | import cx_Oracle
import threading
# Debug
from pprint import pprint as pp
class INTERNAL_API(object):
    """cx_Oracle helper for bulk product / stock / price / invoice lookups
    against the IFS ERP schema.

    Part-number lookups are split into batches of at most ``__limit``
    entries (Oracle rejects ``IN (...)`` lists longer than 1000 literals)
    and each batch is fetched on its own thread.  The shared result lists
    are only mutated via ``append``/``+=``, which are atomic under the GIL,
    so no extra locking is required.
    """

    # Oracle's hard limit on the number of literals in an IN (...) list.
    __limit = 1000

    def __init__(self, connString: str):
        """
        :param connString: cx_Oracle connect string ('user/password@dsn').
        """
        self._dest = connString

    def __connect(self):
        """Open a fresh connection and cursor.

        The caller owns (and must close) the returned connection.
        """
        connection = cx_Oracle.connect(self._dest, encoding='UTF-8')
        cursor = connection.cursor()
        return (connection, cursor)

    def customSQL(self, sqlQuery: str) -> list:
        """Execute an arbitrary SELECT and return all rows as tuples.

        WARNING: the statement is executed verbatim -- never pass
        user-controlled input to this method.
        """
        (conn, cur) = self.__connect()
        try:
            cur.execute(sqlQuery)
            # Fix: close the connection even when execute() raises
            # (previously a failed query leaked the connection).
            return [row for row in cur]
        finally:
            conn.close()

    def __formatEntries(self, entryList: list) -> str:
        """Format entries as the body of a SQL IN-list: ``'a', 'b', 'c'``.

        NOTE(review): values are inlined as quoted literals instead of bind
        variables, so an entry containing a quote breaks the statement (and
        is an injection vector for untrusted input).  TODO: migrate to bind
        variables.
        """
        return ", ".join(f"'{entry}'" for entry in entryList)

    # 1000-entry chunks generator
    def __chunks(self, array):
        """Yield consecutive __limit-sized slices of *array*.

        Yields nothing for an empty list; the last slice may be shorter.
        """
        for i in range(0, len(array), self.__limit):
            yield array[i:i + self.__limit]

    def __runChunked(self, products: list, fetch) -> None:
        """Run *fetch* over every chunk of *products*, one thread per chunk.

        Bug fix: the previous per-method page arithmetic was wrong --
        ``int(len/limit)`` dropped the final partial chunk in
        getProductDetails, while ``int(len/limit) + 1`` raised an uncaught
        StopIteration for empty inputs or exact multiples of 1000 in
        getStocks/getPrices.  Iterating the generator directly handles all
        sizes correctly.
        """
        threads = []
        for chunk in self.__chunks(products):
            worker = threading.Thread(target=fetch, args=(chunk,))
            threads.append(worker)
            worker.start()
        for worker in threads:
            worker.join()

    def getProductDetails(self, products: list) -> list:
        """Fetch stock, last-purchase and last-sale details for *products*.

        :param products: list of part numbers
        :return: list of dicts, one per part found (ordering is not
                 guaranteed -- chunks are fetched concurrently)
        """
        self._FetchedProducts = []

        def fetch(l):
            if len(l) == 0:
                return
            formatted = self.__formatEntries(l)
            sqlQuery = f"""select
            P.PART_NO MALZEME,
            P.UNIT_MEAS BIRIM,
            P.DESCRIPTION ACIKLAMA,
            NVL(IFSAPP.FLASK_API.GET_PART_STOCK_NOW('KE', P.PART_NO, 'FABRIKA-DEPO'), 0) FABRIKASTOK,
            NVL(IFSAPP.FLASK_API.GET_PART_STOCK_NOW('KE', P.PART_NO, 'KE-MAGAZA'), 0) MAGAZASTOK,
            NVL(IFSAPP.FLASK_API.GET_PART_STOCK_NOW('KE', P.PART_NO, 'E-TICARET'), 0) ESTOK,
            NVL(IFSAPP.FLASK_API.GET_PART_LAST_PURC_DATE(P.CONTRACT, P.PART_NO, 1), TO_DATE('11.11.1111', 'dd.mm.yyyy')) LASTPURCHDATE,
            NVL(IFSAPP.FLASK_API.GET_PART_LAST_PURC_ORDER_NO(P.CONTRACT, P.PART_NO, 1), 'None') LASTPURCHORDER,
            NVL(IFSAPP.FLASK_API.GET_PART_LAST_PURC_PRICE(P.CONTRACT, P.PART_NO, 1, 1), 0) LASTPURCHPRICE,
            NVL(IFSAPP.FLASK_API.GET_PART_LAST_PURC_QTY(P.CONTRACT, P.PART_NO, 1), 0) LASTPURCHQTY,
            NVL(IFSAPP.FLASK_API.GET_PART_LAST_SALE_DATE(P.CONTRACT, P.PART_NO, 1), TO_DATE('11.11.1111', 'dd.mm.yyyy')) LASTSALEDATE,
            NVL(IFSAPP.FLASK_API.GET_PART_LAST_SALE_ORDER_NO(P.CONTRACT, P.PART_NO, 1), 'None') LASTSALEORDER,
            NVL(IFSAPP.FLASK_API.GET_PART_LAST_SALE_PRICE(P.CONTRACT, P.PART_NO, 1, 1), 0) LASTSALEPRICE,
            NVL(IFSAPP.FLASK_API.GET_PART_LAST_SALE_QTY(P.CONTRACT, P.PART_NO, 1), 0) LASTSALEQTY,
            NVL(IFSAPP.FLASK_API.SALE_FREQ_OF_PRODUCT(P.CONTRACT, P.PART_NO),0) THISYEARSALE,
            NVL(IFSAPP.FLASK_API.SALE_FREQ_OF_PRODUCT(P.CONTRACT, P.PART_NO, 24, 12),0) LASTYEARSALE
            from INVENTORY_PART P
            where P.PART_NO in ({formatted})
            AND P.CONTRACT = 'KE'
            """
            (conn, cur) = self.__connect()
            try:
                cur.execute(sqlQuery)
                keys = ('partNo', 'unitMeas', 'description', 'fabrikaStock',
                        'magazaStock', 'eticaretStock', 'lastPurchDate',
                        'lastPurchOrder', 'lastPurchPrice', 'lastPurchQty',
                        'lastSaleDate', 'lastSaleOrder', 'lastSalePrice',
                        'lastSaleQty', 'thisYearSaleFreq', 'lastYearSaleFreq')
                fetched = [dict(zip(keys, row)) for row in cur]
            finally:
                conn.close()
            # list += is atomic under the GIL, safe across fetch threads.
            self._FetchedProducts += fetched

        self.__runChunked(products, fetch)
        return self._FetchedProducts

    def getStocks(self, products: list) -> list:
        """Fetch per-warehouse and total stock levels for *products*.

        :param products: list of part numbers
        :return: list of dicts (ordering not guaranteed)
        """
        self._FetchedProducts = []

        def fetch(l):
            if len(l) == 0:
                return
            formatted = self.__formatEntries(l)
            sqlQuery = f"""SELECT T.PART_NO, NVL(IFSAPP.FLASK_API.GET_PART_STOCK_NOW('KE', T.PART_NO, 'FABRIKA-DEPO'), 0) + NVL(IFSAPP.FLASK_API.GET_PART_STOCK_NOW('KE', T.PART_NO, 'KE-MAGAZA'), 0) + NVL(IFSAPP.FLASK_API.GET_PART_STOCK_NOW('KE', T.PART_NO, 'E-TICARET'), 0) STOCK, NVL(IFSAPP.FLASK_API.GET_PART_STOCK_NOW('KE', T.PART_NO, 'FABRIKA-DEPO'), 0) FABRIKA, NVL(IFSAPP.FLASK_API.GET_PART_STOCK_NOW('KE', T.PART_NO, 'KE-MAGAZA'), 0) MAGAZA, NVL(IFSAPP.FLASK_API.GET_PART_STOCK_NOW('KE', T.PART_NO, 'E-TICARET'), 0) ELEKTRIX FROM IFSAPP.INVENTORY_PART T WHERE T.contract = 'KE' AND T.PART_NO IN ({formatted})"""
            (conn, cur) = self.__connect()
            try:
                cur.execute(sqlQuery)
                keys = ('partNo', 'totalStock', 'fabrikaStock',
                        'magazaStock', 'eticaretStock')
                rows = [dict(zip(keys, row)) for row in cur]
            finally:
                conn.close()
            self._FetchedProducts += rows

        self.__runChunked(products, fetch)
        return self._FetchedProducts

    def getPrices(self, products: list) -> list:
        """Fetch last purchase prices (with/without discount) for *products*.

        :param products: list of part numbers
        :return: list of dicts (ordering not guaranteed)
        """
        self._FetchedPrices = []

        def fetch(l):
            if len(l) == 0:
                return
            formatted = self.__formatEntries(l)
            sqlQuery = f"""SELECT T.PART_NO, NVL(ROUND(IFSAPP.FLASK_API.GET_PART_LAST_PURC_PRICE('KE', T.PART_NO, 0, 1), 3), 0) SON_ALIS_FIYAT_INDIRIMSIZ, NVL(ROUND(IFSAPP.FLASK_API.GET_PART_LAST_PURC_PRICE('KE', T.PART_NO, 1, 1), 3), 0) SON_ALIS_FIYAT_INDIRIMLI, NVL(ROUND(IFSAPP.FLASK_API.GET_PART_LAST_PURC_PRICE('KE',T.PART_NO, 1, 1), 3), 0) SON_ALIS_FIYAT_INDIRIMLI FROM IFSAPP.INVENTORY_PART T WHERE T.contract = 'KE' AND T.PART_NO in ({formatted})"""
            (conn, cur) = self.__connect()
            try:
                cur.execute(sqlQuery)
                keys = ('partNo', 'priceNoDiscount', 'priceDiscounted', 'purchasePrice')
                rows = [dict(zip(keys, row)) for row in cur]
            finally:
                conn.close()
            self._FetchedPrices += rows

        self.__runChunked(products, fetch)
        return self._FetchedPrices

    def getInvoices(self) -> list:
        """Fetch all e-commerce invoices (salesman 'ETICARET', contract
        'KE') with per-invoice net/gross totals.

        :return: list of dicts, one per invoice
        """
        sqlQuery = """select
        s.ORDER_NO sipNo,
        CUSTOMER_ORDER_API.GET_CUSTOMER_PO_NO(s.ORDER_NO) TSoftSip,
        s.RESMI_FTU_NO FatNo,
        s.INVOICE_ID FatID,
        s.INVOICE_DATE FatTarih,
        s.IDENTITY MusteriNo,
        s.CUST_DESC Musteri,
        (select x.value from COMM_METHOD x where x.IDENTITY = s.IDENTITY and x.METHOD_ID_DB = 'E_MAIL' and x.METHOD_DEFAULT = 'TRUE') MusteriMail,
        s.SHIPMENT_ID,
        sum(FLASK_API.CALC_DISCOUNT(s.NET_DOM_AMOUNT, s.DISCOUNT)) NetTutar,
        sum(FLASK_API.CALC_DISCOUNT(s.GROSS_CURR_AMOUNT, s.DISCOUNT)) BrutTutar
        from IFSTR_KRC_REP_SALES s
        where CUSTOMER_ORDER_API.GET_SALESMAN_CODE(s.ORDER_NO) = 'ETICARET'
        and s.CONTRACT = 'KE'
        AND CUSTOMER_ORDER_API.GET_CUSTOMER_PO_NO(s.ORDER_NO) IS NOT NULL
        group by
        s.ORDER_NO,
        s.RESMI_FTU_NO,
        s.INVOICE_ID,
        s.INVOICE_DATE,
        s.IDENTITY,
        s.CUST_DESC,
        s.SHIPMENT_ID
        """
        (conn, cur) = self.__connect()
        try:
            cur.execute(sqlQuery)
            keys = ('orderNo', 'eCommOrderNo', 'invoiceNo', 'invoiceID',
                    'invoiceDate', 'customerNo', 'customerDesc',
                    'customerEmail', 'shipmentID', 'netPrice', 'grossPrice')
            return [dict(zip(keys, row)) for row in cur]
        finally:
            conn.close()
syncobj.py | import time
import random
import os
import sys
import threading
import weakref
import collections
import functools
import struct
import logging
import copy
import types
# Py2/Py3 compatibility shim: expose a single Queue module alias, an
# iteritems() helper and (on py3) an xrange alias.
try:
    import Queue
    is_py3 = False
    def iteritems(v):
        return v.iteritems()
except ImportError:  # python3
    import queue as Queue
    is_py3 = True
    # xrange does not exist on py3; on py2 the builtin is used directly.
    xrange = range
    def iteritems(v):
        return v.items()
import pysyncobj.pickle as pickle
from .dns_resolver import globalDnsResolver
from .poller import createPoller
try:
from .pipe_notifier import PipeNotifier
PIPE_NOTIFIER_ENABLED = True
except ImportError:
PIPE_NOTIFIER_ENABLED = False
from .serializer import Serializer, SERIALIZER_STATE
from .tcp_server import TcpServer
from .node import Node, NODE_STATUS
from .journal import createJournal
from .config import SyncObjConf, FAIL_REASON
from .encryptor import HAS_CRYPTO, getEncryptor
from .version import VERSION
from .revision import REVISION
from .fast_queue import FastQueue
class _RAFT_STATE:
    # Raft roles a node can be in (see the Raft consensus paper).
    FOLLOWER = 0
    CANDIDATE = 1
    LEADER = 2
class _COMMAND_TYPE:
    # One-byte tag prefixed to commands stored in the journal.
    REGULAR = 0     # replicated user command
    NO_OP = 1       # no-op entry (used to seed the journal in __init__)
    MEMBERSHIP = 2  # cluster membership change ('add'/'rem')
    VERSION = 3     # cluster-wide code version switch

# Pack a single unsigned byte; works identically on py2 and py3.
_bchr = functools.partial(struct.pack, 'B')
class SyncObjException(Exception):
    """Base SyncObj error; ``errorCode`` carries a machine-readable reason."""

    def __init__(self, errorCode, *args, **kwargs):
        super(SyncObjException, self).__init__(*args, **kwargs)
        self.errorCode = errorCode


class SyncObjExceptionWrongVer(SyncObjException):
    """Raised when a command requires a newer code version than this node runs."""

    def __init__(self, ver):
        super(SyncObjExceptionWrongVer, self).__init__('wrongVer')
        self.ver = ver
class SyncObjConsumer(object):
    """Base class for replicated consumer objects attached to a SyncObj."""

    def __init__(self):
        self._syncObj = None
        # Snapshot the attribute names that exist at construction time;
        # these infrastructure fields are excluded from serialization.
        self.__properies = set()
        self.__properies.update(self.__dict__)

    def _destroy(self):
        # Drop the back-reference to the owning SyncObj.
        self._syncObj = None

    def _serialize(self):
        # Dump only the user-defined state added after construction.
        return {key: value for key, value in self.__dict__.items()
                if key not in self.__properies}

    def _deserialize(self, data):
        # Write straight into __dict__ (bypasses descriptors, as before).
        for key, value in data.items():
            self.__dict__[key] = value
# https://github.com/bakwc/PySyncObj
class SyncObj(object):
def __init__(self, selfNodeAddr, otherNodesAddrs, conf=None, consumers=None):
    """
    Main SyncObj class, you should inherit your own class from it.

    :param selfNodeAddr: address of the current node server, 'host:port'
    :type selfNodeAddr: str
    :param otherNodesAddrs: addresses of partner nodes, ['host1:port1', 'host2:port2', ...]
    :type otherNodesAddrs: list of str
    :param conf: configuration object
    :type conf: SyncObjConf
    :param consumers: objects to be replicated
    :type consumers: list of SyncObjConsumer inherited objects
    """
    if conf is None:
        self.__conf = SyncObjConf()
    else:
        self.__conf = conf
    self.__conf.validate()
    if self.__conf.password is not None:
        if not HAS_CRYPTO:
            raise ImportError("Please install 'cryptography' module")
        self.__encryptor = getEncryptor(self.__conf.password)
    else:
        self.__encryptor = None
    # Consumers may be passed directly or via an object exposing _consumer().
    consumers = consumers or []
    newConsumers = []
    for c in consumers:
        if not isinstance(c, SyncObjConsumer) and getattr(c, '_consumer', None):
            c = c._consumer()
        if not isinstance(c, SyncObjConsumer):
            raise SyncObjException('Consumers must be inherited from SyncObjConsumer')
        newConsumers.append(c)
    consumers = newConsumers
    self.__consumers = consumers
    self.__selfNodeAddr = selfNodeAddr
    self.__otherNodesAddrs = otherNodesAddrs
    self.__unknownConnections = {}  # descr => _Connection
    # Raft bookkeeping: role, term, vote and log indices.
    self.__raftState = _RAFT_STATE.FOLLOWER
    self.__raftCurrentTerm = 0
    self.__votedFor = None
    self.__votesCount = 0
    self.__raftLeader = None
    self.__raftElectionDeadline = time.time() + self.__generateRaftTimeout()
    self.__raftLog = createJournal(self.__conf.journalFile)
    if len(self.__raftLog) == 0:
        # Seed an empty journal with a no-op so log indices start at 1.
        self.__raftLog.add(_bchr(_COMMAND_TYPE.NO_OP), 1, self.__raftCurrentTerm)
    self.__raftCommitIndex = 1
    self.__raftLastApplied = 1
    self.__raftNextIndex = {}
    self.__lastResponseTime = {}
    self.__raftMatchIndex = {}
    self.__lastSerializedTime = time.time()
    self.__lastSerializedEntry = None
    self.__forceLogCompaction = False
    self.__leaderCommitIndex = None
    self.__onReadyCalled = False
    self.__changeClusterIDx = None
    self.__noopIDx = None
    self.__destroying = False
    self.__recvTransmission = ''
    self.__startTime = time.time()
    globalDnsResolver().setTimeouts(self.__conf.dnsCacheTime, self.__conf.dnsFailCacheTime)
    globalDnsResolver().setPreferredAddrFamily(self.__conf.preferredAddrType)
    self.__serializer = Serializer(self.__conf.fullDumpFile,
                                   self.__conf.logCompactionBatchSize,
                                   self.__conf.useFork,
                                   self.__conf.serializer,
                                   self.__conf.deserializer,
                                   self.__conf.serializeChecker)
    self.__isInitialized = False
    self.__lastInitTryTime = 0
    self._poller = createPoller(self.__conf.pollerType)
    if selfNodeAddr is not None:
        # Bind our own server socket (bindAddress may override the public address).
        bindAddr = self.__conf.bindAddress or selfNodeAddr
        host, port = bindAddr.rsplit(':', 1)
        host = globalDnsResolver().resolve(host)
        self.__server = TcpServer(self._poller, host, port, onNewConnection=self.__onNewConnection,
                                  sendBufferSize=self.__conf.sendBufferSize,
                                  recvBufferSize=self.__conf.recvBufferSize,
                                  connectionTimeout=self.__conf.connectionTimeout)
    # Build dispatch tables for @replicated methods.  Decorated methods carry
    # 'replicated', 'origName' and 'ver' attributes; each implementation gets
    # a stable numeric ID so commands can be serialized compactly.
    self._methodToID = {}
    self._idToMethod = {}
    self._idToConsumer = {}
    methods = [m for m in dir(self) if callable(getattr(self, m)) and
               getattr(getattr(self, m), 'replicated', False) and
               m != getattr(getattr(self, m), 'origName')]
    currMethodID = 0
    self.__selfCodeVersion = 0
    self.__currentVersionFuncNames = {}
    methodsToEnumerate = []
    for method in methods:
        ver = getattr(getattr(self, method), 'ver')
        methodsToEnumerate.append((ver, 0, method, self))
    for consumerNum, consumer in enumerate(consumers):
        consumerMethods = [m for m in dir(consumer) if callable(getattr(consumer, m)) and
                           getattr(getattr(consumer, m), 'replicated', False) and
                           m != getattr(getattr(consumer, m), 'origName')]
        for method in consumerMethods:
            ver = getattr(getattr(consumer, method), 'ver')
            methodsToEnumerate.append((ver, consumerNum + 1, method, consumer))
        consumer._syncObj = self
    # Sorting by (version, consumer index, name) keeps IDs identical across
    # cluster nodes running the same code.
    for ver, _, method, obj in sorted(methodsToEnumerate):
        self.__selfCodeVersion = max(self.__selfCodeVersion, ver)
        if obj is self:
            self._methodToID[method] = currMethodID
        else:
            self._methodToID[(id(obj), method)] = currMethodID
        self._idToMethod[currMethodID] = getattr(obj, method)
        currMethodID += 1
    self.__onSetCodeVersion(0)
    self.__thread = None
    self.__mainThread = None
    self.__initialised = None
    self.__bindedEvent = threading.Event()
    self.__bindRetries = 0
    self.__commandsQueue = FastQueue(self.__conf.commandsQueueSize)
    if not self.__conf.appendEntriesUseBatch and PIPE_NOTIFIER_ENABLED:
        self.__pipeNotifier = PipeNotifier(self._poller)
    self.__nodes = []
    self.__readonlyNodes = []
    self.__readonlyNodesCounter = 0
    self.__lastReadonlyCheck = 0
    self.__newAppendEntriesTime = 0
    self.__commandsWaitingCommit = collections.defaultdict(list)  # logID => [(termID, callback), ...]
    self.__commandsLocalCounter = 0
    self.__commandsWaitingReply = {}  # commandLocalCounter => callback
    # Snapshot infrastructure attribute names; used to exclude them from
    # serialization of user state.
    self.__properies = set()
    for key in self.__dict__:
        self.__properies.add(key)
    self.__enabledCodeVersion = 0
    if self.__conf.autoTick:
        # Spawn the tick thread; weakref.proxy lets the SyncObj be collected,
        # which the thread detects via ReferenceError.
        self.__mainThread = threading.current_thread()
        self.__initialised = threading.Event()
        self.__thread = threading.Thread(target=SyncObj._autoTickThread, args=(weakref.proxy(self),))
        self.__thread.start()
        self.__initialised.wait()
        # while not self.__initialised.is_set():
        #     pass
    else:
        self.__initInTickThread()
def destroy(self):
    """
    Correctly destroy SyncObj. Stop autoTickThread, close connections, etc.
    """
    if self.__conf.autoTick:
        # The tick thread owns the resources; ask it to clean up on its
        # next iteration instead of tearing down from a foreign thread.
        self.__destroying = True
    else:
        self._doDestroy()

def waitBinded(self):
    """
    Waits until initialized (binded port).
    If success - just returns.
    If failed to initialize after conf.maxBindRetries - raises SyncObjException.
    """
    self.__bindedEvent.wait()
    if not self.__isInitialized:
        raise SyncObjException('BindError')

def _destroy(self):
    # Backwards-compatible alias for destroy().
    self.destroy()

def _doDestroy(self):
    # Tear down all partner and read-only node connections, unbind the
    # server socket, then release consumers and the journal.
    for node in self.__nodes:
        node._destroy()
    for node in self.__readonlyNodes:
        node._destroy()
    if self.__selfNodeAddr is not None:
        self.__server.unbind()
    for consumer in self.__consumers:
        consumer._destroy()
    self.__raftLog._destroy()
def __initInTickThread(self):
    """Bind the server socket (when this node has its own address) and
    create Node objects for all partners.

    Retried from _onTick until it succeeds or conf.maxBindRetries is hit.
    """
    try:
        self.__lastInitTryTime = time.time()
        if self.__selfNodeAddr is not None:
            self.__server.bind()
            shouldConnect = None
        else:
            # No own address: client-only mode, always initiate connections.
            shouldConnect = True
        self.__nodes = []
        for nodeAddr in self.__otherNodesAddrs:
            self.__nodes.append(Node(self, nodeAddr, shouldConnect))
            self.__raftNextIndex[nodeAddr] = self.__getCurrentLogIndex() + 1
            self.__raftMatchIndex[nodeAddr] = 0
        self.__needLoadDumpFile = True
        self.__isInitialized = True
        self.__bindedEvent.set()
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit.
        self.__bindRetries += 1
        if self.__conf.maxBindRetries and self.__bindRetries >= self.__conf.maxBindRetries:
            # Give up: wake any waitBinded() caller, then propagate.
            self.__bindedEvent.set()
            raise SyncObjException('BindError')
        logging.exception('failed to perform initialization')
def getCodeVersion(self):
    """Return the code version currently enabled cluster-wide."""
    return self.__enabledCodeVersion

def setCodeVersion(self, newVersion, callback = None):
    """Switch to a new code version on all cluster nodes. You
    should ensure that cluster nodes are updated, otherwise they
    won't be able to apply commands.

    :param newVersion: new code version
    :type newVersion: int
    :param callback: will be called on success or fail
    :type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None)
    """
    assert isinstance(newVersion, int)
    if newVersion > self.__selfCodeVersion:
        raise Exception('wrong version, current version is %d, requested version is %d' % (self.__selfCodeVersion, newVersion))
    if newVersion < self.__enabledCodeVersion:
        raise Exception('wrong version, enabled version is %d, requested version is %d' % (self.__enabledCodeVersion, newVersion))
    # Replicated through the journal so every node switches at the same log index.
    self._applyCommand(pickle.dumps(newVersion), callback, _COMMAND_TYPE.VERSION)
def addNodeToCluster(self, nodeName, callback = None):
    """Add single node to cluster (dynamic membership changes). Async.
    You should wait until the node is successfully added before adding
    the next one.

    :param nodeName: nodeHost:nodePort
    :type nodeName: str
    :param callback: will be called on success or fail
    :type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None)
    """
    if not self.__conf.dynamicMembershipChange:
        raise Exception('dynamicMembershipChange is disabled')
    # Membership changes are replicated like any other command.
    self._applyCommand(pickle.dumps(['add', nodeName]), callback, _COMMAND_TYPE.MEMBERSHIP)

def removeNodeFromCluster(self, nodeName, callback = None):
    """Remove single node from cluster (dynamic membership changes). Async.
    You should wait until the node is successfully removed before removing
    the next one.

    :param nodeName: nodeHost:nodePort
    :type nodeName: str
    :param callback: will be called on success or fail
    :type callback: function(`FAIL_REASON <#pysyncobj.FAIL_REASON>`_, None)
    """
    if not self.__conf.dynamicMembershipChange:
        raise Exception('dynamicMembershipChange is disabled')
    self._applyCommand(pickle.dumps(['rem', nodeName]), callback, _COMMAND_TYPE.MEMBERSHIP)

def _addNodeToCluster(self, nodeName, callback=None):
    # Backwards-compatible alias.
    self.addNodeToCluster(nodeName, callback)

def _removeNodeFromCluster(self, nodeName, callback=None):
    # Backwards-compatible alias.
    self.removeNodeFromCluster(nodeName, callback)
def __onSetCodeVersion(self, newVersion):
    """Recompute the map of original function name -> versioned
    implementation name ('<name>_v<ver>'), picking for every replicated
    function the highest version that does not exceed *newVersion*.
    """
    methods = [m for m in dir(self) if callable(getattr(self, m)) and
               getattr(getattr(self, m), 'replicated', False) and
               m != getattr(getattr(self, m), 'origName')]
    self.__currentVersionFuncNames = {}
    # Collect every declared version of every replicated function.
    funcVersions = collections.defaultdict(set)
    for method in methods:
        ver = getattr(getattr(self, method), 'ver')
        origFuncName = getattr(getattr(self, method), 'origName')
        funcVersions[origFuncName].add(ver)
    for consumer in self.__consumers:
        # Consumer functions are keyed by (consumer id, name) to avoid clashes.
        consumerID = id(consumer)
        consumerMethods = [m for m in dir(consumer) if callable(getattr(consumer, m)) and
                           getattr(getattr(consumer, m), 'replicated', False)]
        for method in consumerMethods:
            ver = getattr(getattr(consumer, method), 'ver')
            origFuncName = getattr(getattr(consumer, method), 'origName')
            funcVersions[(consumerID, origFuncName)].add(ver)
    for funcName, versions in iteritems(funcVersions):
        versions = sorted(list(versions))
        for v in versions:
            if v > newVersion:
                break
            # Keep overwriting: the last accepted v is the highest usable one.
            realFuncName = funcName[1] if isinstance(funcName, tuple) else funcName
            self.__currentVersionFuncNames[funcName] = realFuncName + '_v' + str(v)

def _getFuncName(self, funcName):
    # Resolve a (possibly consumer-scoped) function name to its currently
    # enabled versioned implementation name.
    return self.__currentVersionFuncNames[funcName]
def _applyCommand(self, command, callback, commandType = None):
    """Queue a serialized command for replication.

    Regular commands are queued raw; typed commands are prefixed with a
    one-byte command type tag.  On overflow the callback is failed with
    FAIL_REASON.QUEUE_FULL instead of raising.
    """
    try:
        if commandType is None:
            self.__commandsQueue.put_nowait((command, callback))
        else:
            self.__commandsQueue.put_nowait((_bchr(commandType) + command, callback))
        if not self.__conf.appendEntriesUseBatch and PIPE_NOTIFIER_ENABLED:
            # Wake the tick thread immediately rather than waiting for the
            # next append-entries period.
            self.__pipeNotifier.notify()
    except Queue.Full:
        self.__callErrCallback(FAIL_REASON.QUEUE_FULL, callback)
def _checkCommandsToApply(self):
    """Drain queued commands for (at most) one append-entries period.

    As leader: append to the journal (after validating membership changes).
    As follower with a known leader: forward to the leader.
    Otherwise: fail the callback with MISSING_LEADER.
    A tuple callback means the command arrived from a remote node and the
    response is sent back over the wire instead of invoked locally.
    """
    startTime = time.time()
    while time.time() - startTime < self.__conf.appendEntriesPeriod:
        if self.__raftLeader is None and self.__conf.commandsWaitLeader:
            # Hold commands in the queue until a leader is known.
            break
        try:
            command, callback = self.__commandsQueue.get_nowait()
        except Queue.Empty:
            break
        requestNode, requestID = None, None
        if isinstance(callback, tuple):
            # Command forwarded from another node: remember where to answer.
            requestNode, requestID = callback
        if self.__raftState == _RAFT_STATE.LEADER:
            idx, term = self.__getCurrentLogIndex() + 1, self.__raftCurrentTerm
            if self.__conf.dynamicMembershipChange:
                changeClusterRequest = self.__parseChangeClusterRequest(command)
            else:
                changeClusterRequest = None
            if changeClusterRequest is None or self.__changeCluster(changeClusterRequest):
                # Regular command, or an accepted cluster change.
                self.__raftLog.add(command, idx, term)
                if requestNode is None:
                    if callback is not None:
                        # Invoke the callback once this log index commits.
                        self.__commandsWaitingCommit[idx].append((term, callback))
                else:
                    self.__send(requestNode, {
                        'type': 'apply_command_response',
                        'request_id': requestID,
                        'log_idx': idx,
                        'log_term': term,
                    })
                if not self.__conf.appendEntriesUseBatch:
                    self.__sendAppendEntries()
            else:
                # Cluster change request rejected.
                if requestNode is None:
                    if callback is not None:
                        callback(None, FAIL_REASON.REQUEST_DENIED)
                else:
                    self.__send(requestNode, {
                        'type': 'apply_command_response',
                        'request_id': requestID,
                        'error': FAIL_REASON.REQUEST_DENIED,
                    })
        elif self.__raftLeader is not None:
            if requestNode is None:
                # Follower: forward the command to the current leader.
                message = {
                    'type': 'apply_command',
                    'command': command,
                }
                if callback is not None:
                    self.__commandsLocalCounter += 1
                    self.__commandsWaitingReply[self.__commandsLocalCounter] = callback
                    message['request_id'] = self.__commandsLocalCounter
                self.__send(self.__raftLeader, message)
            else:
                # Do not forward commands on behalf of other nodes.
                self.__send(requestNode, {
                    'type': 'apply_command_response',
                    'request_id': requestID,
                    'error': FAIL_REASON.NOT_LEADER,
                })
        else:
            self.__callErrCallback(FAIL_REASON.MISSING_LEADER, callback)
def _autoTickThread(self):
    """Body of the background tick thread (invoked with a weakref proxy).

    Initializes in-thread, then ticks until the main thread dies, destroy()
    is requested, or the SyncObj itself is garbage-collected
    (ReferenceError from the weakref proxy).
    """
    try:
        self.__initInTickThread()
    except SyncObjException as e:
        if e.errorCode == 'BindError':
            # Bind retries exhausted; waitBinded() reports the failure.
            return
        raise
    finally:
        # Always unblock the constructor waiting on this event.
        self.__initialised.set()
    time.sleep(0.1)
    try:
        while True:
            if not self.__mainThread.is_alive():
                break
            if self.__destroying:
                self._doDestroy()
                break
            self._onTick(self.__conf.autoTickPeriod)
    except ReferenceError:
        # The SyncObj was garbage-collected; exit quietly.
        pass

def doTick(self, timeToWait=0.0):
    """Performs single tick. Should be called manually if `autoTick <#pysyncobj.SyncObjConf.autoTick>`_ disabled

    :param timeToWait: max time to wait for next tick. If zero - perform single tick without waiting for new events.
        Otherwise - wait for new socket event and return.
    :type timeToWait: float
    """
    assert not self.__conf.autoTick
    self._onTick(timeToWait)
def _onTick(self, timeToWait=0.0):
    """Single event-loop iteration: (re)initialize if needed, run Raft
    election and commit bookkeeping, apply committed entries, pump queued
    commands, try log compaction and poll the sockets.

    :param timeToWait: max time to block in the socket poller
    :type timeToWait: float
    """
    if not self.__isInitialized:
        if time.time() >= self.__lastInitTryTime + self.__conf.bindRetryTime:
            self.__initInTickThread()
    if not self.__isInitialized:
        time.sleep(timeToWait)
        return
    if self.__needLoadDumpFile:
        if self.__conf.fullDumpFile is not None and os.path.isfile(self.__conf.fullDumpFile):
            self.__loadDumpFile(clearJournal=False)
        self.__needLoadDumpFile = False
    # Election: a follower/candidate whose election deadline expired starts
    # a new term and requests votes (only nodes with an own address can).
    if self.__raftState in (_RAFT_STATE.FOLLOWER, _RAFT_STATE.CANDIDATE) and self.__selfNodeAddr is not None:
        if self.__raftElectionDeadline < time.time() and self.__connectedToAnyone():
            self.__raftElectionDeadline = time.time() + self.__generateRaftTimeout()
            self.__raftLeader = None
            self.__setState(_RAFT_STATE.CANDIDATE)
            self.__raftCurrentTerm += 1
            self.__votedFor = self._getSelfNodeAddr()
            self.__votesCount = 1
            for node in self.__nodes:
                node.send({
                    'type': 'request_vote',
                    'term': self.__raftCurrentTerm,
                    'last_log_index': self.__getCurrentLogIndex(),
                    'last_log_term': self.__getCurrentLogTerm(),
                })
            self.__onLeaderChanged()
            if self.__votesCount > (len(self.__nodes) + 1) / 2:
                # Single-node cluster: our own vote is already a majority.
                self.__onBecomeLeader()
    if self.__raftState == _RAFT_STATE.LEADER:
        # Advance the commit index while a majority has replicated the entry.
        while self.__raftCommitIndex < self.__getCurrentLogIndex():
            nextCommitIndex = self.__raftCommitIndex + 1
            count = 1
            for node in self.__nodes:
                if self.__raftMatchIndex[node.getAddress()] >= nextCommitIndex:
                    count += 1
            if count > (len(self.__nodes) + 1) / 2:
                self.__raftCommitIndex = nextCommitIndex
            else:
                break
        self.__leaderCommitIndex = self.__raftCommitIndex
        # Step down if a majority of partners stopped responding.
        deadline = time.time() - self.__conf.leaderFallbackTimeout
        count = 1
        for node in self.__nodes:
            if self.__lastResponseTime[node.getAddress()] > deadline:
                count += 1
        if count <= (len(self.__nodes) + 1) / 2:
            self.__setState(_RAFT_STATE.FOLLOWER)
            self.__raftLeader = None
    needSendAppendEntries = False
    if self.__raftCommitIndex > self.__raftLastApplied:
        # Apply newly committed entries and fire waiting callbacks.
        count = self.__raftCommitIndex - self.__raftLastApplied
        entries = self.__getEntries(self.__raftLastApplied + 1, count)
        for entry in entries:
            try:
                currentTermID = entry[2]
                subscribers = self.__commandsWaitingCommit.pop(entry[1], [])
                res = self.__doApplyCommand(entry[0])
                for subscribeTermID, callback in subscribers:
                    if subscribeTermID == currentTermID:
                        callback(res, FAIL_REASON.SUCCESS)
                    else:
                        # The entry at this index belongs to a different
                        # term: the original command was discarded.
                        callback(None, FAIL_REASON.DISCARDED)
                self.__raftLastApplied += 1
            except SyncObjExceptionWrongVer as e:
                logging.error('request to switch to unsupported code version (self version: %d, requested version: %d)' %
                              (self.__selfCodeVersion, e.ver))
        if not self.__conf.appendEntriesUseBatch:
            needSendAppendEntries = True
    if self.__raftState == _RAFT_STATE.LEADER:
        if time.time() > self.__newAppendEntriesTime or needSendAppendEntries:
            self.__sendAppendEntries()
    if not self.__onReadyCalled and self.__raftLastApplied == self.__leaderCommitIndex:
        if self.__conf.onReady:
            self.__conf.onReady()
        self.__onReadyCalled = True
    self._checkCommandsToApply()
    self.__tryLogCompaction()
    for node in self.__nodes:
        node.connectIfRequired()
    if time.time() > self.__lastReadonlyCheck + 1.0:
        self.__lastReadonlyCheck = time.time()
        newReadonlyNodes = []
        for node in self.__readonlyNodes:
            if node.isConnected():
                newReadonlyNodes.append(node)
            else:
                self.__raftNextIndex.pop(node, None)
                self.__raftMatchIndex.pop(node, None)
                node._destroy()
        # Fix: the filtered list was built but never stored, so destroyed
        # read-only nodes stayed in __readonlyNodes and were re-destroyed
        # (and re-reported in getStatus) on every subsequent check.
        self.__readonlyNodes = newReadonlyNodes
    self._poller.poll(timeToWait)
def getStatus(self):
    """Dumps different debug info about cluster to dict and return it"""
    status = {
        'version': VERSION,
        'revision': REVISION,
        'self': self.__selfNodeAddr,
        'state': self.__raftState,
        'leader': self.__raftLeader,
        'partner_nodes_count': len(self.__nodes),
    }
    # One entry per partner / read-only node, keyed by its address.
    for n in self.__nodes:
        status['partner_node_status_server_' + n.getAddress()] = n.getStatus()
    status['readonly_nodes_count'] = len(self.__readonlyNodes)
    for n in self.__readonlyNodes:
        status['readonly_node_status_server_' + n.getAddress()] = n.getStatus()
    status['unknown_connections_count'] = len(self.__unknownConnections)
    status['log_len'] = len(self.__raftLog)
    status['last_applied'] = self.__raftLastApplied
    status['commit_idx'] = self.__raftCommitIndex
    status['raft_term'] = self.__raftCurrentTerm
    # Leader-side replication bookkeeping (empty on followers).
    status['next_node_idx_count'] = len(self.__raftNextIndex)
    for k, v in iteritems(self.__raftNextIndex):
        status['next_node_idx_server_' + k] = v
    status['match_idx_count'] = len(self.__raftMatchIndex)
    for k, v in iteritems(self.__raftMatchIndex):
        status['match_idx_server_' + k] = v
    status['leader_commit_idx'] = self.__leaderCommitIndex
    status['uptime'] = int(time.time() - self.__startTime)
    status['self_code_version'] = self.__selfCodeVersion
    status['enabled_code_version'] = self.__enabledCodeVersion
    return status
def _getStatus(self):
    # Internal alias kept for backward compatibility with older callers.
    return self.getStatus()

def printStatus(self):
    """Dumps different debug info about cluster to default logger"""
    status = self.getStatus()
    for k, v in iteritems(status):
        logging.info('%s: %s' % (str(k), str(v)))

def _printStatus(self):
    # Internal alias kept for backward compatibility with older callers.
    self.printStatus()

def forceLogCompaction(self):
    """Force to start log compaction (without waiting required time or required number of entries)"""
    # Only raises a flag; the actual compaction is performed on the main
    # tick thread inside __tryLogCompaction.
    self.__forceLogCompaction = True

def _forceLogCompaction(self):
    # Internal alias kept for backward compatibility with older callers.
    self.forceLogCompaction()
def __doApplyCommand(self, command):
    """Apply a single committed log entry to the local state machine.

    *command* is a byte string: the first byte is the command type, the rest
    is a pickled payload. Only VERSION and REGULAR commands are handled here.
    """
    commandType = ord(command[:1])
    # Skip no-op and membership change commands
    if commandType == _COMMAND_TYPE.VERSION:
        ver = pickle.loads(command[1:])
        if self.__selfCodeVersion < ver:
            # This binary is too old to speak the requested protocol version.
            raise SyncObjExceptionWrongVer(ver)
        oldVer = self.__enabledCodeVersion
        self.__enabledCodeVersion = ver
        callback = self.__conf.onCodeVersionChanged
        self.__onSetCodeVersion(ver)
        if callback is not None:
            callback(oldVer, ver)
        return
    if commandType != _COMMAND_TYPE.REGULAR:
        return
    command = pickle.loads(command[1:])
    args = []
    kwargs = {
        # _doApply makes the @replicated-decorated method execute for real
        # instead of re-submitting itself for replication.
        '_doApply': True,
    }
    # Payload shape: funcID | (funcID, args) | (funcID, args, kwargs).
    if not isinstance(command, tuple):
        funcID = command
    elif len(command) == 2:
        funcID, args = command
    else:
        funcID, args, newKwArgs = command
        kwargs.update(newKwArgs)
    return self._idToMethod[funcID](*args, **kwargs)
def _onMessageReceived(self, nodeAddr, message):
    """Main raft message dispatcher.

    Handles vote requests, append_entries (heartbeat / log replication /
    snapshot install), command forwarding to the leader and the leader-side
    acknowledgements. *nodeAddr* is the sender's cluster address.
    """
    # --- RequestVote RPC (read-only observers, selfNodeAddr is None, never vote) ---
    if message['type'] == 'request_vote' and self.__selfNodeAddr is not None:
        # A newer term always demotes us to follower.
        if message['term'] > self.__raftCurrentTerm:
            self.__raftCurrentTerm = message['term']
            self.__votedFor = None
            self.__setState(_RAFT_STATE.FOLLOWER)
            self.__raftLeader = None
        if self.__raftState in (_RAFT_STATE.FOLLOWER, _RAFT_STATE.CANDIDATE):
            lastLogTerm = message['last_log_term']
            lastLogIdx = message['last_log_index']
            if message['term'] >= self.__raftCurrentTerm:
                # Grant the vote only if the candidate's log is at least as
                # up-to-date as ours and we did not already vote this term.
                if lastLogTerm < self.__getCurrentLogTerm():
                    return
                if lastLogTerm == self.__getCurrentLogTerm() and \
                        lastLogIdx < self.__getCurrentLogIndex():
                    return
                if self.__votedFor is not None:
                    return
                self.__votedFor = nodeAddr
                self.__raftElectionDeadline = time.time() + self.__generateRaftTimeout()
                self.__send(nodeAddr, {
                    'type': 'response_vote',
                    'term': message['term'],
                })
    # --- AppendEntries RPC (heartbeat / replication / snapshot) ---
    if message['type'] == 'append_entries' and message['term'] >= self.__raftCurrentTerm:
        self.__raftElectionDeadline = time.time() + self.__generateRaftTimeout()
        if self.__raftLeader != nodeAddr:
            self.__onLeaderChanged()
        self.__raftLeader = nodeAddr
        if message['term'] > self.__raftCurrentTerm:
            self.__raftCurrentTerm = message['term']
            self.__votedFor = None
        self.__setState(_RAFT_STATE.FOLLOWER)
        newEntries = message.get('entries', [])
        serialized = message.get('serialized', None)
        self.__leaderCommitIndex = leaderCommitIndex = message['commit_index']
        # Regular append entries
        if 'prevLogIdx' in message:
            # Oversized entries arrive chunked (start/process/finish);
            # reassemble the pickled buffer before appending.
            transmission = message.get('transmission', None)
            if transmission is not None:
                if transmission == 'start':
                    self.__recvTransmission = message['data']
                    self.__sendNextNodeIdx(nodeAddr, success=False, reset=False)
                    return
                elif transmission == 'process':
                    self.__recvTransmission += message['data']
                    self.__sendNextNodeIdx(nodeAddr, success=False, reset=False)
                    return
                elif transmission == 'finish':
                    self.__recvTransmission += message['data']
                    newEntries = [pickle.loads(self.__recvTransmission)]
                    self.__recvTransmission = ''
                else:
                    raise Exception('Wrong transmission type')
            prevLogIdx = message['prevLogIdx']
            prevLogTerm = message['prevLogTerm']
            prevEntries = self.__getEntries(prevLogIdx)
            if not prevEntries:
                # We don't have the predecessor entry - ask the leader to rewind.
                self.__sendNextNodeIdx(nodeAddr, success=False, reset=True)
                return
            if prevEntries[0][2] != prevLogTerm:
                # Term mismatch at prevLogIdx - our log conflicts with the leader's.
                self.__sendNextNodeIdx(nodeAddr, nextNodeIdx = prevLogIdx, success = False, reset=True)
                return
            if len(prevEntries) > 1:
                # rollback cluster changes
                if self.__conf.dynamicMembershipChange:
                    for entry in reversed(prevEntries[1:]):
                        clusterChangeRequest = self.__parseChangeClusterRequest(entry[0])
                        if clusterChangeRequest is not None:
                            self.__doChangeCluster(clusterChangeRequest, reverse=True)
                self.__deleteEntriesFrom(prevLogIdx + 1)
            for entry in newEntries:
                self.__raftLog.add(*entry)
            # apply cluster changes
            if self.__conf.dynamicMembershipChange:
                for entry in newEntries:
                    clusterChangeRequest = self.__parseChangeClusterRequest(entry[0])
                    if clusterChangeRequest is not None:
                        self.__doChangeCluster(clusterChangeRequest)
            nextNodeIdx = prevLogIdx + 1
            if newEntries:
                nextNodeIdx = newEntries[-1][1]
            self.__sendNextNodeIdx(nodeAddr, nextNodeIdx=nextNodeIdx, success=True)
        # Install snapshot
        elif serialized is not None:
            if self.__serializer.setTransmissionData(serialized):
                self.__loadDumpFile(clearJournal=True)
                self.__sendNextNodeIdx(nodeAddr, success=True)
        # Never commit past the end of our own log.
        self.__raftCommitIndex = min(leaderCommitIndex, self.__getCurrentLogIndex())
    # --- Command forwarded by a follower; apply it as (presumed) leader ---
    if message['type'] == 'apply_command':
        if 'request_id' in message:
            self._applyCommand(message['command'], (nodeAddr, message['request_id']))
        else:
            self._applyCommand(message['command'], None)
    # --- Leader's reply to a command we previously forwarded ---
    if message['type'] == 'apply_command_response':
        requestID = message['request_id']
        error = message.get('error', None)
        callback = self.__commandsWaitingReply.pop(requestID, None)
        if callback is not None:
            if error is not None:
                callback(None, error)
            else:
                idx = message['log_idx']
                term = message['log_term']
                assert idx > self.__raftLastApplied
                # Fire the callback once the entry actually commits locally.
                self.__commandsWaitingCommit[idx].append((term, callback))
    if self.__raftState == _RAFT_STATE.CANDIDATE:
        if message['type'] == 'response_vote' and message['term'] == self.__raftCurrentTerm:
            self.__votesCount += 1
            # Majority of the full cluster (partners + self).
            if self.__votesCount > (len(self.__nodes) + 1) / 2:
                self.__onBecomeLeader()
    if self.__raftState == _RAFT_STATE.LEADER:
        if message['type'] == 'next_node_idx':
            reset = message['reset']
            nextNodeIdx = message['next_node_idx']
            success = message['success']
            currentNodeIdx = nextNodeIdx - 1
            if reset:
                self.__raftNextIndex[nodeAddr] = nextNodeIdx
            if success:
                # Follower acknowledged everything below nextNodeIdx.
                self.__raftMatchIndex[nodeAddr] = currentNodeIdx
            self.__lastResponseTime[nodeAddr] = time.time()
def __callErrCallback(self, err, callback):
    """Report *err* through *callback*: either a local callable or a
    (nodeAddress, requestID) pair identifying a remote requester."""
    if callback is None:
        return
    if not isinstance(callback, tuple):
        # Local callback registered by the caller.
        callback(None, err)
        return
    # Remote requester: send the failure back over the wire.
    requestNode, requestID = callback
    self.__send(requestNode, {
        'type': 'apply_command_response',
        'request_id': requestID,
        'error': err,
    })
def __sendNextNodeIdx(self, nodeAddr, reset=False, nextNodeIdx = None, success = False):
    """ACK/NACK an append_entries: tell *nodeAddr* which log index we expect next.

    reset=True asks the leader to rewind its nextIndex for us; success reports
    whether the last batch was actually appended. Defaults to our log end + 1.
    """
    if nextNodeIdx is None:
        nextNodeIdx = self.__getCurrentLogIndex() + 1
    self.__send(nodeAddr, {
        'type': 'next_node_idx',
        'next_node_idx': nextNodeIdx,
        'reset': reset,
        'success': success,
    })
def __generateRaftTimeout(self):
    """Random election timeout drawn uniformly from
    [raftMinTimeout, raftMaxTimeout); randomization avoids split votes."""
    lo = self.__conf.raftMinTimeout
    hi = self.__conf.raftMaxTimeout
    # uniform(a, b) computes a + (b - a) * random() - same distribution
    # as the previous explicit formula.
    return random.uniform(lo, hi)
def __onNewConnection(self, conn):
    """Register a freshly accepted connection. It stays in
    __unknownConnections until the peer identifies itself (see
    __onMessageReceived)."""
    descr = conn.fileno()
    self.__unknownConnections[descr] = conn
    if self.__encryptor:
        conn.encryptor = self.__encryptor
    # Bind this connection object into the generic callbacks.
    conn.setOnMessageReceivedCallback(functools.partial(self.__onMessageReceived, conn))
    conn.setOnDisconnectedCallback(functools.partial(self.__onDisconnected, conn))
def __utilityCallback(self, res, err, conn, cmd, node):
    """Reply to a utility-console command with 'SUCCESS'/'FAIL' plus the
    echoed command name and node argument."""
    status = 'SUCCESS' if err == FAIL_REASON.SUCCESS else 'FAIL'
    conn.send(' '.join((status, cmd, node)))
def __onUtilityMessage(self, conn, message):
    """Handle a utility-console command, e.g. ['status'] or ['add', addr].

    Returns True when the message was consumed as a utility command,
    False when it is not a recognized command.
    """
    try:
        if message[0] == 'status':
            conn.send(self.getStatus())
            return True
        elif message[0] == 'add':
            self.addNodeToCluster(message[1], callback=functools.partial(self.__utilityCallback, conn=conn, cmd='ADD', node=message[1]))
            return True
        elif message[0] == 'remove':
            if message[1] == self.__selfNodeAddr:
                # A node must not remove itself.
                conn.send('FAIL REMOVE ' + message[1])
            else:
                self.removeNodeFromCluster(message[1], callback=functools.partial(self.__utilityCallback, conn=conn, cmd='REMOVE', node=message[1]))
            return True
        elif message[0] == 'set_version':
            self.setCodeVersion(message[1], callback=functools.partial(self.__utilityCallback, conn=conn, cmd='SET_VERSION', node=str(message[1])))
            return True
    except Exception as e:
        # Report the failure to the console client rather than crashing the node.
        conn.send(str(e))
        return True
    return False
def __onMessageReceived(self, conn, message):
    """Process early messages on a not-yet-identified connection:
    encryption handshake, utility-console commands, or the peer declaring
    its cluster address (or 'readonly')."""
    # Encryption handshake: the first inbound message is the peer's random
    # key; answer with our own.
    if self.__encryptor and not conn.sendRandKey:
        conn.sendRandKey = message
        conn.recvRandKey = os.urandom(32)
        conn.send(conn.recvRandKey)
        return
    descr = conn.fileno()
    # Utility-console messages arrive as lists (e.g. ['status']).
    if isinstance(message, list) and self.__onUtilityMessage(conn, message):
        self.__unknownConnections.pop(descr, None)
        return
    # Otherwise the message is the peer's self-declared cluster address.
    partnerNode = None
    for node in self.__nodes:
        if node.getAddress() == message:
            partnerNode = node
            break
    # Unknown peers that are not read-only observers get dropped.
    if partnerNode is None and message != 'readonly':
        conn.disconnect()
        self.__unknownConnections.pop(descr, None)
        return
    if partnerNode is not None:
        partnerNode.onPartnerConnected(conn)
    else:
        # Register a synthetic, non-connecting node for the read-only observer.
        nodeAddr = str(self.__readonlyNodesCounter)
        node = Node(self, nodeAddr, shouldConnect=False)
        node.onPartnerConnected(conn)
        self.__readonlyNodes.append(node)
        self.__raftNextIndex[nodeAddr] = self.__getCurrentLogIndex() + 1
        self.__raftMatchIndex[nodeAddr] = 0
        self.__readonlyNodesCounter += 1
    self.__unknownConnections.pop(descr, None)
def __onDisconnected(self, conn):
    # Forget connections that dropped before identifying themselves.
    self.__unknownConnections.pop(conn.fileno(), None)

def __getCurrentLogIndex(self):
    # Global index of the newest journal entry; entries are (command, idx, term).
    return self.__raftLog[-1][1]

def __getCurrentLogTerm(self):
    # Term of the newest journal entry.
    return self.__raftLog[-1][2]

def __getPrevLogIndexTerm(self, nextNodeIndex):
    """Return (index, term) of the entry preceding *nextNodeIndex*, or
    (None, None) when that entry was already compacted away."""
    prevIndex = nextNodeIndex - 1
    entries = self.__getEntries(prevIndex, 1)
    if entries:
        return prevIndex, entries[0][2]
    return None, None
def __getEntries(self, fromIDx, count=None, maxSizeBytes = None):
    """Return journal entries starting at global index *fromIDx*.

    *count* limits the number of entries; *maxSizeBytes* truncates the
    result right after the entry whose cumulative command size reaches the
    limit. Returns [] when fromIDx is None or already compacted away.
    """
    firstEntryIDx = self.__raftLog[0][1]
    if fromIDx is None or fromIDx < firstEntryIDx:
        return []
    # Translate the global index into a journal offset.
    start = fromIDx - firstEntryIDx
    end = None if count is None else start + count
    selected = self.__raftLog[start:end]
    if maxSizeBytes is None:
        return selected
    trimmed = []
    totalSize = 0
    for entry in selected:
        # The entry that crosses the size limit is still included.
        trimmed.append(entry)
        totalSize += len(entry[0])
        if totalSize >= maxSizeBytes:
            break
    return trimmed
def _isLeader(self):
    """ Check if current node has a leader state.
    WARNING: there could be multiple leaders at the same time!

    :return: True if leader, False otherwise
    :rtype: bool
    """
    return self.__raftState == _RAFT_STATE.LEADER

def _getLeader(self):
    """ Returns last known leader.
    WARNING: this information could be outdated, eg. there could be another leader selected!
    WARNING: there could be multiple leaders at the same time!

    :return: Address of the last known leader node.
    :rtype: str
    """
    return self.__raftLeader

def isReady(self):
    """Check if current node is initially synced with others and has an actual data.

    :return: True if ready, False otherwise
    :rtype: bool
    """
    return self.__onReadyCalled

def _isReady(self):
    # Internal alias for isReady().
    return self.isReady()

def _getTerm(self):
    # Current raft term.
    return self.__raftCurrentTerm

def _getRaftLogSize(self):
    # Number of entries currently held in the journal.
    return len(self.__raftLog)
def __deleteEntriesFrom(self, fromIDx):
    """Drop journal entries with global index >= fromIDx (conflict rollback)."""
    firstEntryIDx = self.__raftLog[0][1]
    diff = fromIDx - firstEntryIDx
    if diff < 0:
        # Everything below the journal start was already compacted away.
        return
    self.__raftLog.deleteEntriesFrom(diff)

def __deleteEntriesTo(self, toIDx):
    """Drop journal entries with global index < toIDx (log compaction)."""
    firstEntryIDx = self.__raftLog[0][1]
    diff = toIDx - firstEntryIDx
    if diff < 0:
        return
    self.__raftLog.deleteEntriesTo(diff)
def __onBecomeLeader(self):
    """Promote this node to leader: reset per-follower replication state and
    append a no-op entry so commitment can be established in the new term."""
    self.__raftLeader = self.__selfNodeAddr
    self.__setState(_RAFT_STATE.LEADER)
    self.__lastResponseTime.clear()
    for node in self.__nodes + self.__readonlyNodes:
        nodeAddr = node.getAddress()
        self.__raftNextIndex[nodeAddr] = self.__getCurrentLogIndex() + 1
        self.__raftMatchIndex[nodeAddr] = 0
        self.__lastResponseTime[node.getAddress()] = time.time()
    # No-op command after leader election.
    idx, term = self.__getCurrentLogIndex() + 1, self.__raftCurrentTerm
    self.__raftLog.add(_bchr(_COMMAND_TYPE.NO_OP), idx, term)
    self.__noopIDx = idx
    if not self.__conf.appendEntriesUseBatch:
        self.__sendAppendEntries()
    # NOTE(review): append entries are flushed a second time here (so in the
    # non-batch case they go out twice) - confirm against upstream before
    # simplifying.
    self.__sendAppendEntries()
def __setState(self, newState):
    """Switch raft state; fires the user's onStateChanged callback only on
    real transitions."""
    oldState = self.__raftState
    self.__raftState = newState
    callback = self.__conf.onStateChanged
    if callback is not None and oldState != newState:
        callback(oldState, newState)

def __onLeaderChanged(self):
    # Fail every command still waiting for a reply from the old leader.
    # sorted() keeps the callback order deterministic.
    for id in sorted(self.__commandsWaitingReply):
        self.__commandsWaitingReply[id](None, FAIL_REASON.LEADER_CHANGED)
    self.__commandsWaitingReply = {}
def __sendAppendEntries(self):
    """Replicate log entries (or a serialized snapshot) to every partner and
    read-only node.

    Leader-side. For each connected node, sends batches starting at that
    node's nextIndex until it is up to date; falls back to snapshot
    transmission when the required entries were compacted away. Stops after
    appendEntriesPeriod seconds to keep the main loop responsive.
    """
    self.__newAppendEntriesTime = time.time() + self.__conf.appendEntriesPeriod
    startTime = time.time()
    batchSizeBytes = self.__conf.appendEntriesBatchSizeBytes
    for node in self.__nodes + self.__readonlyNodes:
        nodeAddr = node.getAddress()
        if not node.isConnected():
            self.__serializer.cancelTransmisstion(nodeAddr)
            continue
        # sendSingle guarantees at least one (possibly empty) append_entries
        # message per node - it doubles as the raft heartbeat.
        sendSingle = True
        sendingSerialized = False
        nextNodeIndex = self.__raftNextIndex[nodeAddr]
        while nextNodeIndex <= self.__getCurrentLogIndex() or sendSingle or sendingSerialized:
            if nextNodeIndex > self.__raftLog[0][1]:
                # Required entries are still in the journal - send them directly.
                prevLogIdx, prevLogTerm = self.__getPrevLogIndexTerm(nextNodeIndex)
                entries = []
                if nextNodeIndex <= self.__getCurrentLogIndex():
                    entries = self.__getEntries(nextNodeIndex, None, batchSizeBytes)
                    self.__raftNextIndex[nodeAddr] = entries[-1][1] + 1
                if len(entries) == 1 and len(entries[0][0]) >= batchSizeBytes:
                    # Single oversized entry: pickle it and stream it in
                    # batchSizeBytes chunks marked start/process/finish.
                    entry = pickle.dumps(entries[0])
                    for pos in xrange(0, len(entry), batchSizeBytes):
                        currData = entry[pos:pos + batchSizeBytes]
                        if pos == 0:
                            transmission = 'start'
                        elif pos + batchSizeBytes >= len(entry):
                            # BUGFIX: compare against the length of the pickled
                            # buffer actually being chunked (len(entry)), not the
                            # raw command length (len(entries[0][0])). The old
                            # condition could emit 'finish' one chunk early, making
                            # the receiver unpickle a truncated buffer.
                            transmission = 'finish'
                        else:
                            transmission = 'process'
                        message = {
                            'type': 'append_entries',
                            'transmission': transmission,
                            'data': currData,
                            'term': self.__raftCurrentTerm,
                            'commit_index': self.__raftCommitIndex,
                            'prevLogIdx': prevLogIdx,
                            'prevLogTerm': prevLogTerm,
                        }
                        node.send(message)
                else:
                    message = {
                        'type': 'append_entries',
                        'term': self.__raftCurrentTerm,
                        'commit_index': self.__raftCommitIndex,
                        'entries': entries,
                        'prevLogIdx': prevLogIdx,
                        'prevLogTerm': prevLogTerm,
                    }
                    node.send(message)
            else:
                # Entries were compacted away - stream the serialized full
                # state (snapshot) instead.
                transmissionData = self.__serializer.getTransmissionData(nodeAddr)
                message = {
                    'type': 'append_entries',
                    'term': self.__raftCurrentTerm,
                    'commit_index': self.__raftCommitIndex,
                    'serialized': transmissionData,
                }
                node.send(message)
                if transmissionData is not None:
                    isLast = transmissionData[2]
                    if isLast:
                        # Snapshot complete: resume normal replication right
                        # after the snapshot's last covered entry.
                        self.__raftNextIndex[nodeAddr] = self.__raftLog[1][1] + 1
                        sendingSerialized = False
                    else:
                        sendingSerialized = True
                else:
                    sendingSerialized = False
            nextNodeIndex = self.__raftNextIndex[nodeAddr]
            sendSingle = False
            delta = time.time() - startTime
            if delta > self.__conf.appendEntriesPeriod:
                break
def __send(self, nodeAddr, message):
    """Route *message* to the (partner or read-only) node with the given
    address; silently does nothing when the address is unknown."""
    for node in self.__nodes + self.__readonlyNodes:
        if node.getAddress() == nodeAddr:
            node.send(message)
            break
def __connectedToAnyone(self):
    """True when at least one partner is CONNECTED, or when the cluster has
    no partners at all (single-node cluster is always 'connected')."""
    if not self.__nodes:
        return True
    return any(node.getStatus() == NODE_STATUS.CONNECTED for node in self.__nodes)
def _getSelfNodeAddr(self):
    # Own cluster address, or None for a read-only observer.
    return self.__selfNodeAddr

def _getConf(self):
    # The configuration object this instance was created with.
    return self.__conf

def _getEncryptor(self):
    # Encryptor used for the connection handshake, or None when disabled.
    return self.__encryptor
def __changeCluster(self, request):
    """Try to start a membership change.

    Returns False when a change cannot be started yet: the post-election
    no-op entry is not committed, or a previous change is still in flight.
    """
    if self.__raftLastApplied < self.__noopIDx:
        # No-op entry was not commited yet
        return False
    if self.__changeClusterIDx is not None:
        if self.__raftLastApplied >= self.__changeClusterIDx:
            # The in-flight change just committed; clear the marker.
            self.__changeClusterIDx = None
    # Previous cluster change request was not commited yet
    if self.__changeClusterIDx is not None:
        return False
    return self.__doChangeCluster(request)

def __setCodeVersion(self, newVersion):
    # Replicated setter; real version-switch work happens in __doApplyCommand.
    self.__enabledCodeVersion = newVersion
def __doChangeCluster(self, request, reverse = False):
    """Apply (or, with reverse=True, roll back) one membership change.

    *request* is a tuple ('add' | 'rem', nodeAddress). Returns True when the
    cluster configuration actually changed, False otherwise.
    """
    requestType = request[0]
    requestNode = request[1]
    if requestType == 'add':
        adding = not reverse
    elif requestType == 'rem':
        adding = reverse
    else:
        # Unknown request type - ignore.
        return False
    # Read-only observers (no self address) must initiate connections themselves.
    if self.__selfNodeAddr is not None:
        shouldConnect = None
    else:
        shouldConnect = True
    if adding:
        newNode = requestNode
        # Node already exists in cluster
        if newNode == self.__selfNodeAddr or newNode in self.__otherNodesAddrs:
            return False
        self.__otherNodesAddrs.append(newNode)
        self.__nodes.append(Node(self, newNode, shouldConnect))
        self.__raftNextIndex[newNode] = self.__getCurrentLogIndex() + 1
        self.__raftMatchIndex[newNode] = 0
        if self._isLeader():
            self.__lastResponseTime[newNode] = time.time()
        return True
    else:
        oldNode = requestNode
        if oldNode == self.__selfNodeAddr:
            # A node never removes itself this way.
            return False
        if oldNode not in self.__otherNodesAddrs:
            return False
        for i in xrange(len(self.__nodes)):
            if self.__nodes[i].getAddress() == oldNode:
                self.__nodes[i]._destroy()
                self.__nodes.pop(i)
                # BUGFIX: stop after removing the matching node. Without this
                # break the loop keeps iterating over the original index range
                # of the now-shorter list and raises IndexError whenever the
                # removed node was not the last element.
                break
        self.__otherNodesAddrs.remove(oldNode)
        del self.__raftNextIndex[oldNode]
        del self.__raftMatchIndex[oldNode]
        return True
def __parseChangeClusterRequest(self, command):
    """Return the unpickled membership-change payload of *command*, or None
    when the command is not a MEMBERSHIP log entry."""
    if ord(command[:1]) != _COMMAND_TYPE.MEMBERSHIP:
        return None
    return pickle.loads(command[1:])
def __tryLogCompaction(self):
    """Compact the journal by serializing the full state when thresholds allow.

    First finishes a previously started serialization if one is in flight;
    otherwise decides (journal length, elapsed time, forceLogCompaction flag
    and the optional per-node compaction time slot) whether to start one.
    """
    currTime = time.time()
    serializeState, serializeID = self.__serializer.checkSerializing()
    if serializeState == SERIALIZER_STATE.SUCCESS:
        # A background dump just finished - drop the entries it covers.
        self.__lastSerializedTime = currTime
        self.__deleteEntriesTo(serializeID)
        self.__lastSerializedEntry = serializeID
    if serializeState == SERIALIZER_STATE.FAILED:
        logging.warning('Failed to store full dump')
    if serializeState != SERIALIZER_STATE.NOT_SERIALIZING:
        return
    if len(self.__raftLog) <= self.__conf.logCompactionMinEntries and \
            currTime - self.__lastSerializedTime <= self.__conf.logCompactionMinTime and \
            not self.__forceLogCompaction:
        return
    if self.__conf.logCompactionSplit:
        # Spread compaction across the cluster: every node gets its own slot
        # within each logCompactionMinTime period.
        allNodes = sorted(self.__otherNodesAddrs + [self.__selfNodeAddr])
        nodesCount = len(allNodes)
        selfIdx = allNodes.index(self.__selfNodeAddr)
        interval = self.__conf.logCompactionMinTime
        # BUGFIX: use floor division. Under Python 3 the previous
        # 'int(currTime) / interval * interval' performs float division, so
        # periodStart degenerated to ~currTime and the slot scheduling below
        # stopped working ('//' behaves identically under Python 2).
        periodStart = int(currTime) // interval * interval
        nodeInterval = float(interval) / nodesCount
        nodeIntervalStart = periodStart + selfIdx * nodeInterval
        nodeIntervalEnd = nodeIntervalStart + 0.3 * nodeInterval
        if currTime < nodeIntervalStart or currTime >= nodeIntervalEnd:
            return
    self.__forceLogCompaction = False
    # Need the last applied entry plus its predecessor as journal anchors.
    lastAppliedEntries = self.__getEntries(self.__raftLastApplied - 1, 2)
    if len(lastAppliedEntries) < 2 or lastAppliedEntries[0][1] == self.__lastSerializedEntry:
        self.__lastSerializedTime = currTime
        return
    if self.__conf.serializer is None:
        # Default serializer: snapshot our own __dict__ (minus infrastructure
        # attributes) plus every registered consumer.
        selfData = dict([(k, v) for k, v in iteritems(self.__dict__) if k not in self.__properies])
        data = selfData
        if self.__consumers:
            data = [selfData]
            for consumer in self.__consumers:
                data.append(consumer._serialize())
    else:
        data = None
    cluster = self.__otherNodesAddrs + [self.__selfNodeAddr]
    self.__serializer.serialize((data, lastAppliedEntries[1], lastAppliedEntries[0], cluster), lastAppliedEntries[0][1])
def __loadDumpFile(self, clearJournal):
    """Restore object state (and consumer state) from the last full dump.

    When *clearJournal* is set, or when the journal does not already start
    with the dump's two anchor entries, the journal is rebuilt from them.
    A failed load is logged and ignored - the node can still catch up from
    the leader.
    """
    try:
        data = self.__serializer.deserialize()
        if data[0] is not None:
            if self.__consumers:
                # Dump layout with consumers: [selfData, consumer1, consumer2, ...]
                selfData = data[0][0]
                consumersData = data[0][1:]
            else:
                selfData = data[0]
                consumersData = []
            for k, v in iteritems(selfData):
                self.__dict__[k] = v
            for i, consumer in enumerate(self.__consumers):
                consumer._deserialize(consumersData[i])
        if clearJournal or \
                len(self.__raftLog) < 2 or \
                self.__raftLog[0] != data[2] or \
                self.__raftLog[1] != data[1]:
            self.__raftLog.clear()
            self.__raftLog.add(*data[2])
            self.__raftLog.add(*data[1])
        self.__raftLastApplied = data[1][1]
        if self.__conf.dynamicMembershipChange:
            self.__otherNodesAddrs = [node for node in data[3] if node != self.__selfNodeAddr]
            self.__updateClusterConfiguration()
        self.__onSetCodeVersion(0)
    except Exception:
        # BUGFIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should be logged and ignored.
        logging.exception('failed to load full dump')
def __updateClusterConfiguration(self):
    """Reconcile self.__nodes with self.__otherNodesAddrs (e.g. after a dump
    load): destroy nodes that left, create Node objects for new members."""
    currentNodes = set()
    # Iterate backwards so pop(i) never shifts an index we still have to visit.
    for i in xrange(len(self.__nodes) -1, -1, -1):
        nodeAddr = self.__nodes[i].getAddress()
        if nodeAddr not in self.__otherNodesAddrs:
            self.__nodes[i]._destroy()
            self.__nodes.pop(i)
        else:
            currentNodes.add(nodeAddr)
    # Read-only observers (no self address) must dial out themselves.
    if self.__selfNodeAddr is not None:
        shouldConnect = None
    else:
        shouldConnect = True
    for nodeAddr in self.__otherNodesAddrs:
        if nodeAddr not in currentNodes:
            self.__nodes.append(Node(self, nodeAddr, shouldConnect))
            self.__raftNextIndex[nodeAddr] = self.__getCurrentLogIndex() + 1
            self.__raftMatchIndex[nodeAddr] = 0
def __copy_func(f, name):
    """Clone function *f* under a new *name*.

    Used by the replicated decorators to register version-suffixed copies
    ('<name>_v<ver>') in the defining class namespace.
    """
    if is_py3:
        res = types.FunctionType(f.__code__, f.__globals__, name, f.__defaults__, f.__closure__)
        res.__dict__ = f.__dict__
    else:
        # Python 2 spelling of the same function attributes.
        res = types.FunctionType(f.func_code, f.func_globals, name, f.func_defaults, f.func_closure)
        res.func_dict = f.func_dict
    return res
class AsyncResult(object):
    """Holder used to turn an asynchronous replicated call into a blocking
    one: onResult() stores the outcome and releases anyone waiting on
    ``event``."""

    def __init__(self):
        self.result = None
        self.error = None
        self.event = threading.Event()

    def onResult(self, res, err):
        # Callback signature matches replicated-command callbacks: (result, failReason).
        self.result, self.error = res, err
        self.event.set()
def replicated(*decArgs, **decKwargs):
    """Replicated decorator. Use it to mark your class members that modifies
    a class state. Function will be called asynchronously. Function accepts
    flowing additional parameters (optional):
        'callback': callback(result, failReason), failReason - `FAIL_REASON <#pysyncobj.FAIL_REASON>`_.
        'sync': True - to block execution and wait for result, False - async call. If callback is passed,
            'sync' option is ignored.
        'timeout': if 'sync' is enabled, and no result is available for 'timeout' seconds -
            SyncObjException will be raised.
    These parameters are reserved and should not be used in kwargs of your replicated method.

    :param func: arbitrary class member
    :type func: function
    :param ver: (optional) - code version (for zero deployment)
    :type ver: int
    """
    def replicatedImpl(func):
        def newFunc(self, *args, **kwargs):
            # _doApply is set when the command is replayed from the raft log:
            # run the real function instead of replicating it again.
            if kwargs.pop('_doApply', False):
                return func(self, *args, **kwargs)
            else:
                # Resolve the replicated-function ID both for plain SyncObj
                # methods and for SyncObjConsumer methods (keyed by instance).
                if isinstance(self, SyncObj):
                    applier = self._applyCommand
                    funcName = self._getFuncName(func.__name__)
                    funcID = self._methodToID[funcName]
                elif isinstance(self, SyncObjConsumer):
                    consumerId = id(self)
                    funcName = self._syncObj._getFuncName((consumerId, func.__name__))
                    funcID = self._syncObj._methodToID[(consumerId, funcName)]
                    applier = self._syncObj._applyCommand
                else:
                    raise SyncObjException("Class should be inherited from SyncObj or SyncObjConsumer")
                callback = kwargs.pop('callback', None)
                # NOTE: cmd may capture the kwargs dict itself; the reserved
                # 'sync'/'timeout' keys popped below mutate that same dict, so
                # they are already gone when cmd gets pickled further down.
                if kwargs:
                    cmd = (funcID, args, kwargs)
                elif args and not kwargs:
                    cmd = (funcID, args)
                else:
                    cmd = funcID
                sync = kwargs.pop('sync', False)
                if callback is not None:
                    sync = False
                if sync:
                    asyncResult = AsyncResult()
                    callback = asyncResult.onResult
                timeout = kwargs.pop('timeout', None)
                applier(pickle.dumps(cmd), callback, _COMMAND_TYPE.REGULAR)
                if sync:
                    res = asyncResult.event.wait(timeout)
                    if not res:
                        raise SyncObjException('Timeout')
                    # error == 0 means FAIL_REASON.SUCCESS.
                    if not asyncResult.error == 0:
                        raise SyncObjException(asyncResult.error)
                    return asyncResult.result
        func_dict = newFunc.__dict__ if is_py3 else newFunc.func_dict
        func_dict['replicated'] = True
        func_dict['ver'] = int(decKwargs.get('ver', 0))
        func_dict['origName'] = func.__name__
        # Register a version-suffixed copy ('<name>_v<ver>') directly in the
        # class namespace being defined; the frame depth differs depending on
        # whether the decorator was used with arguments.
        callframe = sys._getframe(1 if decKwargs else 2)
        namespace = callframe.f_locals
        newFuncName = func.__name__ + '_v' + str(func_dict['ver'])
        namespace[newFuncName] = __copy_func(newFunc, newFuncName)
        functools.update_wrapper(newFunc, func)
        return newFunc
    # Support both bare '@replicated' and parameterized '@replicated(ver=N)'.
    if len(decArgs) == 1 and len(decKwargs) == 0 and callable(decArgs[0]):
        return replicatedImpl(decArgs[0])
    return replicatedImpl
def replicated_sync(*decArgs, **decKwargs):
    def replicated_sync_impl(func, timeout = None):
        """Same as replicated, but synchronous by default.

        :param func: arbitrary class member
        :type func: function
        :param timeout: time to wait (seconds). Default: None
        :type timeout: float or None
        """
        def newFunc(self, *args, **kwargs):
            # Delegates to the plain replicated() wrapper in both branches;
            # the non-apply path just defaults sync=True and the timeout.
            if kwargs.get('_doApply', False):
                return replicated(func)(self, *args, **kwargs)
            else:
                kwargs.setdefault('timeout', timeout)
                kwargs.setdefault('sync', True)
                return replicated(func)(self, *args, **kwargs)
        func_dict = newFunc.__dict__ if is_py3 else newFunc.func_dict
        func_dict['replicated'] = True
        func_dict['ver'] = int(decKwargs.get('ver', 0))
        func_dict['origName'] = func.__name__
        # Register the version-suffixed copy in the defining class namespace,
        # mirroring the plain replicated() decorator.
        callframe = sys._getframe(1 if decKwargs else 2)
        namespace = callframe.f_locals
        newFuncName = func.__name__ + '_v' + str(func_dict['ver'])
        namespace[newFuncName] = __copy_func(newFunc, newFuncName)
        functools.update_wrapper(newFunc, func)
        return newFunc
    # Support both bare and parameterized decorator usage.
    if len(decArgs) == 1 and len(decKwargs) == 0 and callable(decArgs[0]):
        return replicated_sync_impl(decArgs[0])
    return replicated_sync_impl
|
client.py | import threading
from socket import *
from typing import Callable, Any
class m_client_socket:
    """Small TCP client that reads server data on a background thread.

    *receiving_data_on* is invoked (each time on its own thread) with every
    chunk received from the server; *disconnection_on* is invoked once when
    the connection closes or fails.
    """
    host: str
    port: int
    socket_obj: socket
    receiving_data_on: Callable[[bytes], Any]
    disconnection_on: Callable[[], Any]

    def __init__(self, host: str, port: int,
                 receiving_data_on: Callable[[bytes], Any] = None,
                 disconnection_on: Callable[[], Any] = None):
        self.host = host
        self.port = port
        self.receiving_data_on = receiving_data_on
        self.disconnection_on = disconnection_on
        self.socket_obj = socket(AF_INET, SOCK_STREAM)

    def __receiving_server_data(self):
        # Background receive loop; exits on EOF (empty recv) or socket error.
        while True:
            try:
                data = self.socket_obj.recv(1024)
                if data == b"":
                    break
                if self.receiving_data_on is not None:
                    threading.Thread(target=self.receiving_data_on, args=(data,)).start()
            except Exception:
                break
        if self.disconnection_on is not None:
            self.disconnection_on()

    def connect(self) -> bool:
        """Connect to host:port and start the receive loop.

        Returns True on success, False on any connection error.
        """
        try:
            self.socket_obj.connect((self.host, self.port))
            # Daemon thread so a still-open connection never blocks interpreter exit.
            threading.Thread(target=self.__receiving_server_data, daemon=True).start()
        except Exception:
            return False
        return True

    def send(self, data: bytes):
        """Send all of *data* to the server.

        BUGFIX: socket.send() may transmit only part of the buffer;
        sendall() retries until every byte has been written.
        """
        self.socket_obj.sendall(data)
|
CommandLine.py | from Smartiome.Core.APIManager import *
from prettytable import PrettyTable
import threading
import time
@APIManager.plugin_register("CommandLine")
class CommandLine:
    """Interactive console plugin: prompts the operator for outgoing messages
    and pretty-prints events received from other plugins."""

    def __init__(self, __queue, eventManager=None):
        self.eventManager = eventManager  # Allows Plugins send event
        self.__queue = __queue  # shared event queue consumed by the core
        print("CommandLine is starting...")

    def SendMessage(self, PLUGINS=""):
        """Prompt for target/recipient/content on stdin and enqueue the event."""
        event = Event(type_ = EType.DEFAULT)
        event.data["target"] = input("Target:")
        event.data["source"] = "CommandLine"
        event.data["recipient"] = input("Recipient Id:")
        event.data["content"] = input("Content:")
        self.__queue.put(event)

    def ReceiveMessage(self, PLUGINS, event=None, str_list=False):
        """Render *event* as a two-column table.

        With str_list=True the event arrives serialized as a string and is
        rebuilt first.
        """
        if event:
            if str_list:
                # SECURITY: eval() on an externally supplied string executes
                # arbitrary code - replace with a safe parser such as
                # ast.literal_eval or json.loads.
                event = Event(eval(event))
            x = PrettyTable(["Event Attributes", "Values"])
            # BUGFIX: PrettyTable alignment must be 'l', 'c' or 'r'; the
            # previous value "1" is rejected by prettytable at assignment.
            x.align["Event Attributes"] = "l"
            x.padding_width = 1
            x.add_row(["Target:", event.data["target"]])
            x.add_row(["Source:", event.data["source"]])
            x.add_row(["Recipient Id:", event.data["recipient"]])
            x.add_row(["Message Content:", event.data["content"]])
            print("\n")
            print(x)

    def start_worker(self):
        """Run the input loop on a daemon thread so it never blocks shutdown."""
        t = threading.Thread(target=self.worker, args=())
        t.daemon = True  # Thread.setDaemon() is deprecated since Python 3.10
        t.start()

    def worker(self):
        # Endless operator-input loop; each iteration prompts for one message.
        print("CommandLine Started")
        time.sleep(1)
        while True:
            time.sleep(0.3)
            self.SendMessage()
|
phytozome_ortholog_mappingServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from phytozome_ortholog_mapping.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Path to the deployment config file (KB_DEPLOYMENT_CONFIG env var), or None."""
    return environ.get(DEPLOY, None)

def get_service_name():
    """Service name from the KB_SERVICE_NAME env var, or None when unset."""
    return environ.get(SERVICE, None)
def get_config():
    """Parse the deployment config and return this service's section as a
    dict, or None when no config file is configured."""
    if not get_config_file():
        return None
    retconfig = {}
    config = ConfigParser()
    config.read(get_config_file())
    # Fall back to the module's default section when KB_SERVICE_NAME is unset.
    for nameval in config.items(get_service_name() or 'phytozome_ortholog_mapping'):
        retconfig[nameval[0]] = nameval[1]
    return retconfig
# Parse the deployment config once at import time; the Impl class below is
# imported only after the config is available (its module-level setup may
# depend on it) and a single shared instance serves all requests.
config = get_config()

from phytozome_ortholog_mapping.phytozome_ortholog_mappingImpl import phytozome_ortholog_mapping  # noqa @IgnorePep8
impl_phytozome_ortholog_mapping = phytozome_ortholog_mapping(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes sets, frozensets and any
    object exposing a toJSONable() method."""

    def default(self, obj):
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # Fall back to the base class, which raises TypeError.
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """jsonrpcbase service customized for KBase: every method receives a
    context object (*ctx*) as its first argument, and results are encoded
    with JSONObjectEncoder."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Argument-count checks subtract 1 because ctx is injected
                # and not part of the JSON-RPC params.
                # Does it have enough arguments?
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            # Wrap it as a JSON-RPC server error carrying the traceback.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError

        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context passed as the first argument to every service
    method. Behaves as a dict of call metadata (client ip, user, method,
    provenance, ...) plus logging helpers bound to the service logger."""
    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels accepted verbatim by log_debug(); any other value is
        # validated and offset numerically in log_debug() itself.
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger
    def log_err(self, message):
        # Log at error severity with this call's metadata attached.
        self._log(log.ERR, message)
    def log_info(self, message):
        self._log(log.INFO, message)
    def log_debug(self, message, level=1):
        # Accept either a recognized debug level as-is, or a numeric
        # level 1-3 which is shifted by 6 (to 7-9) for the logger.
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)
    def set_log_level(self, level):
        self._logger.set_log_level(level)
    def get_log_level(self):
        return self._logger.get_log_level()
    def clear_log_level(self):
        self._logger.clear_user_log_level()
    def _log(self, level, message):
        # All logging funnels through here so every record carries the
        # call metadata stored on this context.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])
    def provenance(self):
        """Return provenance for this call, preferring the SDK callback
        server (when SDK_CALLBACK_URL is set) over the locally stored
        'provenance' entry. Raises ServerError on callback failures."""
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # A JSON body with an 'error' key is a structured JSON-RPC
                # error; anything else becomes a generic ServerError.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    """Raised when a service call comes back with an error.

    Attributes:
        name: the name of the error.
        code: the numeric error code.
        message: a human readable error message ('' when absent).
        data: the server side stacktrace ('' when absent).
    """
    def __init__(self, name, code, message, data=None, error=None):
        # 'data' is the JSON-RPC 2.0 field name and 'error' the 1.1 one;
        # whichever was supplied wins, falling back to ''.
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = '' if not message else message
        self.data = data or error or ''
    def __str__(self):
        pieces = (self.name, ': ', str(self.code), '. ', self.message,
                  '\n', self.data)
        return ''.join(pieces)
def getIPAddress(environ):
    """Best-effort client IP extracted from the WSGI environ.

    X-Forwarded-For / X-Real-IP headers are honoured unless the service
    config explicitly sets dont_trust_x_ip_headers to 'true'; otherwise
    the socket-level REMOTE_ADDR is returned.
    """
    forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    # `config` is a module-level object; None means no config was loaded,
    # in which case the headers are trusted by default.
    trust_headers = (config is None or
                     config.get('dont_trust_x_ip_headers') != 'true')
    if trust_headers:
        if forwarded_for:
            # The first hop in a comma-separated proxy chain is the client.
            return forwarded_for.split(',')[0].strip()
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    """WSGI application wrapping the JSON-RPC dispatcher."""
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over
    def logcallback(self):
        # Keep the server log pointed at the user log's file when the
        # user log changes its file.
        self.serverlog.set_log_file(self.userlog.get_log_file())
    def log(self, level, context, message):
        # Write to the server log, tagged with the call metadata carried
        # in `context`.
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])
    def __init__(self):
        submod = get_service_name() or 'phytozome_ortholog_mapping'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        # Register the service's methods with the JSON-RPC dispatcher and
        # record each method's authentication requirement.
        self.rpc_service = JSONRPCServiceCustom()
        self.method_authentication = dict()
        self.rpc_service.add(impl_phytozome_ortholog_mapping.map_phytozome_orthologs,
                             name='phytozome_ortholog_mapping.map_phytozome_orthologs',
                             types=[dict])
        self.method_authentication['phytozome_ortholog_mapping.map_phytozome_orthologs'] = 'required'  # noqa
        self.rpc_service.add(impl_phytozome_ortholog_mapping.status,
                             name='phytozome_ortholog_mapping.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)
    def __call__(self, environ, start_response):
        """WSGI entry point: parse the JSON-RPC request body, enforce
        per-method authentication, dispatch, and serialize the reply."""
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'
        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                # Unparseable body -> JSON-RPC parse error (-32700).
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                # Record call metadata and provenance before dispatching.
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'phytozome_ortholog_mapping ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            # optional auth with no token: proceed anonymous
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # A bad token is fatal only for methods that
                                # require authentication.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    # Anything not already a JSONRPCError becomes a generic
                    # server error carrying the traceback.
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())
        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #    pprint.pformat(rpc_result))
        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''
        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]
    def process_error(self, error, context, request, trace=None):
        """Log `trace` and shape `error` into a serialized JSON-RPC error
        response matching the request's protocol flavor (1.1 'version',
        2.0 'jsonrpc', or bare 1.0)."""
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)
    def now_in_utc(self):
        """Current local time as an ISO8601 string with explicit offset."""
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        # round the local/UTC offset to the nearest minute, split h/m
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# Module-level singleton used by both the uwsgi and the BaseHTTP paths.
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass
# Handle to the child server process started by start_server(newprocess=True);
# None when no such process is running.
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    """Start the WSGI server.

    With the defaults the server binds to localhost on a system-assigned
    port and serves in the current thread, blocking until interrupted.
    Pass newprocess=True to serve from a daemon child process instead,
    which makes stop_server() usable and lets the chosen port number be
    returned to the caller.
    """
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # port may have been 0 ("pick one for me"); read back the real port.
    port = httpd.server_address[1]
    print("Listening on port %s" % port)
    if not newprocess:
        # Serve in this thread; does not return until interrupted.
        httpd.serve_forever()
    else:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    return port
def stop_server():
    """Stop a server previously started with start_server(newprocess=True).

    Raises:
        RuntimeError: if no server process is currently running.
    """
    global _proc
    if _proc is None:
        # Previously this raised an opaque AttributeError on None; fail
        # with a clear message instead, mirroring start_server()'s guard.
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run one job described by a JSON-RPC request file and write the
    JSON response to `output_file_path`.

    Returns 0 on success, 500 when the response carries an error.
    """
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in optional JSON-RPC fields with sensible defaults.
    req.setdefault('version', '1.1')
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    # Build the per-call context, authenticating when a token was given.
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    ctx['provenance'] = [{'service': ctx['module'],
                          'method': ctx['method'],
                          'method_params': req['params']}]
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        # Structured JSON-RPC failure: echo its code/name/message.
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': jre.trace if hasattr(jre, 'trace')
                                   else None}
                }
    except Exception:
        # Anything else becomes a generic server error with a traceback.
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': traceback.format_exc()}
                }
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return 500 if 'error' in resp else 0
if __name__ == "__main__":
    # Async CLI mode: script.py input.json output.json [token_or_token_file]
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # Third argument is either a path to a token file or the
            # token itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Otherwise run the standalone HTTP server.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print("Listening on port %s" % port)
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
|
worker.py | '''
brozzler/worker.py - BrozzlerWorker brozzles pages from the frontier, meaning
it runs youtube-dl on them, browses them and runs behaviors if appropriate,
scopes and adds outlinks to the frontier
Copyright (C) 2014-2018 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import brozzler
import brozzler.browser
import threading
import time
import urllib.request
import json
import PIL.Image
import io
import socket
import random
import requests
import doublethink
import tempfile
import urlcanon
from requests.structures import CaseInsensitiveDict
import rethinkdb as r
from . import ydl
class BrozzlerWorker:
    """Runs the brozzling loop: claims sites and pages from the frontier,
    fetches them with youtube-dl and/or a real browser, archives results
    via warcprox when configured, and feeds outlinks back to the frontier.

    NOTE(review): uses `logger.notice`/`logger.trace` and bare
    `logging.trace` -- presumably brozzler installs those custom levels
    on the logging module; confirm against the brozzler package.
    """
    logger = logging.getLogger(__module__ + "." + __qualname__)
    # 3⅓ min heartbeat interval => 10 min ttl
    # This is kind of a long time, because `frontier.claim_sites()`, which runs
    # in the same thread as the heartbeats, can take a while on a busy brozzler
    # cluster with slow rethinkdb.
    HEARTBEAT_INTERVAL = 200.0
    SITE_SESSION_MINUTES = 15
    def __init__(
            self, frontier, service_registry=None, max_browsers=1,
            chrome_exe="chromium-browser", warcprox_auto=False, proxy=None,
            skip_extract_outlinks=False, skip_visit_hashtags=False,
            skip_youtube_dl=False, simpler404=False, screenshot_full_page=False,
            page_timeout=300, behavior_timeout=900):
        self._frontier = frontier
        self._service_registry = service_registry
        self._max_browsers = max_browsers
        self._warcprox_auto = warcprox_auto
        self._proxy = proxy
        # auto warcprox selection and a fixed proxy are mutually exclusive
        assert not (warcprox_auto and proxy)
        self._proxy_is_warcprox = None
        self._skip_extract_outlinks = skip_extract_outlinks
        self._skip_visit_hashtags = skip_visit_hashtags
        self._skip_youtube_dl = skip_youtube_dl
        self._simpler404 = simpler404
        self._screenshot_full_page = screenshot_full_page
        self._page_timeout = page_timeout
        self._behavior_timeout = behavior_timeout
        self._browser_pool = brozzler.browser.BrowserPool(
                max_browsers, chrome_exe=chrome_exe, ignore_cert_errors=True)
        self._browsing_threads = set()
        self._browsing_threads_lock = threading.Lock()
        self._thread = None
        self._start_stop_lock = threading.Lock()
        self._shutdown = threading.Event()
    def _choose_warcprox(self):
        """Pick the least-loaded warcprox instance from the service
        registry, or None when none are available."""
        warcproxes = self._service_registry.available_services('warcprox')
        if not warcproxes:
            return None
        # .group('proxy').count() makes this query about 99% more efficient
        reql = self._frontier.rr.table('sites').between(
                ['ACTIVE', r.minval], ['ACTIVE', r.maxval],
                index='sites_last_disclaimed').group('proxy').count()
        # returns results like
        # {
        #     "wbgrp-svc030.us.archive.org:8000": 148,
        #     "wbgrp-svc030.us.archive.org:8001": 145
        # }
        proxy_scoreboard = dict(reql.run())
        for warcprox in warcproxes:
            address = '%s:%s' % (warcprox['host'], warcprox['port'])
            warcprox['assigned_sites'] = proxy_scoreboard.get(address, 0)
        # prefer fewest assigned sites, then lowest reported load
        warcproxes.sort(key=lambda warcprox: (
            warcprox['assigned_sites'], warcprox['load']))
        # XXX make this heuristic more advanced?
        return warcproxes[0]
    def _proxy_for(self, site):
        """Return the proxy address for `site`: the worker-level proxy,
        the site's own proxy, or a freshly chosen warcprox instance
        (persisted on the site). Raises brozzler.ProxyError when
        warcprox-auto is on but no instance is available."""
        if self._proxy:
            return self._proxy
        elif site.proxy:
            return site.proxy
        elif self._warcprox_auto:
            svc = self._choose_warcprox()
            if svc is None:
                raise brozzler.ProxyError(
                        'no available instances of warcprox in the service '
                        'registry')
            site.proxy = '%s:%s' % (svc['host'], svc['port'])
            site.save()
            self.logger.info(
                    'chose warcprox instance %r from service registry for %r',
                    site.proxy, site)
            return site.proxy
        return None
    def _using_warcprox(self, site):
        """Determine whether requests for `site` go through warcprox,
        caching the answer for the worker-level proxy case."""
        if self._proxy:
            if self._proxy_is_warcprox is None:
                # probe the proxy's /status endpoint once and remember
                try:
                    response = requests.get('http://%s/status' % self._proxy)
                    status = json.loads(response.text)
                    self._proxy_is_warcprox = (status['role'] == 'warcprox')
                except Exception as e:
                    self._proxy_is_warcprox = False
                logging.info(
                        '%s %s warcprox', self._proxy,
                        'IS' if self._proxy_is_warcprox else 'IS NOT')
            return self._proxy_is_warcprox
        else:
            # I should have commented when I originally wrote this code, but I
            # think this works because `site.proxy` is only set when the proxy
            # is warcprox
            return bool(site.proxy or self._warcprox_auto)
    def _warcprox_write_record(
            self, warcprox_address, url, warc_type, content_type,
            payload, extra_headers=None):
        """Ask warcprox to write a WARC record using its custom
        WARCPROX_WRITE_RECORD HTTP method.

        Returns a (request, response) tuple; response is None when the
        server answered with an HTTP error. Raises brozzler.ProxyError on
        connection-level failures."""
        headers = {"Content-Type":content_type,"WARC-Type":warc_type,"Host":"N/A"}
        if extra_headers:
            headers.update(extra_headers)
        request = urllib.request.Request(url, method="WARCPROX_WRITE_RECORD",
                headers=headers, data=payload)
        # XXX setting request.type="http" is a hack to stop urllib from trying
        # to tunnel if url is https
        request.type = "http"
        request.set_proxy(warcprox_address, "http")
        try:
            with urllib.request.urlopen(request, timeout=600) as response:
                if response.getcode() != 204:
                    self.logger.warning(
                            'got "%s %s" response on warcprox '
                            'WARCPROX_WRITE_RECORD request (expected 204)',
                            response.getcode(), response.reason)
                return request, response
        except urllib.error.HTTPError as e:
            self.logger.warning(
                    'got "%s %s" response on warcprox '
                    'WARCPROX_WRITE_RECORD request (expected 204)',
                    e.getcode(), e.info())
            return request, None
        except urllib.error.URLError as e:
            raise brozzler.ProxyError(
                    'proxy error on WARCPROX_WRITE_RECORD %s' % url) from e
        except ConnectionError as e:
            raise brozzler.ProxyError(
                    'proxy error on WARCPROX_WRITE_RECORD %s' % url) from e
    def thumb_jpeg(self, full_jpeg):
        """Create JPEG thumbnail.
        """
        img = PIL.Image.open(io.BytesIO(full_jpeg))
        # fixed 300px width; height scaled to preserve the aspect ratio
        thumb_width = 300
        thumb_height = (thumb_width / img.size[0]) * img.size[1]
        img.thumbnail((thumb_width, thumb_height))
        out = io.BytesIO()
        img.save(out, "jpeg", quality=95)
        return out.getbuffer()
    def brozzle_page(self, browser, site, page, on_screenshot=None,
            on_request=None, enable_youtube_dl=True):
        """Brozzle one page: optionally run youtube-dl on it, then browse
        it in a real browser (or plain-fetch it when browsing is not
        needed). Returns the set of outlink urls discovered."""
        self.logger.info("brozzling {}".format(page))
        ydl_fetches = None
        outlinks = set()
        if enable_youtube_dl:
            try:
                ydl_fetches, outlinks = ydl.do_youtube_dl(self, site, page)
            except brozzler.ReachedLimit as e:
                raise
            except brozzler.ShutdownRequested:
                raise
            except brozzler.ProxyError:
                raise
            except Exception as e:
                # code 430 gets quieter logging -- presumably a
                # limit-reached status from the proxy; TODO confirm
                if (hasattr(e, 'exc_info') and len(e.exc_info) >= 2
                        and hasattr(e.exc_info[1], 'code')
                        and e.exc_info[1].code == 430):
                    self.logger.info(
                            'youtube-dl got %s %s processing %s',
                            e.exc_info[1].code, e.exc_info[1].msg, page.url)
                else:
                    self.logger.error(
                            'youtube_dl raised exception on %s', page,
                            exc_info=True)
        if self._needs_browsing(page, ydl_fetches):
            self.logger.info('needs browsing: %s', page)
            try:
                browser_outlinks = self._browse_page(
                        browser, site, page, on_screenshot, on_request)
                outlinks.update(browser_outlinks)
            except brozzler.PageInterstitialShown:
                self.logger.info('page interstitial shown (http auth): %s', page)
        else:
            if not self._already_fetched(page, ydl_fetches):
                self.logger.info('needs fetch: %s', page)
                self._fetch_url(site, page.url)
            else:
                self.logger.info('already fetched: %s', page)
        return outlinks
    def _browse_page(self, browser, site, page, on_screenshot=None, on_request=None):
        """Browse `page` with `browser`, wiring up callbacks that archive
        screenshots/thumbnails via warcprox, record embedded video
        metadata, and fetch service worker scripts. Returns the outlinks
        reported by the browser."""
        def _on_screenshot(screenshot_jpeg):
            if on_screenshot:
                on_screenshot(screenshot_jpeg)
            if self._using_warcprox(site):
                self.logger.info(
                        "sending WARCPROX_WRITE_RECORD request to %s with "
                        "screenshot for %s", self._proxy_for(site), page)
                thumbnail_jpeg = self.thumb_jpeg(screenshot_jpeg)
                self._warcprox_write_record(
                        warcprox_address=self._proxy_for(site),
                        url="screenshot:%s" % str(urlcanon.semantic(page.url)),
                        warc_type="resource", content_type="image/jpeg",
                        payload=screenshot_jpeg,
                        extra_headers=site.extra_headers())
                self._warcprox_write_record(
                        warcprox_address=self._proxy_for(site),
                        url="thumbnail:%s" % str(urlcanon.semantic(page.url)),
                        warc_type="resource", content_type="image/jpeg",
                        payload=thumbnail_jpeg,
                        extra_headers=site.extra_headers())
        def _on_response(chrome_msg):
            # record metadata for successful (200/206) video responses
            if ('params' in chrome_msg
                    and 'response' in chrome_msg['params']
                    and 'mimeType' in chrome_msg['params']['response']
                    and chrome_msg['params']['response'].get('mimeType', '').startswith('video/')
                    # skip manifests of DASH segmented video -
                    # see https://github.com/internetarchive/brozzler/pull/70
                    and chrome_msg['params']['response']['mimeType'] != 'video/vnd.mpeg.dash.mpd'
                    and chrome_msg['params']['response'].get('status') in (200, 206)):
                video = {
                    'blame': 'browser',
                    'url': chrome_msg['params']['response'].get('url'),
                    'response_code': chrome_msg['params']['response']['status'],
                    'content-type': chrome_msg['params']['response']['mimeType'],
                }
                response_headers = CaseInsensitiveDict(
                        chrome_msg['params']['response']['headers'])
                if 'content-length' in response_headers:
                    video['content-length'] = int(response_headers['content-length'])
                if 'content-range' in response_headers:
                    video['content-range'] = response_headers['content-range']
                logging.debug('embedded video %s', video)
                if not 'videos' in page:
                    page.videos = []
                page.videos.append(video)
        # urls of service worker scripts already fetched for this page
        sw_fetched = set()
        def _on_service_worker_version_updated(chrome_msg):
            # https://github.com/internetarchive/brozzler/issues/140
            self.logger.trace('%r', chrome_msg)
            if chrome_msg.get('params', {}).get('versions'):
                url = chrome_msg.get('params', {}).get('versions')[0]\
                        .get('scriptURL')
                if url and url not in sw_fetched:
                    self.logger.info('fetching service worker script %s', url)
                    self._fetch_url(site, url)
                    sw_fetched.add(url)
        if not browser.is_running():
            browser.start(
                    proxy=self._proxy_for(site),
                    cookie_db=site.get('cookie_db'))
        final_page_url, outlinks = browser.browse_page(
                page.url, extra_headers=site.extra_headers(),
                behavior_parameters=site.get('behavior_parameters'),
                username=site.get('username'), password=site.get('password'),
                user_agent=site.get('user_agent'),
                on_screenshot=_on_screenshot, on_response=_on_response,
                on_request=on_request,
                on_service_worker_version_updated=_on_service_worker_version_updated,
                hashtags=page.hashtags,
                skip_extract_outlinks=self._skip_extract_outlinks,
                skip_visit_hashtags=self._skip_visit_hashtags,
                skip_youtube_dl=self._skip_youtube_dl,
                simpler404=self._simpler404,
                screenshot_full_page=self._screenshot_full_page,
                page_timeout=self._page_timeout,
                behavior_timeout=self._behavior_timeout)
        if final_page_url != page.url:
            page.note_redirect(final_page_url)
        return outlinks
    def _fetch_url(self, site, url):
        """Plain GET of `url` through the site's proxy (if any); the
        response body is discarded. Raises brozzler.ProxyError on proxy
        failures."""
        proxies = None
        if self._proxy_for(site):
            proxies = {
                'http': 'http://%s' % self._proxy_for(site),
                'https': 'http://%s' % self._proxy_for(site),
            }
        self.logger.info('fetching %s', url)
        try:
            # response is ignored
            requests.get(
                    url, proxies=proxies, headers=site.extra_headers(),
                    verify=False)
        except requests.exceptions.ProxyError as e:
            raise brozzler.ProxyError(
                    'proxy error fetching %s' % url) from e
    def _needs_browsing(self, page, ydl_fetches):
        """Decide whether `page` must be loaded in a real browser: yes
        unless youtube-dl's final fetches exist and none of them has an
        html-ish content type."""
        if ydl_fetches:
            final_bounces = ydl.final_bounces(ydl_fetches, page.url)
            if not final_bounces:
                return True
            for txn in final_bounces:
                if txn['response_headers'].get_content_type() in [
                        'text/html', 'application/xhtml+xml']:
                    return True
            return False
        else:
            return True
    def _already_fetched(self, page, ydl_fetches):
        """True if youtube-dl already performed a successful plain GET of
        page.url, so no additional fetch is needed."""
        if ydl_fetches:
            for fetch in ydl.final_bounces(ydl_fetches, page.url):
                if (fetch['method'] == 'GET' and fetch['response_code'] == 200):
                    return True
        return False
    def brozzle_site(self, browser, site):
        """Claim and brozzle pages from `site` for up to
        SITE_SESSION_MINUTES, translating the various crawl-control
        exceptions into frontier state; always disclaims the site."""
        try:
            site.last_claimed_by = '%s:%s' % (
                    socket.gethostname(), browser.chrome.port)
            site.save()
            # NOTE(review): if site.save() above raises, `start` is never
            # bound and the finally clause would hit an UnboundLocalError
            start = time.time()
            page = None
            self._frontier.enforce_time_limit(site)
            self._frontier.honor_stop_request(site)
            # _proxy_for() call in log statement can raise brozzler.ProxyError
            # which is why we honor time limit and stop request first☝🏻
            self.logger.info(
                    "brozzling site (proxy=%r) %s",
                    self._proxy_for(site), site)
            while time.time() - start < self.SITE_SESSION_MINUTES * 60:
                site.refresh()
                self._frontier.enforce_time_limit(site)
                self._frontier.honor_stop_request(site)
                page = self._frontier.claim_page(site, "%s:%s" % (
                    socket.gethostname(), browser.chrome.port))
                if (page.needs_robots_check and
                        not brozzler.is_permitted_by_robots(
                            site, page.url, self._proxy_for(site))):
                    logging.warning("page %s is blocked by robots.txt", page.url)
                    page.blocked_by_robots = True
                    self._frontier.completed_page(site, page)
                else:
                    outlinks = self.brozzle_page(
                            browser, site, page,
                            enable_youtube_dl=not self._skip_youtube_dl)
                    self._frontier.completed_page(site, page)
                    self._frontier.scope_and_schedule_outlinks(
                            site, page, outlinks)
                    if browser.is_running():
                        site.cookie_db = browser.chrome.persist_and_read_cookie_db()
                page = None
        except brozzler.ShutdownRequested:
            self.logger.info("shutdown requested")
        except brozzler.NothingToClaim:
            self.logger.info("no pages left for site %s", site)
        except brozzler.ReachedLimit as e:
            self._frontier.reached_limit(site, e)
        except brozzler.ReachedTimeLimit as e:
            self._frontier.finished(site, "FINISHED_TIME_LIMIT")
        except brozzler.CrawlStopped:
            self._frontier.finished(site, "FINISHED_STOP_REQUESTED")
        # except brozzler.browser.BrowsingAborted:
        #     self.logger.info("{} shut down".format(browser))
        except brozzler.ProxyError as e:
            if self._warcprox_auto:
                logging.error(
                        'proxy error (site.proxy=%s), will try to choose a '
                        'healthy instance next time site is brozzled: %s',
                        site.proxy, e)
                site.proxy = None
            else:
                # using brozzler-worker --proxy, nothing to do but try the
                # same proxy again next time
                logging.error(
                        'proxy error (self._proxy=%r)', self._proxy, exc_info=1)
        except:
            self.logger.error(
                    'unexpected exception site=%r page=%r', site, page,
                    exc_info=True)
            if page:
                # give the page a few chances before giving up on it
                page.failed_attempts = (page.failed_attempts or 0) + 1
                if page.failed_attempts >= brozzler.MAX_PAGE_FAILURES:
                    self.logger.info(
                            'marking page "completed" after %s unexpected '
                            'exceptions attempting to brozzle %s',
                            page.failed_attempts, page)
                    self._frontier.completed_page(site, page)
                    page = None
        finally:
            if start:
                site.active_brozzling_time = (site.active_brozzling_time or 0) + time.time() - start
            self._frontier.disclaim_site(site, page)
    def _brozzle_site_thread_target(self, browser, site):
        """Thread body: brozzle `site`, then always release the browser
        and deregister this thread from the browsing set."""
        try:
            self.brozzle_site(browser, site)
        finally:
            browser.stop()
            self._browser_pool.release(browser)
            with self._browsing_threads_lock:
                self._browsing_threads.remove(threading.current_thread())
    def _service_heartbeat(self):
        """Report current worker status (load, pool size, browsers in
        use) to the service registry."""
        if hasattr(self, "status_info"):
            status_info = self.status_info
        else:
            status_info = {
                "role": "brozzler-worker",
                "ttl": self.HEARTBEAT_INTERVAL * 3,
            }
        status_info["load"] = 1.0 * self._browser_pool.num_in_use() / self._browser_pool.size
        status_info["browser_pool_size"] = self._browser_pool.size
        status_info["browsers_in_use"] = self._browser_pool.num_in_use()
        try:
            self.status_info = self._service_registry.heartbeat(status_info)
            self.logger.trace(
                    "status in service registry: %s", self.status_info)
        except r.ReqlError as e:
            self.logger.error(
                    "failed to send heartbeat and update service registry "
                    "with info %s: %s", status_info, e)
    def _service_heartbeat_if_due(self):
        '''Sends service registry heartbeat if due'''
        due = False
        if self._service_registry:
            if not hasattr(self, "status_info"):
                # never sent one yet
                due = True
            else:
                d = doublethink.utcnow() - self.status_info["last_heartbeat"]
                due = d.total_seconds() > self.HEARTBEAT_INTERVAL
        if due:
            self._service_heartbeat()
    def _start_browsing_some_sites(self):
        '''
        Starts browsing some sites.
        Raises:
            NoBrowsersAvailable if none available
        '''
        # acquire_multi() raises NoBrowsersAvailable if none available
        browsers = self._browser_pool.acquire_multi(
                (self._browser_pool.num_available() + 1) // 2)
        try:
            sites = self._frontier.claim_sites(len(browsers))
        except:
            self._browser_pool.release_all(browsers)
            raise
        for i in range(len(browsers)):
            if i < len(sites):
                th = threading.Thread(
                        target=self._brozzle_site_thread_target,
                        args=(browsers[i], sites[i]),
                        name="BrozzlingThread:%s" % browsers[i].chrome.port,
                        daemon=True)
                with self._browsing_threads_lock:
                    self._browsing_threads.add(th)
                th.start()
            else:
                # fewer sites than browsers: return the extras to the pool
                self._browser_pool.release(browsers[i])
    def run(self):
        """Main worker loop: send heartbeats, claim sites and spawn
        brozzling threads; on exit, unregister from the service registry
        and wind down all browsing threads."""
        self.logger.notice(
                'brozzler %s - brozzler-worker starting', brozzler.__version__)
        last_nothing_to_claim = 0
        try:
            while not self._shutdown.is_set():
                self._service_heartbeat_if_due()
                # back off claiming for 20s after a NothingToClaim
                if time.time() - last_nothing_to_claim > 20:
                    try:
                        self._start_browsing_some_sites()
                    except brozzler.browser.NoBrowsersAvailable:
                        logging.trace(
                                "all %s browsers are in use",
                                self._max_browsers)
                    except brozzler.NothingToClaim:
                        last_nothing_to_claim = time.time()
                        logging.trace(
                                "nothing to claim, all available active sites "
                                "are already claimed by a brozzler worker")
                time.sleep(0.5)
            self.logger.notice("shutdown requested")
        except r.ReqlError as e:
            self.logger.error(
                    "caught rethinkdb exception, will try to proceed",
                    exc_info=True)
        except brozzler.ShutdownRequested:
            self.logger.info("shutdown requested")
        except:
            self.logger.critical(
                    "thread exiting due to unexpected exception",
                    exc_info=True)
        finally:
            if self._service_registry and hasattr(self, "status_info"):
                try:
                    self._service_registry.unregister(self.status_info["id"])
                except:
                    self.logger.error(
                            "failed to unregister from service registry",
                            exc_info=True)
            self.logger.info(
                    'shutting down %s brozzling threads',
                    len(self._browsing_threads))
            with self._browsing_threads_lock:
                for th in self._browsing_threads:
                    if th.is_alive():
                        brozzler.thread_raise(th, brozzler.ShutdownRequested)
            self._browser_pool.shutdown_now()
            # copy to avoid "RuntimeError: Set changed size during iteration"
            thredz = set(self._browsing_threads)
            for th in thredz:
                th.join()
    def start(self):
        """Start the worker's main loop in a background thread; no-op if
        already started."""
        with self._start_stop_lock:
            if self._thread:
                self.logger.warning(
                        'ignoring start request because self._thread is '
                        'not None')
                return
            self._thread = threading.Thread(
                    target=self.run, name="BrozzlerWorker")
            self._thread.start()
    def shutdown_now(self):
        # alias for stop()
        self.stop()
    def stop(self):
        # signal the run() loop to shut down
        self._shutdown.set()
    def is_alive(self):
        # True if the worker thread has been started and is still running
        return self._thread and self._thread.is_alive()
|
test_failure.py | import json
import logging
import os
import sys
import tempfile
import threading
import time
import uuid
import numpy as np
import pytest
import redis
import ray
import ray.ray_constants as ray_constants
from ray.cluster_utils import Cluster
from ray.test_utils import (
relevant_errors,
wait_for_condition,
wait_for_errors,
RayTestTimeoutException,
)
def test_failed_task(ray_start_regular):
    """Exceptions raised inside remote tasks surface via the error table
    for fire-and-forget calls and via ray.get() otherwise."""
    @ray.remote
    def throw_exception_fct1():
        raise Exception("Test function 1 intentionally failed.")

    @ray.remote
    def throw_exception_fct2():
        raise Exception("Test function 2 intentionally failed.")

    @ray.remote(num_return_vals=3)
    def throw_exception_fct3(x):
        raise Exception("Test function 3 intentionally failed.")

    # Two fire-and-forget failures should both land in the error table.
    for _ in range(2):
        throw_exception_fct1.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 2
    for entry in errors:
        assert "Test function 1 intentionally failed." in entry.get("message")

    # ray.get on a failed task must raise, carrying the original message.
    failed_ref = throw_exception_fct2.remote()
    try:
        ray.get(failed_ref)
    except Exception as e:
        assert "Test function 2 intentionally failed." in str(e)
    else:
        assert False  # ray.get should have raised

    # Every return value of a failed multi-return task raises on get.
    for obj_ref in throw_exception_fct3.remote(1.0):
        try:
            ray.get(obj_ref)
        except Exception as e:
            assert "Test function 3 intentionally failed." in str(e)
        else:
            assert False  # ray.get should have raised

    class CustomException(ValueError):
        pass

    @ray.remote
    def f():
        raise CustomException("This function failed.")

    # Custom exception types are preserved inside the RayTaskError.
    try:
        ray.get(f.remote())
    except Exception as e:
        assert "This function failed." in str(e)
        assert isinstance(e, CustomException)
        assert isinstance(e, ray.exceptions.RayTaskError)
        assert "RayTaskError(CustomException)" in repr(e)
    else:
        assert False  # ray.get should have raised
def test_get_throws_quickly_when_found_exception(ray_start_regular):
    """ray.get on a list must raise as soon as any input has errored,
    without waiting for the still-running inputs to finish."""

    def random_path():
        # Fresh temp-file path used as a cross-process signal.
        return os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)

    def touch(path):
        with open(path, "w"):
            pass

    def wait_for_file(path):
        # Block until the signal file appears (created by touch()).
        while True:
            if os.path.exists(path):
                break
            time.sleep(0.1)

    # We use an actor instead of functions here. If we use functions, it's
    # very likely that two normal tasks are submitted before the first worker
    # is registered to Raylet. Since `maximum_startup_concurrency` is 1,
    # the worker pool will wait for the registration of the first worker
    # and skip starting new workers. The result is, the two tasks will be
    # executed sequentially, which breaks an assumption of this test case -
    # the two tasks run in parallel.
    @ray.remote
    class Actor(object):
        def bad_func1(self):
            raise Exception("Test function intentionally failed.")

        def bad_func2(self):
            # Hard-exit the worker process (actor death, not task error).
            os._exit(0)

        def slow_func(self, path):
            wait_for_file(path)

    def expect_exception(objects, exception):
        with pytest.raises(ray.exceptions.RayError) as err:
            ray.get(objects)
        assert err.type is exception

    # Task failure: RayTaskError must be raised while slow_func is blocked.
    f = random_path()
    actor = Actor.options(max_concurrency=2).remote()
    expect_exception([actor.bad_func1.remote(),
                      actor.slow_func.remote(f)], ray.exceptions.RayTaskError)
    touch(f)

    # Process death: RayActorError must be raised while slow_func is blocked.
    f = random_path()
    actor = Actor.options(max_concurrency=2).remote()
    expect_exception([actor.bad_func2.remote(),
                      actor.slow_func.remote(f)], ray.exceptions.RayActorError)
    touch(f)
def test_fail_importing_remote_function(ray_start_2_cpus):
    """A remote function closing over a module workers cannot import must
    push registration errors and raise (not hang) when invoked."""
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""

    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    # Only the driver's sys.path knows this directory; workers don't.
    sys.path.append(directory)
    module = __import__(module_name)

    # Define a function that closes over this temporary module. This should
    # fail when it is unpickled.
    @ray.remote
    def g(x, y=3):
        try:
            module.temporary_python_file()
        except Exception:
            # This test is not concerned with the error from running this
            # function. Only from unpickling the remote function.
            pass

    # Invoke the function so that the definition is exported.
    g.remote(1, y=2)
    wait_for_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
    assert len(errors) >= 2, errors
    assert "No module named" in errors[0]["message"]
    assert "No module named" in errors[1]["message"]

    # Check that if we try to call the function it throws an exception and
    # does not hang.
    for _ in range(10):
        with pytest.raises(
                Exception, match="This function was not imported properly."):
            ray.get(g.remote(1, y=2))

    f.close()

    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)
def test_failed_function_to_run(ray_start_2_cpus):
    """A failing run_function_on_all_workers hook is reported once per
    worker (two workers in this fixture)."""

    def failing_setup(worker):
        # Only raise on actual workers, not on the driver.
        if ray.worker.global_worker.mode == ray.WORKER_MODE:
            raise Exception("Function to run failed.")

    ray.worker.global_worker.run_function_on_all_workers(failing_setup)

    # Each of the two workers should have pushed one error.
    wait_for_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
    assert len(errors) == 2
    for error in errors[:2]:
        assert "Function to run failed." in error["message"]
def test_fail_importing_actor(ray_start_regular):
    """An actor class closing over an unimportable module must push a
    registration error, and its method calls must fail rather than hang."""
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""

    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    # Only the driver's sys.path knows this directory; workers don't.
    sys.path.append(directory)
    module = __import__(module_name)

    # Define an actor that closes over this temporary module. This should
    # fail when it is unpickled.
    @ray.remote
    class Foo:
        def __init__(self, arg1, arg2=3):
            self.x = module.temporary_python_file()

        def get_val(self, arg1, arg2=3):
            return 1

    # There should be no errors yet.
    assert len(ray.errors()) == 0

    # Create an actor.
    foo = Foo.remote(3, arg2=0)

    # Wait for the error to arrive.
    wait_for_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR)
    assert "No module named" in errors[0]["message"]

    # Wait for the error from when the __init__ tries to run.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert ("failed to be imported, and so cannot execute this method" in
            errors[0]["message"])

    # Check that if we try to get the function it throws an exception and
    # does not hang.
    with pytest.raises(Exception, match="failed to be imported"):
        ray.get(foo.get_val.remote(1, arg2=2))

    # Wait for the error from when the call to get_val.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert ("failed to be imported, and so cannot execute this method" in
            errors[1]["message"])

    f.close()

    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular):
    """A failing actor constructor pushes a task error, and later method
    calls on the broken actor push further errors."""
    error_message1 = "actor constructor failed"
    error_message2 = "actor method failed"

    @ray.remote
    class FailedActor:
        def __init__(self):
            raise Exception(error_message1)

        def fail_method(self):
            raise Exception(error_message2)

    a = FailedActor.remote()

    # Make sure that we get errors from a failed constructor.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert error_message1 in errors[0]["message"]

    # Make sure that we get errors from a failed method.
    a.fail_method.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 2
    # Note: the second error also carries the *constructor* message,
    # since the actor never finished initializing.
    assert error_message1 in errors[1]["message"]
def test_failed_actor_method(ray_start_regular):
    """A failing actor method pushes a task error carrying its message."""
    error_text = "actor method failed"

    @ray.remote
    class FailedActor:
        def __init__(self):
            pass

        def fail_method(self):
            raise Exception(error_text)

    actor = FailedActor.remote()

    # The failure of a fire-and-forget method call arrives as a pushed error.
    actor.fail_method.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    pushed = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(pushed) == 1
    assert error_text in pushed[0]["message"]
def test_incorrect_method_calls(ray_start_regular):
    """Wrong argument counts and unknown attributes on actor handles must
    raise immediately at call time."""

    @ray.remote
    class Actor:
        def __init__(self, missing_variable_name):
            pass

        def get_val(self, x):
            pass

    # Constructor arity: too few, then too many, arguments.
    with pytest.raises(Exception):
        actor = Actor.remote()
    with pytest.raises(Exception):
        actor = Actor.remote(1, 2)

    # The correct arity succeeds.
    actor = Actor.remote(1)

    # Method arity checks mirror the constructor checks.
    with pytest.raises(Exception):
        actor.get_val.remote()
    with pytest.raises(Exception):
        actor.get_val.remote(1, 2)

    # Unknown methods raise AttributeError, remote invocation or not.
    with pytest.raises(AttributeError):
        actor.nonexistent_method()
    with pytest.raises(AttributeError):
        actor.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular):
    """A worker that crashes *after* successfully finishing a task must
    push a WORKER_CRASH error."""

    @ray.remote
    def f():
        # This is the only reasonable variable we can set here that makes the
        # execute_task function fail after the task got executed.
        ray.experimental.signal.reset = None

    # Running this task should cause the worker to raise an exception after
    # the task has successfully completed.
    f.remote()

    wait_for_errors(ray_constants.WORKER_CRASH_PUSH_ERROR, 1)
def test_worker_dying(ray_start_regular):
    """A task that kills its own worker raises RayWorkerError at the
    caller and pushes a worker-died error."""

    # max_retries=0 so the dead task is not retried on another worker.
    @ray.remote(max_retries=0)
    def kill_self():
        eval("exit()")

    with pytest.raises(ray.exceptions.RayWorkerError):
        ray.get(kill_self.remote())

    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
    died = relevant_errors(ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(died) == 1
    assert "died or was killed while executing" in died[0]["message"]
def test_actor_worker_dying(ray_start_regular):
    """Killing an actor's worker mid-task raises RayActorError for that
    task's result and RayTaskError for downstream consumers of it."""

    @ray.remote
    class Actor:
        def kill(self):
            # Kill this actor's own worker process.
            eval("exit()")

    @ray.remote
    def consume(x):
        pass

    a = Actor.remote()
    [obj], _ = ray.wait([a.kill.remote()], timeout=5.0)
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(obj)
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(consume.remote(obj))
    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_future_tasks(ray_start_regular):
    """Tasks submitted both before and after the actor's process is
    SIGKILLed must all fail (no reconstruction)."""

    @ray.remote(max_reconstructions=0)
    class Actor:
        def getpid(self):
            return os.getpid()

        def sleep(self):
            time.sleep(1)

    a = Actor.remote()
    pid = ray.get(a.getpid.remote())

    # Queue work, kill the process out from under it, then queue more.
    tasks1 = [a.sleep.remote() for _ in range(10)]
    os.kill(pid, 9)
    time.sleep(0.1)
    tasks2 = [a.sleep.remote() for _ in range(10)]
    for obj in tasks1 + tasks2:
        with pytest.raises(Exception):
            ray.get(obj)

    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
    """Calling a method on an idle actor whose process was SIGKILLed must
    raise instead of hanging."""

    @ray.remote(max_reconstructions=0)
    class Actor:
        def getpid(self):
            return os.getpid()

    actor = Actor.remote()
    actor_pid = ray.get(actor.getpid.remote())

    # Kill the actor's process while no task is running.
    os.kill(actor_pid, 9)
    time.sleep(0.1)
    result = actor.getpid.remote()
    with pytest.raises(Exception):
        ray.get(result)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular):
    """Actors that go out of scope or are killed via __ray_terminate__
    must not produce driver-visible errors."""

    @ray.remote
    class Actor:
        pass

    # The first handle is dropped when `a` is rebound: out-of-scope death.
    a = Actor.remote()
    # The second actor is terminated explicitly.
    a = Actor.remote()
    a.__ray_terminate__.remote()
    # Give the backend time to (incorrectly) push any error.
    time.sleep(1)
    # Fixed typo in the failure message: "propogated" -> "propagated".
    assert len(
        ray.errors()) == 0, ("Should not have propagated an error - {}".format(
            ray.errors()))
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory):
    """Fetching an evicted task-result object whose producer is still
    running should push a PUT_RECONSTRUCTION error (skipped: feature not
    implemented yet)."""
    num_objects = 3
    object_size = 4 * 10**5

    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg

    @ray.remote
    def put_arg_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = single_dependency.remote(0, np.zeros(
            object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)

        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i

        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])

    put_arg_task.remote()

    # Make sure we receive the correct error message.
    wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
    """Same as test_put_error1, but the evicted object comes from an
    explicit ray.put instead of a task result (skipped: feature not
    implemented yet)."""
    # This is the same as the previous test, but it calls ray.put directly.
    num_objects = 3
    object_size = 4 * 10**5

    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg

    @ray.remote
    def put_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = ray.put(np.zeros(object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)

        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i

        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])

    put_task.remote()

    # Make sure we receive the correct error message.
    wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
def test_version_mismatch(shutdown_only):
    """Connecting with a mismatched ray.__version__ must push a
    VERSION_MISMATCH error."""
    ray_version = ray.__version__
    ray.__version__ = "fake ray version"
    try:
        ray.init(num_cpus=1)
        wait_for_errors(ray_constants.VERSION_MISMATCH_PUSH_ERROR, 1)
    finally:
        # Always restore the real version, even if init or the wait fails;
        # otherwise every later test in this process sees the fake version.
        ray.__version__ = ray_version
def test_warning_monitor_died(ray_start_2_cpus):
    """A malformed GCS table entry that crashes the monitor must produce
    a MONITOR_DIED error on the driver."""

    @ray.remote
    def f():
        pass

    # Wait for the monitor process to start.
    ray.get(f.remote())
    time.sleep(1)

    # Cause the monitor to raise an exception by pushing a malformed message to
    # Redis. This will probably kill the raylet and the raylet_monitor in
    # addition to the monitor.
    fake_id = 20 * b"\x00"
    malformed_message = "asdf"
    redis_client = ray.worker.global_worker.redis_client
    redis_client.execute_command(
        "RAY.TABLE_ADD", ray.gcs_utils.TablePrefix.Value("HEARTBEAT_BATCH"),
        ray.gcs_utils.TablePubsub.Value("HEARTBEAT_BATCH_PUBSUB"), fake_id,
        malformed_message)

    wait_for_errors(ray_constants.MONITOR_DIED_ERROR, 1)
def test_export_large_objects(ray_start_regular):
    """Pickling a remote function or actor that captures an over-large
    object must warn the driver (once per export)."""
    # The redundant function-local `import ray.ray_constants` was removed;
    # ray_constants is already imported at module scope.
    large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)

    @ray.remote
    def f():
        large_object

    # Invoke the function so that the definition is exported.
    f.remote()

    # Make sure that a warning is generated.
    wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 1)

    @ray.remote
    class Foo:
        def __init__(self):
            large_object

    Foo.remote()

    # Make sure that a warning is generated.
    wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 2)
@pytest.mark.skip(reason="TODO detect resource deadlock")
def test_warning_for_resource_deadlock(shutdown_only):
    """A blocked task holding the only CPU while waiting on actors that
    can never be scheduled should trigger a deadlock warning (skipped:
    detection not implemented)."""
    # Check that we get warning messages for infeasible tasks.
    ray.init(num_cpus=1)

    @ray.remote(num_cpus=1)
    class Foo:
        def f(self):
            return 0

    @ray.remote
    def f():
        # Creating both actors is not possible.
        actors = [Foo.remote() for _ in range(2)]
        for a in actors:
            ray.get(a.f.remote())

    # Run in a task to check we handle the blocked task case correctly
    f.remote()
    wait_for_errors(ray_constants.RESOURCE_DEADLOCK_ERROR, 1, timeout=30)
def test_warning_for_infeasible_tasks(ray_start_regular):
    """Tasks and actor placements whose resource demands can never be met
    must each produce an infeasible-task warning."""

    # The fixture has no GPUs, so this task is infeasible.
    @ray.remote(num_gpus=1)
    def f():
        pass

    # No "Custom" resource exists either, so this actor is infeasible.
    @ray.remote(resources={"Custom": 1})
    class Foo:
        pass

    f.remote()
    wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)

    Foo.remote()
    wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 2)
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
    """Placing an actor on a 0-CPU cluster must warn about infeasibility,
    even though the actor-creation task itself needs no CPUs."""
    ray.init(num_cpus=0)

    @ray.remote
    class Foo:
        pass

    # This placement can never be satisfied.
    Foo.remote()
    wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
def test_warning_for_too_many_actors(shutdown_only):
    """Creating many long-lived actors on few CPUs must warn that the
    worker pool is growing too large, and again as more are added."""
    # Check that if we run a workload which requires too many workers to be
    # started that we will receive a warning.
    num_cpus = 2
    ray.init(num_cpus=num_cpus)

    @ray.remote
    class Foo:
        def __init__(self):
            # Keep each actor's worker occupied indefinitely.
            time.sleep(1000)

    [Foo.remote() for _ in range(num_cpus * 3)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
    [Foo.remote() for _ in range(num_cpus)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 2)
def test_warning_for_too_many_nested_tasks(shutdown_only):
    """A fan-out of nested blocking tasks that forces many extra workers
    must warn that the worker pool is too large."""
    # Check that if we run a workload which requires too many workers to be
    # started that we will receive a warning.
    num_cpus = 2
    ray.init(num_cpus=num_cpus)

    @ray.remote
    def f():
        time.sleep(1000)
        return 1

    @ray.remote
    def h():
        time.sleep(1)
        # Blocks on f, which never finishes.
        ray.get(f.remote())

    @ray.remote
    def g():
        # Sleep so that the f tasks all get submitted to the scheduler after
        # the g tasks.
        time.sleep(1)
        ray.get(h.remote())

    [g.remote() for _ in range(num_cpus * 4)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
    """Re-exporting the same remote function (or actor class) more than
    DUPLICATE_REMOTE_FUNCTION_THRESHOLD times must log a warning."""
    import io  # hoisted from mid-function; keeps imports at the top

    ray.init(num_cpus=1)

    @ray.remote
    def create_remote_function():
        @ray.remote
        def g():
            return 1

        return ray.get(g.remote())

    # One export below the threshold: no warning expected yet.
    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_remote_function.remote())

    log_capture_string = io.StringIO()
    ch = logging.StreamHandler(log_capture_string)

    # TODO(rkn): It's terrible to have to rely on this implementation detail,
    # the fact that the warning comes from ray.import_thread.logger. However,
    # I didn't find a good way to capture the output for all loggers
    # simultaneously.
    ray.import_thread.logger.addHandler(ch)

    # This export crosses the threshold and should trigger the warning.
    ray.get(create_remote_function.remote())

    start_time = time.time()
    while time.time() < start_time + 10:
        log_contents = log_capture_string.getvalue()
        if len(log_contents) > 0:
            break
        # Sleep between polls instead of busy-waiting for up to 10s.
        time.sleep(0.1)
    ray.import_thread.logger.removeHandler(ch)

    assert "remote function" in log_contents
    assert "has been exported {} times.".format(
        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents

    # Now test the same thing but for actors.
    @ray.remote
    def create_actor_class():
        # Require a GPU so that the actor is never actually created and we
        # don't spawn an unreasonable number of processes.
        @ray.remote(num_gpus=1)
        class Foo:
            pass

        Foo.remote()

    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_actor_class.remote())

    log_capture_string = io.StringIO()
    ch = logging.StreamHandler(log_capture_string)

    # TODO(rkn): As mentioned above, it's terrible to have to rely on this
    # implementation detail.
    ray.import_thread.logger.addHandler(ch)

    ray.get(create_actor_class.remote())

    start_time = time.time()
    while time.time() < start_time + 10:
        log_contents = log_capture_string.getvalue()
        if len(log_contents) > 0:
            break
        # Sleep between polls instead of busy-waiting for up to 10s.
        time.sleep(0.1)
    ray.import_thread.logger.removeHandler(ch)

    assert "actor" in log_contents
    assert "has been exported {} times.".format(
        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
    """The RAY.* Redis module commands must validate argument counts,
    prefix/channel ranges, and index values, while accepting duplicates."""
    address_info = ray_start_regular
    address = address_info["redis_address"]
    address = address.split(":")
    assert len(address) == 2

    def run_failure_test(expecting_message, *command):
        # The command must fail with an error matching expecting_message.
        with pytest.raises(
                Exception, match=".*{}.*".format(expecting_message)):
            client = redis.StrictRedis(
                host=address[0],
                port=int(address[1]),
                password=ray_constants.REDIS_DEFAULT_PASSWORD)
            client.execute_command(*command)

    def run_one_command(*command):
        # The command must succeed.
        client = redis.StrictRedis(
            host=address[0],
            port=int(address[1]),
            password=ray_constants.REDIS_DEFAULT_PASSWORD)
        client.execute_command(*command)

    run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_ADD", 100000, 1, 1, 1)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
    run_failure_test("Prefix must be a valid TablePrefix integer",
                     "RAY.TABLE_ADD", b"a", 1, 1, 1)
    run_failure_test("Pubsub channel must be in the TablePubsub range",
                     "RAY.TABLE_ADD", 1, 10000, 1, 1)
    run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
                     1, b"a", 1, 1)
    # Change the key from 1 to 2, since the previous command should have
    # succeeded at writing the key, but not publishing it.
    run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     -1)
    run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     b"a")
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # It's okay to remove duplicate entries.
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes):
    """Killing both raylets must produce REMOVED_NODE errors whose
    messages name exactly the nodes that died."""
    cluster = ray_start_cluster_2_nodes
    cluster.wait_for_nodes()

    node_ids = {item["NodeID"] for item in ray.nodes()}

    # Try to make sure that the monitor has received at least one heartbeat
    # from the node.
    time.sleep(0.5)

    # Kill both raylets.
    cluster.list_all_nodes()[1].kill_raylet()
    cluster.list_all_nodes()[0].kill_raylet()

    # Check that we get warning messages for both raylets.
    wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=40)

    # Extract the client IDs from the error messages. This will need to be
    # changed if the error message changes.
    warning_node_ids = {
        item["message"].split(" ")[5]
        for item in relevant_errors(ray_constants.REMOVED_NODE_ERROR)
    }

    assert node_ids == warning_node_ids
def test_raylet_crash_when_get(ray_start_regular):
    """A blocked ray.get on a never-created object must raise once the
    raylet dies, instead of hanging forever."""

    def kill_raylet_after_delay():
        # Don't kill raylet before default workers get connected.
        time.sleep(2)
        ray.worker._global_node.kill_raylet()

    killer = threading.Thread(target=kill_raylet_after_delay)
    killer.start()
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(ray.ObjectID.from_random())
    killer.join()
def test_connect_with_disconnected_node(shutdown_only):
    """Only SIGKILLed nodes are marked dead by the monitor; gracefully
    removed nodes are not, and dead nodes cause no connection errors."""
    config = json.dumps({
        "num_heartbeats_timeout": 50,
        "raylet_heartbeat_timeout_milliseconds": 10,
    })
    cluster = Cluster()
    cluster.add_node(num_cpus=0, _internal_config=config)
    ray.init(address=cluster.address)
    info = relevant_errors(ray_constants.REMOVED_NODE_ERROR)
    assert len(info) == 0
    # This node is killed by SIGKILL, ray_monitor will mark it to dead.
    dead_node = cluster.add_node(num_cpus=0, _internal_config=config)
    cluster.remove_node(dead_node, allow_graceful=False)
    wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 1, timeout=2)
    # This node is killed by SIGKILL, ray_monitor will mark it to dead.
    dead_node = cluster.add_node(num_cpus=0, _internal_config=config)
    cluster.remove_node(dead_node, allow_graceful=False)
    wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=2)
    # This node is killed by SIGTERM, ray_monitor will not mark it again.
    removing_node = cluster.add_node(num_cpus=0, _internal_config=config)
    cluster.remove_node(removing_node, allow_graceful=True)
    # No third REMOVED_NODE error should ever arrive.
    with pytest.raises(RayTestTimeoutException):
        wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 3, timeout=2)
    # There is no connection error to a dead node.
    info = relevant_errors(ray_constants.RAYLET_CONNECTION_ERROR)
    assert len(info) == 0
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 5,
        "object_store_memory": 10**8,
        "_internal_config": json.dumps({
            "object_store_full_max_retries": 0
        })
    }],
    indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
    """Five actors, each producing objects half the store's size, must
    keep making progress when results are consumed as they complete."""

    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # Each result is half the configured object store memory.
            return np.zeros(10**8 // 2, dtype=np.uint8)

    actors = [LargeMemoryActor.remote() for _ in range(5)]
    for _ in range(10):
        pending = [a.some_expensive_task.remote() for a in actors]
        # Drain results one at a time so earlier ones can be evicted.
        while pending:
            [done], pending = ray.wait(pending, num_returns=1)
def test_fill_object_store_exception(shutdown_only):
    """Overfilling the object store must raise ObjectStoreFullError (for
    tasks, actor methods, and ray.put) without killing the actor."""
    ray.init(
        num_cpus=2,
        object_store_memory=10**8,
        _internal_config=json.dumps({
            "object_store_full_max_retries": 0
        }))

    @ray.remote
    def expensive_task():
        return np.zeros((10**8) // 10, dtype=np.uint8)

    # 20 tasks x 10% of the store each: at least one must fail to fit.
    with pytest.raises(ray.exceptions.RayTaskError) as e:
        ray.get([expensive_task.remote() for _ in range(20)])
    with pytest.raises(ray.exceptions.ObjectStoreFullError):
        raise e.as_instanceof_cause()

    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # Strictly larger than the whole object store.
            return np.zeros(10**8 + 2, dtype=np.uint8)

        def test(self):
            return 1

    actor = LargeMemoryActor.remote()
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(actor.some_expensive_task.remote())
    # Make sure actor does not die
    ray.get(actor.test.remote())

    with pytest.raises(ray.exceptions.ObjectStoreFullError):
        ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 1,
        "num_cpus": 2,
    }, {
        "num_nodes": 2,
        "num_cpus": 1,
    }],
    indirect=True)
def test_eviction(ray_start_cluster):
    """A freed object raises UnreconstructableError on direct get and
    RayTaskError when passed as a task dependency."""

    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)

    obj = large_object.remote()
    assert (isinstance(ray.get(obj), np.ndarray))
    # Evict the object.
    ray.internal.free([obj])
    # Wait until the local store no longer has the object.
    while ray.worker.global_worker.core_worker.object_exists(obj):
        time.sleep(1)
    # ray.get throws an exception.
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(obj)

    @ray.remote
    def dependent_task(x):
        return

    # If the object is passed by reference, the task throws an
    # exception.
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 1,
        "num_cpus": 2,
    }, {
        "num_nodes": 2,
        "num_cpus": 1,
    }],
    indirect=True)
def test_serialized_id_eviction(ray_start_cluster):
    """An object freed while a serialized ObjectID referencing it is held
    inside another task becomes unreconstructable in that task."""

    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)

    @ray.remote
    def get(obj_ids):
        # Receives the ObjectID wrapped in a list (serialized, not inlined).
        obj_id = obj_ids[0]
        assert (isinstance(ray.get(obj_id), np.ndarray))
        # Wait for the object to be evicted.
        ray.internal.free(obj_id)
        while ray.worker.global_worker.core_worker.object_exists(obj_id):
            time.sleep(1)
        with pytest.raises(ray.exceptions.UnreconstructableError):
            ray.get(obj_id)
        print("get done", obj_ids)

    obj = large_object.remote()
    result = get.remote([obj])
    ray.internal.free(obj)
    ray.get(result)
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 2,
        "num_cpus": 1,
    }, {
        "num_nodes": 1,
        "num_cpus": 2,
    }],
    indirect=True)
def test_serialized_id(ray_start_cluster):
    """ObjectIDs serialized inside task arguments must resolve correctly
    for both task results and ray.put objects, fetched directly or via a
    dependent task."""

    @ray.remote
    def small_object():
        # Sleep a bit before creating the object to force a timeout
        # at the getter.
        time.sleep(1)
        return 1

    @ray.remote
    def dependent_task(x):
        return x

    @ray.remote
    def get(obj_ids, test_dependent_task):
        print("get", obj_ids)
        obj_id = obj_ids[0]
        if test_dependent_task:
            assert ray.get(dependent_task.remote(obj_id)) == 1
        else:
            assert ray.get(obj_id) == 1

    # Task result, fetched directly.
    obj = small_object.remote()
    ray.get(get.remote([obj], False))

    # Task result, fetched through a dependent task.
    obj = small_object.remote()
    ray.get(get.remote([obj], True))

    # ray.put object, fetched directly.
    obj = ray.put(1)
    ray.get(get.remote([obj], False))

    # ray.put object, fetched through a dependent task.
    obj = ray.put(1)
    ray.get(get.remote([obj], True))
def test_fate_sharing(ray_start_cluster):
    """Child tasks/actors must die with their parent actor, whether the
    parent's process is killed or its whole node is removed, so the
    child's resources become available again."""
    config = json.dumps({
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
    })
    cluster = Cluster()
    # Head node with no resources.
    cluster.add_node(num_cpus=0, _internal_config=config)
    # Node to place the parent actor.
    node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
    # Node to place the child actor.
    cluster.add_node(num_cpus=1, resources={"child": 1})
    cluster.wait_for_nodes()
    ray.init(address=cluster.address)

    @ray.remote
    def sleep():
        time.sleep(1000)

    @ray.remote(resources={"child": 1})
    def probe():
        return

    @ray.remote
    class Actor(object):
        def __init__(self):
            return

        def start_child(self, use_actors):
            # Block holding the "child" resource via an actor or a task.
            if use_actors:
                child = Actor.options(resources={"child": 1}).remote()
                ray.get(child.sleep.remote())
            else:
                ray.get(sleep.options(resources={"child": 1}).remote())

        def sleep(self):
            time.sleep(1000)

        def get_pid(self):
            return os.getpid()

    # Returns whether the "child" resource is available.
    def child_resource_available():
        p = probe.remote()
        ready, _ = ray.wait([p], timeout=1)
        return len(ready) > 0

    # Test fate sharing if the parent process dies.
    def test_process_failure(use_actors):
        a = Actor.options(resources={"parent": 1}).remote()
        pid = ray.get(a.get_pid.remote())
        a.start_child.remote(use_actors=use_actors)
        # Wait for the child to be scheduled.
        assert wait_for_condition(
            lambda: not child_resource_available(), timeout_ms=10000)
        # Kill the parent process.
        os.kill(pid, 9)
        assert wait_for_condition(child_resource_available, timeout_ms=10000)

    # Test fate sharing if the parent node dies.
    def test_node_failure(node_to_kill, use_actors):
        a = Actor.options(resources={"parent": 1}).remote()
        a.start_child.remote(use_actors=use_actors)
        # Wait for the child to be scheduled.
        assert wait_for_condition(
            lambda: not child_resource_available(), timeout_ms=10000)
        # Kill the parent process.
        cluster.remove_node(node_to_kill, allow_graceful=False)
        node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
        assert wait_for_condition(child_resource_available, timeout_ms=10000)
        return node_to_kill

    test_process_failure(use_actors=True)
    test_process_failure(use_actors=False)
    node_to_kill = test_node_failure(node_to_kill, use_actors=True)
    node_to_kill = test_node_failure(node_to_kill, use_actors=False)
if __name__ == "__main__":
    # Run this file's tests directly. pytest is already imported at module
    # scope (the redundant local re-import was removed).
    sys.exit(pytest.main(["-v", __file__]))
|
stlview.py | #!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
import time
import numpy
import pyglet
pyglet.options['debug_gl'] = True
from pyglet.gl import GL_AMBIENT_AND_DIFFUSE, glBegin, glClearColor, \
glColor3f, GL_CULL_FACE, GL_DEPTH_TEST, GL_DIFFUSE, GL_EMISSION, \
glEnable, glEnd, GL_FILL, GLfloat, GL_FRONT_AND_BACK, GL_LIGHT0, \
GL_LIGHT1, glLightfv, GL_LIGHTING, GL_LINE, glMaterialf, glMaterialfv, \
glMultMatrixd, glNormal3f, glPolygonMode, glPopMatrix, GL_POSITION, \
glPushMatrix, glRotatef, glScalef, glShadeModel, GL_SHININESS, \
GL_SMOOTH, GL_SPECULAR, glTranslatef, GL_TRIANGLES, glVertex3f, \
glGetDoublev, GL_MODELVIEW_MATRIX, GLdouble, glClearDepth, glDepthFunc, \
GL_LEQUAL, GL_BLEND, glBlendFunc, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, \
GL_LINE_LOOP, glGetFloatv, GL_LINE_WIDTH, glLineWidth, glDisable, \
GL_LINE_SMOOTH
from pyglet import gl
from .gl.panel import wxGLPanel
from .gl.trackball import build_rotmatrix
from .gl.libtatlin import actors
def vec(*args):
    """Pack the given numbers into a ctypes GLfloat array (pyglet helper)."""
    array_type = GLfloat * len(args)
    return array_type(*args)
class stlview:
    """Indexed-triangle pyglet vertex list built from STL facets.

    Each facet is expected as (normal, triangle-points); the facet normal
    is repeated for each of the triangle's three vertices.
    """

    def __init__(self, facets, batch):
        # Flatten facets into parallel vertex and per-vertex-normal arrays.
        vertices = []
        normals = []
        for facet in facets:
            for point in facet[1]:
                vertices.extend(point)
                normals.extend(facet[0])

        # Triangles are unshared, so indices are simply sequential.
        indices = list(range(3 * len(facets)))
        self.vertex_list = batch.add_indexed(len(vertices) // 3,
                                             GL_TRIANGLES,
                                             None,  # group,
                                             indices,
                                             ('v3f/static', vertices),
                                             ('n3f/static', normals))

    def delete(self):
        """Release the GPU-side vertex list."""
        self.vertex_list.delete()
class StlViewPanel(wxGLPanel):
    # NOTE(review): presumably tells the wxGLPanel base class to skip its
    # own lighting setup, since OnInitGL configures lights explicitly —
    # confirm against wxGLPanel.
    do_lights = False
    def __init__(self, parent, size,
                 build_dimensions = None, circular = False,
                 antialias_samples = 0,
                 grid = (1, 10)):
        """Create the STL preview panel.

        parent: wx parent window (also consulted later by OnInitGL for
            queued filenames / load callbacks).
        size: initial panel size.
        build_dimensions: printer volume description; defaults to
            [200, 200, 100, 0, 0, 0].
        circular: draw a circular (delta-style) platform.
        antialias_samples: MSAA sample count passed to the GL canvas.
        grid: platform grid spacing — assumed (minor, major); TODO confirm
            against actors.Platform.
        """
        super().__init__(parent, wx.DefaultPosition, size, 0,
                         antialias_samples = antialias_samples)
        self.batches = []
        self.rot = 0
        # Mouse handlers (move/wheel defined elsewhere in this class).
        self.canvas.Bind(wx.EVT_MOUSE_EVENTS, self.move)
        self.canvas.Bind(wx.EVT_MOUSEWHEEL, self.wheel)
        self.canvas.Bind(wx.EVT_LEFT_DCLICK, self.double_click)
        self.initialized = True
        self.parent = parent
        self.initpos = None
        if build_dimensions:
            self.build_dimensions = build_dimensions
        else:
            self.build_dimensions = [200, 200, 100, 0, 0, 0]
        self.platform = actors.Platform(self.build_dimensions,
                                        circular = circular,
                                        grid = grid)
        # Initial camera distance: the platform's larger horizontal extent.
        self.dist = max(self.build_dimensions[0], self.build_dimensions[1])
        # Identity orientation quaternion for the trackball rotation.
        self.basequat = [0, 0, 0, 1]
        # NOTE(review): original comment just said "why needed" — deferring
        # forceresize until the event loop runs; exact reason unconfirmed.
        wx.CallAfter(self.forceresize)
        self.mousepos = (0, 0)
def OnReshape(self):
self.mview_initialized = False
super(StlViewPanel, self).OnReshape()
# ==========================================================================
# GLFrame OpenGL Event Handlers
# ==========================================================================
def OnInitGL(self, call_reshape = True):
'''Initialize OpenGL for use in the window.'''
if self.GLinitialized:
return
self.GLinitialized = True
# create a pyglet context for this panel
self.pygletcontext = gl.Context(gl.current_context)
self.pygletcontext.canvas = self
self.pygletcontext.set_current()
# normal gl init
glClearColor(0, 0, 0, 1)
glColor3f(1, 0, 0)
glEnable(GL_DEPTH_TEST)
glClearDepth(1.0)
glDepthFunc(GL_LEQUAL)
glEnable(GL_CULL_FACE)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# Uncomment this line for a wireframe view
# glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
# Simple light setup. On Windows GL_LIGHT0 is enabled by default,
# but this is not the case on Linux or Mac, so remember to always
# include it.
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_LIGHT1)
glLightfv(GL_LIGHT0, GL_POSITION, vec(.5, .5, 1, 0))
glLightfv(GL_LIGHT0, GL_SPECULAR, vec(.5, .5, 1, 1))
glLightfv(GL_LIGHT0, GL_DIFFUSE, vec(1, 1, 1, 1))
glLightfv(GL_LIGHT1, GL_POSITION, vec(1, 0, .5, 0))
glLightfv(GL_LIGHT1, GL_DIFFUSE, vec(.5, .5, .5, 1))
glLightfv(GL_LIGHT1, GL_SPECULAR, vec(1, 1, 1, 1))
glShadeModel(GL_SMOOTH)
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.5, 0, 0.3, 1))
glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, vec(1, 1, 1, 1))
glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 50)
glMaterialfv(GL_FRONT_AND_BACK, GL_EMISSION, vec(0, 0.1, 0, 0.9))
if call_reshape:
self.OnReshape()
if hasattr(self.parent, "filenames") and self.parent.filenames:
for filename in self.parent.filenames:
self.parent.load_file(filename)
self.parent.autoplate()
if hasattr(self.parent, "loadcb"):
self.parent.loadcb()
self.parent.filenames = None
def double_click(self, event):
if hasattr(self.parent, "clickcb") and self.parent.clickcb:
self.parent.clickcb(event)
def forceresize(self):
#print('forceresize')
x, y = self.GetClientSize()
#TODO: probably not needed
self.SetClientSize((x, y+1))
self.SetClientSize((x, y))
self.initialized = False
def move(self, event):
"""react to mouse actions:
no mouse: show red mousedrop
LMB: move active object,
with shift rotate viewport
RMB: nothing
with shift move viewport
"""
self.mousepos = event.GetPosition()
if event.Dragging():
if event.LeftIsDown():
self.handle_rotation(event)
elif event.RightIsDown():
self.handle_translation(event)
self.Refresh(False)
elif event.ButtonUp(wx.MOUSE_BTN_LEFT) or \
event.ButtonUp(wx.MOUSE_BTN_RIGHT):
self.initpos = None
event.Skip()
def handle_wheel(self, event):
delta = event.GetWheelRotation()
factor = 1.05
x, y = event.GetPosition()
x, y, _ = self.mouse_to_3d(x, y, local_transform = True)
if delta > 0:
self.zoom(factor, (x, y))
else:
self.zoom(1 / factor, (x, y))
def wheel(self, event):
"""react to mouse wheel actions:
rotate object
with shift zoom viewport
"""
self.handle_wheel(event)
wx.CallAfter(self.Refresh)
def keypress(self, event):
"""gets keypress events and moves/rotates active shape"""
keycode = event.GetKeyCode()
step = 5
angle = 18
if event.ControlDown():
step = 1
angle = 1
# h
if keycode == 72:
self.parent.move_shape((-step, 0))
# l
if keycode == 76:
self.parent.move_shape((step, 0))
# j
if keycode == 75:
self.parent.move_shape((0, step))
# k
if keycode == 74:
self.parent.move_shape((0, -step))
# [
if keycode == 91:
self.parent.rotate_shape(-angle)
# ]
if keycode == 93:
self.parent.rotate_shape(angle)
event.Skip()
wx.CallAfter(self.Refresh)
def anim(self, obj):
g = 50 * 9.8
v = 20
dt = 0.05
basepos = obj.offsets[2]
obj.offsets[2] += obj.animoffset
while obj.offsets[2] > -1:
time.sleep(dt)
obj.offsets[2] -= v * dt
v += g * dt
if obj.offsets[2] < 0:
obj.scale[2] *= 1 - 3 * dt
# return
v = v / 4
while obj.offsets[2] < basepos:
time.sleep(dt)
obj.offsets[2] += v * dt
v -= g * dt
obj.scale[2] *= 1 + 5 * dt
obj.scale[2] = 1.0
def create_objects(self):
'''create opengl objects when opengl is initialized'''
if not self.platform.initialized:
self.platform.init()
self.initialized = 1
#TODO: this probably creates constant redraw
# create_objects is called during OnDraw, remove
wx.CallAfter(self.Refresh)
def prepare_model(self, m, scale):
batch = pyglet.graphics.Batch()
stlview(m.facets, batch = batch)
m.batch = batch
# m.animoffset = 300
# threading.Thread(target = self.anim, args = (m, )).start()
wx.CallAfter(self.Refresh)
def update_object_resize(self):
'''called when the window receives only if opengl is initialized'''
pass
def draw_objects(self):
'''called in the middle of ondraw after the buffer has been cleared'''
self.create_objects()
glPushMatrix()
glTranslatef(0, 0, -self.dist)
glMultMatrixd(build_rotmatrix(self.basequat)) # Rotate according to trackball
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.2, 0.2, 0.2, 1))
glTranslatef(- self.build_dimensions[3] - self.platform.width / 2,
- self.build_dimensions[4] - self.platform.depth / 2, 0) # Move origin to bottom left of platform
# Draw platform
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glDisable(GL_LIGHTING)
self.platform.draw()
glEnable(GL_LIGHTING)
# Draw mouse
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
inter = self.mouse_to_plane(self.mousepos[0], self.mousepos[1],
plane_normal = (0, 0, 1), plane_offset = 0,
local_transform = False)
if inter is not None:
glPushMatrix()
glTranslatef(inter[0], inter[1], inter[2])
glBegin(GL_TRIANGLES)
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(1, 0, 0, 1))
glNormal3f(0, 0, 1)
glVertex3f(2, 2, 0)
glVertex3f(-2, 2, 0)
glVertex3f(-2, -2, 0)
glVertex3f(2, -2, 0)
glVertex3f(2, 2, 0)
glVertex3f(-2, -2, 0)
glEnd()
glPopMatrix()
# Draw objects
glDisable(GL_CULL_FACE)
glPushMatrix()
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.3, 0.7, 0.5, 1))
for i in self.parent.models:
model = self.parent.models[i]
glPushMatrix()
glTranslatef(*(model.offsets))
glRotatef(model.rot, 0.0, 0.0, 1.0)
glTranslatef(*(model.centeroffset))
glScalef(*model.scale)
model.batch.draw()
glPopMatrix()
glPopMatrix()
glEnable(GL_CULL_FACE)
# Draw cutting plane
if self.parent.cutting:
# FIXME: make this a proper Actor
axis = self.parent.cutting_axis
fixed_dist = self.parent.cutting_dist
dist, plane_width, plane_height = self.get_cutting_plane(axis, fixed_dist)
if dist is not None:
glPushMatrix()
if axis == "x":
glRotatef(90, 0, 1, 0)
glRotatef(90, 0, 0, 1)
glTranslatef(0, 0, dist)
elif axis == "y":
glRotatef(90, 1, 0, 0)
glTranslatef(0, 0, -dist)
elif axis == "z":
glTranslatef(0, 0, dist)
glDisable(GL_CULL_FACE)
glBegin(GL_TRIANGLES)
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0, 0.9, 0.15, 0.3))
glNormal3f(0, 0, self.parent.cutting_direction)
glVertex3f(plane_width, plane_height, 0)
glVertex3f(0, plane_height, 0)
glVertex3f(0, 0, 0)
glVertex3f(plane_width, 0, 0)
glVertex3f(plane_width, plane_height, 0)
glVertex3f(0, 0, 0)
glEnd()
glEnable(GL_CULL_FACE)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glEnable(GL_LINE_SMOOTH)
orig_linewidth = (GLfloat)()
glGetFloatv(GL_LINE_WIDTH, orig_linewidth)
glLineWidth(4.0)
glBegin(GL_LINE_LOOP)
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0, 0.8, 0.15, 1))
glVertex3f(0, 0, 0)
glVertex3f(0, plane_height, 0)
glVertex3f(plane_width, plane_height, 0)
glVertex3f(plane_width, 0, 0)
glEnd()
glLineWidth(orig_linewidth)
glDisable(GL_LINE_SMOOTH)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glPopMatrix()
glPopMatrix()
# ==========================================================================
# Utils
# ==========================================================================
def get_modelview_mat(self, local_transform):
mvmat = (GLdouble * 16)()
if local_transform:
glPushMatrix()
# Rotate according to trackball
glTranslatef(0, 0, -self.dist)
glMultMatrixd(build_rotmatrix(self.basequat)) # Rotate according to trackball
glTranslatef(- self.build_dimensions[3] - self.platform.width / 2,
- self.build_dimensions[4] - self.platform.depth / 2, 0) # Move origin to bottom left of platform
glGetDoublev(GL_MODELVIEW_MATRIX, mvmat)
glPopMatrix()
else:
glGetDoublev(GL_MODELVIEW_MATRIX, mvmat)
return mvmat
def get_cutting_plane(self, cutting_axis, fixed_dist, local_transform = False):
cutting_plane_sizes = {"x": (self.platform.depth, self.platform.height),
"y": (self.platform.width, self.platform.height),
"z": (self.platform.width, self.platform.depth)}
plane_width, plane_height = cutting_plane_sizes[cutting_axis]
if fixed_dist is not None:
return fixed_dist, plane_width, plane_height
ref_sizes = {"x": self.platform.width,
"y": self.platform.depth,
"z": self.platform.height,
}
ref_planes = {"x": (0, 0, 1),
"y": (0, 0, 1),
"z": (0, 1, 0)
}
ref_offsets = {"x": 0,
"y": 0,
"z": - self.platform.depth / 2
}
translate_axis = {"x": 0,
"y": 1,
"z": 2
}
fallback_ref_planes = {"x": (0, 1, 0),
"y": (1, 0, 0),
"z": (1, 0, 0)
}
fallback_ref_offsets = {"x": - self.platform.height / 2,
"y": - self.platform.width / 2,
"z": - self.platform.width / 2,
}
ref_size = ref_sizes[cutting_axis]
ref_plane = ref_planes[cutting_axis]
ref_offset = ref_offsets[cutting_axis]
inter = self.mouse_to_plane(self.mousepos[0], self.mousepos[1],
plane_normal = ref_plane,
plane_offset = ref_offset,
local_transform = local_transform)
max_size = max((self.platform.width,
self.platform.depth,
self.platform.height))
dist = None
if inter is not None and numpy.fabs(inter).max() + max_size / 2 < 2 * max_size:
dist = inter[translate_axis[cutting_axis]]
if dist is None or dist < -0.5 * ref_size or dist > 1.5 * ref_size:
ref_plane = fallback_ref_planes[cutting_axis]
ref_offset = fallback_ref_offsets[cutting_axis]
inter = self.mouse_to_plane(self.mousepos[0], self.mousepos[1],
plane_normal = ref_plane,
plane_offset = ref_offset,
local_transform = False)
if inter is not None and numpy.fabs(inter).max() + max_size / 2 < 2 * max_size:
dist = inter[translate_axis[cutting_axis]]
if dist is not None:
dist = min(1.5 * ref_size, max(-0.5 * ref_size, dist))
return dist, plane_width, plane_height
def main():
    """Stand-alone demo: open a frame containing an STL view panel."""
    app = wx.App(redirect = False)
    frame = wx.Frame(None, -1, "GL Window", size = (400, 400))
    # BUGFIX: StlViewPanel.__init__ requires a ``size`` argument; the old
    # call StlViewPanel(frame) raised TypeError on startup.
    StlViewPanel(frame, (400, 400))
    frame.Show(True)
    app.MainLoop()
    app.Destroy()

if __name__ == "__main__":
    main()
|
serve.py | import abc
import argparse
import importlib
import json
import logging
import multiprocessing
import os
import platform
import subprocess
import sys
import threading
import time
import traceback
import urllib
import uuid
from collections import defaultdict, OrderedDict
from itertools import chain, product
from typing import ClassVar, List, Set, Tuple
from localpaths import repo_root # type: ignore
from manifest.sourcefile import read_script_metadata, js_meta_re, parse_variants # type: ignore
from wptserve import server as wptserve, handlers
from wptserve import stash
from wptserve import config
from wptserve.handlers import filesystem_path, wrap_pipeline
from wptserve.utils import get_port, HTTPException, http2_compatible
from mod_pywebsocket import standalone as pywebsocket
# Guidance appended to fatal errors when the WPT test domains cannot be
# resolved; points the user at the hosts-file setup documentation.
EDIT_HOSTS_HELP = ("Please ensure all the necessary WPT subdomains "
                   "are mapped to a loopback device in /etc/hosts.\n"
                   "See https://web-platform-tests.org/running-tests/from-local-system.html#system-setup "
                   "for instructions.")
def replace_end(s, old, new):
    """Swap the trailing occurrence of *old* in *s* for *new*.

    The caller guarantees (and this function asserts) that *s* actually
    ends with *old*.
    """
    assert s.endswith(old)
    trimmed = s[:-len(old)]
    return trimmed + new
def domains_are_distinct(a, b):
    """Return True iff neither domain equals, nor is a subdomain of, the other.

    Compares the trailing labels of both names up to the length of the
    shorter one, so ``www.example.com`` and ``example.com`` are NOT distinct.
    """
    a_labels = a.split(".")
    b_labels = b.split(".")
    overlap = min(len(a_labels), len(b_labels))
    return a_labels[-overlap:] != b_labels[-overlap:]
class WrapperHandler(object):
    """Base class for handlers that serve a generated document wrapping a
    source file on disk (e.g. an HTML harness page around a ``.any.js`` test).

    Subclasses supply ``path_replace`` (suffix rewrite rules), ``wrapper``
    (a %-format template receiving meta/script/path/query) and the
    ``_meta_replacement``/``_script_replacement``/``check_exposure`` hooks.
    """

    # NOTE(review): ``__meta__`` has no special meaning to Python; for abc
    # to enforce the abstract methods this would need to be
    # ``metaclass=abc.ABCMeta``. Left unchanged to avoid a behaviour change.
    __meta__ = abc.ABCMeta

    headers = []  # type: ClassVar[List[Tuple[str, str]]]

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base
        self.handler = handlers.handler(self.handle_request)

    def __call__(self, request, response):
        self.handler(request, response)

    def handle_request(self, request, response):
        # Class-level headers plus any per-file .headers overrides.
        headers = self.headers + handlers.load_headers(
            request, self._get_filesystem_path(request))
        for header_name, header_value in headers:
            response.headers.set(header_name, header_value)

        self.check_exposure(request)

        path = self._get_path(request.url_parts.path, True)
        query = request.url_parts.query
        if query:
            query = "?" + query
        meta = "\n".join(self._get_meta(request))
        script = "\n".join(self._get_script(request))
        response.content = self.wrapper % {"meta": meta, "script": script, "path": path, "query": query}
        wrap_pipeline(path, request, response)

    def _get_path(self, path, resource_path):
        """Convert the path from an incoming request into a path corresponding to an "unwrapped"
        resource e.g. the file on disk that will be loaded in the wrapper.

        :param path: Path from the HTTP request
        :param resource_path: Boolean used to control whether to get the path for the resource that
                              this wrapper will load or the associated file on disk.
                              Typically these are the same but may differ when there are multiple
                              layers of wrapping e.g. for a .any.worker.html input the underlying disk file is
                              .any.js but the top level html file loads a resource with a
                              .any.worker.js extension, which itself loads the .any.js file.
                              If True return the path to the resource that the wrapper will load,
                              otherwise return the path to the underlying file on disk."""
        for item in self.path_replace:
            if len(item) == 2:
                src, dest = item
            else:
                assert len(item) == 3
                src = item[0]
                dest = item[2 if resource_path else 1]
            if path.endswith(src):
                path = replace_end(path, src, dest)
        return path

    def _get_filesystem_path(self, request):
        """Get the path of the underlying resource file on disk."""
        return self._get_path(filesystem_path(self.base_path, request, self.url_base), False)

    def _get_metadata(self, request):
        """Get an iterator over script metadata based on // META comments in the
        associated js file.

        :param request: The Request being processed.
        """
        path = self._get_filesystem_path(request)
        try:
            with open(path, "rb") as f:
                for key, value in read_script_metadata(f, js_meta_re):
                    yield key, value
        except IOError:
            # A missing source file maps to a 404 on the wrapper URL.
            raise HTTPException(404)

    def _get_meta(self, request):
        """Get an iterator over strings to inject into the wrapper document
        based on // META comments in the associated js file.

        :param request: The Request being processed.
        """
        for key, value in self._get_metadata(request):
            replacement = self._meta_replacement(key, value)
            if replacement:
                yield replacement

    def _get_script(self, request):
        """Get an iterator over strings to inject into the wrapper document
        based on // META comments in the associated js file.

        :param request: The Request being processed.
        """
        for key, value in self._get_metadata(request):
            replacement = self._script_replacement(key, value)
            if replacement:
                yield replacement

    @abc.abstractproperty
    def path_replace(self):
        # A list containing a mix of 2 item tuples with (input suffix, output suffix)
        # and 3-item tuples with (input suffix, filesystem suffix, resource suffix)
        # for the case where we want a different path in the generated resource to
        # the actual path on the filesystem (e.g. when there is another handler
        # that will wrap the file).
        return None

    @abc.abstractproperty
    def wrapper(self):
        # String template with variables path and meta for wrapper document
        return None

    @abc.abstractmethod
    def _meta_replacement(self, key, value):
        # Get the string to insert into the wrapper document, given
        # a specific metadata key: value pair.
        pass

    @abc.abstractmethod
    def check_exposure(self, request):
        # Raise an exception if this handler shouldn't be exposed after all.
        pass
class HtmlWrapperHandler(WrapperHandler):
    """WrapperHandler base for handlers that emit an HTML harness page.

    ``global_type`` (set by subclasses) restricts which ``// META: global``
    variants the wrapped test may be served in.
    """
    global_type = None  # type: ClassVar[str]
    headers = [('Content-Type', 'text/html')]

    def check_exposure(self, request):
        """Raise 404 unless the test's global variants include this handler's type."""
        if self.global_type:
            # BUGFIX: renamed local from ``globals`` (shadowed the builtin).
            global_variants = u""
            for (key, value) in self._get_metadata(request):
                if key == "global":
                    global_variants = value
                    break

            if self.global_type not in parse_variants(global_variants):
                raise HTTPException(404, "This test cannot be loaded in %s mode" %
                                    self.global_type)

    def _meta_replacement(self, key, value):
        # BUGFIX: the HTML entity escapes had been stripped to no-op
        # replacements (``replace("&", "&")``) / invalid syntax; restored.
        if key == "timeout":
            if value == "long":
                return '<meta name="timeout" content="long">'
        if key == "title":
            value = value.replace("&", "&amp;").replace("<", "&lt;")
            return '<title>%s</title>' % value
        return None

    def _script_replacement(self, key, value):
        if key == "script":
            attribute = value.replace("&", "&amp;").replace('"', "&quot;")
            return '<script src="%s"></script>' % attribute
        return None
class WorkersHandler(HtmlWrapperHandler):
    # Serves *.worker.html / *.any.worker.html pages that run the
    # underlying .js file inside a dedicated Worker.
    global_type = "dedicatedworker"
    path_replace = [(".any.worker.html", ".any.js", ".any.worker.js"),
                    (".worker.html", ".worker.js")]
    wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%(path)s%(query)s"));
</script>
"""
class WorkerModulesHandler(HtmlWrapperHandler):
    # Like WorkersHandler, but loads the worker as a JS module.
    global_type = "dedicatedworker-module"
    path_replace = [(".any.worker-module.html", ".any.js", ".any.worker-module.js"),
                    (".worker.html", ".worker.js")]
    wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%(path)s%(query)s", { type: "module" }));
</script>
"""
class WindowHandler(HtmlWrapperHandler):
    # Serves *.window.html pages running a .window.js test directly in
    # the document; no global-variant restriction applies.
    path_replace = [(".window.html", ".window.js")]
    wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class AnyHtmlHandler(HtmlWrapperHandler):
    # Serves *.any.html pages running a .any.js test in the window scope;
    # defines self.GLOBAL so the test can detect its environment.
    global_type = "window"
    path_replace = [(".any.html", ".any.js")]
    wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script>
self.GLOBAL = {
  isWindow: function() { return true; },
  isWorker: function() { return false; },
};
</script>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class SharedWorkersHandler(HtmlWrapperHandler):
    # Runs the .any.js test inside a SharedWorker.
    global_type = "sharedworker"
    path_replace = [(".any.sharedworker.html", ".any.js", ".any.worker.js")]
    wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new SharedWorker("%(path)s%(query)s"));
</script>
"""
class SharedWorkerModulesHandler(HtmlWrapperHandler):
    # Runs the .any.js test inside a module-type SharedWorker.
    global_type = "sharedworker-module"
    path_replace = [(".any.sharedworker-module.html", ".any.js", ".any.worker-module.js")]
    wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new SharedWorker("%(path)s%(query)s", { type: "module" }));
</script>
"""
class ServiceWorkersHandler(HtmlWrapperHandler):
    # Registers the .any.js test as a service worker (after clearing any
    # previous registration for the dummy scope) and collects its results.
    global_type = "serviceworker"
    path_replace = [(".any.serviceworker.html", ".any.js", ".any.worker.js")]
    wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
(async function() {
  const scope = 'does/not/exist';
  let reg = await navigator.serviceWorker.getRegistration(scope);
  if (reg) await reg.unregister();
  reg = await navigator.serviceWorker.register("%(path)s%(query)s", {scope});
  fetch_tests_from_worker(reg.installing);
})();
</script>
"""
class ServiceWorkerModulesHandler(HtmlWrapperHandler):
    # Module-type variant of ServiceWorkersHandler.
    global_type = "serviceworker-module"
    path_replace = [(".any.serviceworker-module.html",
                     ".any.js", ".any.worker-module.js")]
    wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
(async function() {
  const scope = 'does/not/exist';
  let reg = await navigator.serviceWorker.getRegistration(scope);
  if (reg) await reg.unregister();
  reg = await navigator.serviceWorker.register(
    "%(path)s%(query)s",
    { scope, type: 'module' },
  );
  fetch_tests_from_worker(reg.installing);
})();
</script>
"""
class BaseWorkerHandler(WrapperHandler):
    """WrapperHandler base for the .js documents loaded *inside* workers."""
    headers = [('Content-Type', 'text/javascript')]

    def _meta_replacement(self, key, value):
        # Meta tags are HTML-only; nothing to inject into a JS wrapper.
        return None

    @abc.abstractmethod
    def _create_script_import(self, attribute):
        # Take attribute (a string URL to a JS script) and return JS source to import the script
        # into the worker.
        pass

    def _script_replacement(self, key, value):
        # Values are embedded in double-quoted JS strings, so escape
        # backslashes and quotes.
        if key == "script":
            attribute = value.replace("\\", "\\\\").replace('"', '\\"')
            return self._create_script_import(attribute)
        if key == "title":
            value = value.replace("\\", "\\\\").replace('"', '\\"')
            return 'self.META_TITLE = "%s";' % value
        return None
class ClassicWorkerHandler(BaseWorkerHandler):
    # JS wrapper served to classic (non-module) workers.
    path_replace = [(".any.worker.js", ".any.js")]
    wrapper = """%(meta)s
self.GLOBAL = {
  isWindow: function() { return false; },
  isWorker: function() { return true; },
};
importScripts("/resources/testharness.js");
%(script)s
importScripts("%(path)s");
done();
"""

    def _create_script_import(self, attribute):
        return 'importScripts("%s")' % attribute
class ModuleWorkerHandler(BaseWorkerHandler):
    # JS wrapper served to module workers; uses static imports.
    path_replace = [(".any.worker-module.js", ".any.js")]
    wrapper = """%(meta)s
self.GLOBAL = {
  isWindow: function() { return false; },
  isWorker: function() { return true; },
};
import "/resources/testharness.js";
%(script)s
import "%(path)s";
done();
"""

    def _create_script_import(self, attribute):
        return 'import "%s";' % attribute
# URL rewrite rules: redirect the legacy WebIDLParser.js path to webidl2.js.
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]
class RoutesBuilder(object):
    """Accumulates wptserve route tuples in fixed precedence order:
    forbidden overrides, then forbidden paths, then extra handlers,
    then per-mount-point routes (later mount points win)."""

    def __init__(self):
        # Allowed exceptions under otherwise-forbidden /tools/.
        self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
                                   ("POST", "/tools/runner/update_manifest.py",
                                    handlers.python_script_handler)]

        # Paths that must never be served.
        self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
                          ("*", "/tools/*", handlers.ErrorHandler(404)),
                          ("*", "{spec}/tools/*", handlers.ErrorHandler(404)),
                          ("*", "/results/", handlers.ErrorHandler(404))]

        self.extra = []
        self.mountpoint_routes = OrderedDict()
        self.add_mount_point("/", None)

    def get_routes(self):
        routes = self.forbidden_override + self.forbidden + self.extra
        # Using reversed here means that mount points that are added later
        # get higher priority. This makes sense since / is typically added
        # first.
        for item in reversed(self.mountpoint_routes.values()):
            routes.extend(item)
        return routes

    def add_handler(self, method, route, handler):
        self.extra.append((str(method), str(route), handler))

    def add_static(self, path, format_args, content_type, route, headers=None):
        if headers is None:
            headers = {}
        handler = handlers.StaticHandler(path, format_args, content_type, **headers)
        self.add_handler("GET", str(route), handler)

    def add_mount_point(self, url_base, path):
        # Register the full set of wrapper/file routes under url_base.
        url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"

        self.mountpoint_routes[url_base] = []

        routes = [
            ("GET", "*.worker.html", WorkersHandler),
            ("GET", "*.worker-module.html", WorkerModulesHandler),
            ("GET", "*.window.html", WindowHandler),
            ("GET", "*.any.html", AnyHtmlHandler),
            ("GET", "*.any.sharedworker.html", SharedWorkersHandler),
            ("GET", "*.any.sharedworker-module.html", SharedWorkerModulesHandler),
            ("GET", "*.any.serviceworker.html", ServiceWorkersHandler),
            ("GET", "*.any.serviceworker-module.html", ServiceWorkerModulesHandler),
            ("GET", "*.any.worker.js", ClassicWorkerHandler),
            ("GET", "*.any.worker-module.js", ModuleWorkerHandler),
            ("GET", "*.asis", handlers.AsIsHandler),
            ("GET", "/.well-known/origin-policy", handlers.PythonScriptHandler),
            ("*", "*.py", handlers.PythonScriptHandler),
            ("GET", "*", handlers.FileHandler)
        ]

        for (method, suffix, handler_cls) in routes:
            self.mountpoint_routes[url_base].append(
                (method,
                 "%s%s" % (url_base if url_base != "/" else "", suffix),
                 handler_cls(base_path=path, url_base=url_base)))

    def add_file_mount_point(self, file_url, base_path):
        # Serve a single file (not a directory tree) at file_url.
        assert file_url.startswith("/")
        url_base = file_url[0:file_url.rfind("/") + 1]
        self.mountpoint_routes[file_url] = [("GET", file_url, handlers.FileHandler(base_path=base_path, url_base=url_base))]
def get_route_builder(logger, aliases, config):
    """Build a RoutesBuilder and register any user-configured aliases.

    Aliases whose "url-path" does not start with "/" (or whose
    "local-dir" is empty) are logged and skipped. Trailing-slash paths
    become directory mount points; others become single-file mounts.
    """
    builder = RoutesBuilder()
    for alias in aliases:
        url_path = alias["url-path"]
        local_dir = alias["local-dir"]
        if not url_path.startswith("/") or len(local_dir) == 0:
            logger.error("\"url-path\" value must start with '/'.")
            continue
        if url_path.endswith("/"):
            builder.add_mount_point(url_path, local_dir)
        else:
            builder.add_file_mount_point(url_path, local_dir)
    return builder
class ServerProc(object):
    """Runs one server daemon (HTTP/WS/etc.) in a child process and
    provides start/stop/liveness control from the parent."""

    def __init__(self, mp_context, scheme=None):
        self.proc = None
        self.daemon = None
        self.mp_context = mp_context
        # Cross-process event used to signal the child to shut down.
        self.stop_flag = mp_context.Event()
        self.scheme = scheme

    def start(self, init_func, host, port, paths, routes, bind_address, config, log_handlers, **kwargs):
        """Spawn the child process that creates and runs the daemon."""
        self.proc = self.mp_context.Process(target=self.create_daemon,
                                            args=(init_func, host, port, paths, routes, bind_address,
                                                  config, log_handlers),
                                            name='%s on port %s' % (self.scheme, port),
                                            kwargs=kwargs)
        self.proc.daemon = True
        self.proc.start()

    def create_daemon(self, init_func, host, port, paths, routes, bind_address,
                      config, log_handlers, **kwargs):
        """Child-process entry point: build the daemon, run it until the
        stop flag is set, then shut it down."""
        # Ensure that when we start this in a new process we have the global lock
        # in the logging module unlocked
        importlib.reload(logging)
        logger = get_logger(config.log_level, log_handlers)
        if sys.platform == "darwin":
            # on Darwin, NOFILE starts with a very low limit (256), so bump it up a little
            # by way of comparison, Debian starts with a limit of 1024, Windows 512
            import resource  # local, as it only exists on Unix-like systems
            maxfilesperproc = int(subprocess.check_output(
                ["sysctl", "-n", "kern.maxfilesperproc"]
            ).strip())
            soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
            # 2048 is somewhat arbitrary, but gives us some headroom for wptrunner --parallel
            # note that it's expected that 2048 will be the min here
            new_soft = min(2048, maxfilesperproc, hard)
            if soft < new_soft:
                resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
        try:
            self.daemon = init_func(logger, host, port, paths, routes, bind_address, config, **kwargs)
        except OSError:
            # BUGFIX: logging methods take no ``file`` keyword; the old
            # ``logger.critical(..., file=sys.stderr)`` raised TypeError.
            logger.critical("Socket error on port %s" % port)
            raise
        except Exception:
            logger.critical(traceback.format_exc())
            raise

        if self.daemon:
            try:
                self.daemon.start()
                try:
                    self.stop_flag.wait()
                except KeyboardInterrupt:
                    pass
                finally:
                    self.daemon.stop()
            except Exception:
                logger.critical(traceback.format_exc())
                raise

    def stop(self, timeout=None):
        """Signal the child to stop and wait up to *timeout* for it to exit."""
        self.stop_flag.set()
        self.proc.join(timeout)

    def is_alive(self):
        return self.proc.is_alive()
def check_subdomains(logger, config, routes, mp_context, log_handlers):
    """Start a throwaway HTTP server and probe every configured domain.

    Exits the whole process (with hosts-file guidance) if the base URL
    cannot be reached within ~10 seconds or any subdomain fails to
    resolve, since the test suite cannot run in that state.
    """
    paths = config.paths
    bind_address = config.bind_address

    host = config.server_host
    port = get_port()
    logger.debug("Going to use port %d to check subdomains" % port)

    wrapper = ServerProc(mp_context)
    wrapper.start(start_http_server, host, port, paths, routes,
                  bind_address, config, log_handlers)

    url = "http://{}:{}/".format(host, port)
    connected = False
    # Retry for up to ~10s while the server process comes up.
    for i in range(10):
        try:
            urllib.request.urlopen(url)
            connected = True
            break
        except urllib.error.URLError:
            time.sleep(1)

    if not connected:
        logger.critical("Failed to connect to test server "
                        "on {}. {}".format(url, EDIT_HOSTS_HELP))
        sys.exit(1)

    for domain in config.domains_set:
        if domain == host:
            continue

        try:
            urllib.request.urlopen("http://%s:%d/" % (domain, port))
        except Exception:
            logger.critical("Failed probing domain {}. {}".format(domain, EDIT_HOSTS_HELP))
            sys.exit(1)

    wrapper.stop()
def make_hosts_file(config, host):
    """Return hosts-file text mapping every WPT domain to *host*.

    On Windows, also maps the deliberately-nonexistent domains to
    0.0.0.0 (see below).
    """
    lines = ["%s\t%s\n" % (host, domain) for domain in config.domains_set]

    # Windows interprets the IP address 0.0.0.0 as non-existent, making it an
    # appropriate alias for non-existent hosts. However, UNIX-like systems
    # interpret the same address to mean any IP address, which is inappropriate
    # for this context. These systems do not reserve any value for this
    # purpose, so the unavailability of the domains must be taken for granted.
    #
    # https://github.com/web-platform-tests/wpt/issues/10560
    if platform.uname()[0] == "Windows":
        lines.extend("0.0.0.0\t%s\n" % not_domain
                     for not_domain in config.not_domains_set)

    return "".join(lines)
def start_servers(logger, host, ports, paths, routes, bind_address, config,
                  mp_context, log_handlers, **kwargs):
    """Start a ServerProc for every configured scheme/port.

    :param ports: mapping of scheme -> list of ports for that scheme.
    :returns: dict mapping scheme -> list of (port, ServerProc) tuples.
    """
    servers = defaultdict(list)
    # BUGFIX(idiom): the inner loop variable used to rebind the ``ports``
    # parameter itself; renamed to ``scheme_ports`` to avoid shadowing.
    for scheme, scheme_ports in ports.items():
        # http and https are expected to have exactly two ports each.
        assert len(scheme_ports) == {"http": 2, "https": 2}.get(scheme, 1)

        # If trying to start HTTP/2.0 server, check compatibility
        if scheme == 'h2' and not http2_compatible():
            logger.error('Cannot start HTTP/2.0 server as the environment is not compatible. ' +
                         'Requires OpenSSL 1.0.2+')
            continue

        for port in scheme_ports:
            if port is None:
                continue
            init_func = {
                "http": start_http_server,
                "http-private": start_http_server,
                "http-public": start_http_server,
                "https": start_https_server,
                "https-private": start_https_server,
                "https-public": start_https_server,
                "h2": start_http2_server,
                "ws": start_ws_server,
                "wss": start_wss_server,
                "webtransport-h3": start_webtransport_h3_server,
            }[scheme]

            server_proc = ServerProc(mp_context, scheme=scheme)
            server_proc.start(init_func, host, port, paths, routes, bind_address,
                              config, log_handlers, **kwargs)
            servers[scheme].append((port, server_proc))

    return servers
def startup_failed(logger):
    # Common fatal-exit path for server start failures: the usual cause
    # is missing /etc/hosts entries, so print the setup guidance and exit.
    logger.critical(EDIT_HOSTS_HELP)
    sys.exit(1)
def start_http_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Create a plain-HTTP wptserve httpd; exits the process on failure."""
    try:
        return wptserve.WebTestHttpd(host=host,
                                     port=port,
                                     doc_root=paths["doc_root"],
                                     routes=routes,
                                     rewrites=rewrites,
                                     bind_address=bind_address,
                                     config=config,
                                     use_ssl=False,
                                     key_file=None,
                                     certificate=None,
                                     latency=kwargs.get("latency"))
    except Exception:
        # Any construction failure is treated as a fatal hosts/setup problem.
        startup_failed(logger)
def start_https_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Create an HTTPS wptserve httpd using the configured TLS credentials;
    exits the process on failure."""
    try:
        return wptserve.WebTestHttpd(host=host,
                                     port=port,
                                     doc_root=paths["doc_root"],
                                     routes=routes,
                                     rewrites=rewrites,
                                     bind_address=bind_address,
                                     config=config,
                                     use_ssl=True,
                                     key_file=config.ssl_config["key_path"],
                                     certificate=config.ssl_config["cert_path"],
                                     encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
                                     latency=kwargs.get("latency"))
    except Exception:
        startup_failed(logger)
def start_http2_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Create an HTTP/2-capable wptserve httpd (TLS required); exits the
    process on failure."""
    try:
        return wptserve.WebTestHttpd(host=host,
                                     port=port,
                                     handler_cls=wptserve.Http2WebTestRequestHandler,
                                     doc_root=paths["doc_root"],
                                     ws_doc_root=paths["ws_doc_root"],
                                     routes=routes,
                                     rewrites=rewrites,
                                     bind_address=bind_address,
                                     config=config,
                                     use_ssl=True,
                                     key_file=config.ssl_config["key_path"],
                                     certificate=config.ssl_config["cert_path"],
                                     encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
                                     latency=kwargs.get("latency"),
                                     http2=True)
    except Exception:
        startup_failed(logger)
class WebSocketDaemon(object):
    """Wraps a pywebsocket standalone server, running it on a daemon thread."""

    def __init__(self, host, port, doc_root, handlers_root, bind_address, ssl_config):
        logger = logging.getLogger()
        self.host = host
        cmd_args = ["-p", port,
                    "-d", doc_root,
                    "-w", handlers_root]

        if ssl_config is not None:
            cmd_args += ["--tls",
                         "--private-key", ssl_config["key_path"],
                         "--certificate", ssl_config["cert_path"]]

        if (bind_address):
            cmd_args = ["-H", host] + cmd_args

        opts, args = pywebsocket._parse_args_and_config(cmd_args)
        opts.cgi_directories = []
        opts.is_executable_method = None
        self.server = pywebsocket.WebSocketServer(opts)
        ports = [item[0].getsockname()[1] for item in self.server._sockets]
        if not ports:
            # TODO: Fix the logging configuration in WebSockets processes
            # see https://github.com/web-platform-tests/wpt/issues/22719
            # BUGFIX: logging methods take no ``file`` keyword; the old
            # ``logger.critical(..., file=sys.stderr)`` raised TypeError.
            logger.critical("Failed to start websocket server on port %s, "
                            "is something already using that port?" % port)
            raise OSError()
        assert all(item == ports[0] for item in ports)
        self.port = ports[0]
        self.started = False
        self.server_thread = None

    def start(self):
        self.started = True
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        # Modern spelling of the deprecated setDaemon(True): don't hang on exit.
        self.server_thread.daemon = True
        self.server_thread.start()

    def stop(self):
        """
        Stops the server.

        If the server is not running, this method has no effect.
        """
        if self.started:
            try:
                self.server.shutdown()
                self.server.server_close()
                self.server_thread.join()
                self.server_thread = None
            except AttributeError:
                # Server/thread already torn down; treat stop as idempotent.
                pass
            self.started = False
        self.server = None
def start_ws_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Start a plaintext WebSocket server; logs via startup_failed() on error."""
    try:
        daemon = WebSocketDaemon(host,
                                 str(port),
                                 repo_root,
                                 config.paths["ws_doc_root"],
                                 bind_address,
                                 ssl_config=None)
        return daemon
    except Exception:
        startup_failed(logger)
def start_wss_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Start a TLS WebSocket server; logs via startup_failed() on error."""
    try:
        daemon = WebSocketDaemon(host,
                                 str(port),
                                 repo_root,
                                 config.paths["ws_doc_root"],
                                 bind_address,
                                 config.ssl_config)
        return daemon
    except Exception:
        startup_failed(logger)
def start_webtransport_h3_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Start the WebTransport-over-HTTP/3 server.

    Exits the process when startup fails, since the remaining servers cannot
    meaningfully run without it once it was requested.
    """
    try:
        # TODO(bashi): Move the following import to the beginning of this file
        # once WebTransportH3Server is enabled by default.
        from webtransport.h3.webtransport_h3_server import WebTransportH3Server  # type: ignore
        return WebTransportH3Server(host=host,
                                    port=port,
                                    doc_root=paths["doc_root"],
                                    cert_path=config.ssl_config["cert_path"],
                                    key_path=config.ssl_config["key_path"],
                                    logger=logger)
    except Exception as error:
        logger.critical(
            "Failed to start WebTransport over HTTP/3 server: {}".format(error))
        # Bug fix: this exited with status 0 on a critical failure, which made
        # the failure invisible to any supervising process.
        sys.exit(1)
def start(logger, config, routes, mp_context, log_handlers, **kwargs):
    """Start all configured servers and return the {scheme: [(port, server)]} map."""
    host = config["server_host"]
    bind_address = config["bind_address"]
    logger.debug("Using ports: %r" % config.ports)
    return start_servers(logger, host, config.ports, config.paths, routes,
                         bind_address, config, mp_context, log_handlers, **kwargs)
def iter_servers(servers):
    """Yield each server object from a {scheme: [(port, server), ...]} mapping."""
    for port_server_pairs in servers.values():
        for _port, server in port_server_pairs:
            yield server
def _make_subdomains_product(s: Set[str], depth: int = 2) -> Set[str]:
return {u".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1)))}
def _make_origin_policy_subdomains(limit: int) -> Set[str]:
return {u"op%d" % x for x in range(1,limit+1)}
# Base subdomain labels used by tests; the non-ASCII entries exercise IDNA handling.
_subdomains = {u"www",
               u"www1",
               u"www2",
               u"天気の良い日",
               u"élève"}

_not_subdomains = {u"nonexistent"}

# Expand to every 1- and 2-label combination (e.g. "www1.www2").
_subdomains = _make_subdomains_product(_subdomains)

# Origin policy subdomains need to not be reused by any other tests, since origin policies have
# origin-wide impacts like installing a CSP or Feature Policy that could interfere with features
# under test.
# See https://github.com/web-platform-tests/rfcs/pull/44.
_subdomains |= _make_origin_policy_subdomains(99)

_not_subdomains = _make_subdomains_product(_not_subdomains)
class ConfigBuilder(config.ConfigBuilder):
    """serve config

    This subclasses wptserve.config.ConfigBuilder to add serve config options.
    """

    # Defaults merged under any user-supplied configuration. Ports listed as
    # "auto" are resolved to free ports when the config is built.
    _default = {
        "browser_host": "web-platform.test",
        "alternate_hosts": {
            "alt": "not-web-platform.test"
        },
        "doc_root": repo_root,
        "ws_doc_root": os.path.join(repo_root, "websockets", "handlers"),
        "server_host": None,
        "ports": {
            "http": [8000, "auto"],
            "http-private": ["auto"],
            "http-public": ["auto"],
            "https": [8443, 8444],
            "https-private": ["auto"],
            "https-public": ["auto"],
            "ws": ["auto"],
            "wss": ["auto"],
        },
        "check_subdomains": True,
        "log_level": "info",
        "bind_address": True,
        "ssl": {
            "type": "pregenerated",
            "encrypt_after_connect": False,
            "openssl": {
                "openssl_binary": "openssl",
                "base_path": "_certs",
                "password": "web-platform-tests",
                "force_regenerate": False,
                "duration": 30,
                "base_conf_path": None
            },
            "pregenerated": {
                "host_key_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.key"),
                "host_cert_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.pem")
            },
            "none": {}
        },
        "aliases": []
    }

    # "ws_doc_root" is derived (see _get_ws_doc_root) on top of the base
    # class's computed properties.
    computed_properties = ["ws_doc_root"] + config.ConfigBuilder.computed_properties

    def __init__(self, logger, *args, **kwargs):
        """Fill in the default (not-)subdomain sets, then validate that the
        alternate host is registrably distinct from the browser host."""
        if "subdomains" not in kwargs:
            kwargs["subdomains"] = _subdomains
        if "not_subdomains" not in kwargs:
            kwargs["not_subdomains"] = _not_subdomains
        super(ConfigBuilder, self).__init__(
            logger,
            *args,
            **kwargs
        )
        # Enter the builder context once here purely to validate the hosts.
        with self as c:
            browser_host = c.get("browser_host")
            alternate_host = c.get("alternate_hosts", {}).get("alt")

            if not domains_are_distinct(browser_host, alternate_host):
                raise ValueError(
                    "Alternate host must be distinct from browser host"
                )

    def _get_ws_doc_root(self, data):
        # Explicit setting wins; otherwise derive from the document root.
        if data["ws_doc_root"] is not None:
            return data["ws_doc_root"]
        else:
            return os.path.join(data["doc_root"], "websockets", "handlers")

    def _get_paths(self, data):
        # Expose ws_doc_root through the generic paths mapping as well.
        rv = super(ConfigBuilder, self)._get_paths(data)
        rv["ws_doc_root"] = data["ws_doc_root"]
        return rv
def build_config(logger, override_path=None, config_cls=ConfigBuilder, **kwargs):
    """Build the serve configuration.

    Overrides are applied in increasing priority: class defaults, the JSON
    file at *override_path* (if it exists), the --config JSON file, then
    individual command-line kwargs (verbose, doc_root, ws_doc_root).
    Raises ValueError for a missing --config path or missing doc roots.
    """
    rv = config_cls(logger)

    enable_http2 = kwargs.get("h2")
    if enable_http2 is None:
        enable_http2 = True  # HTTP/2 is on unless explicitly disabled
    if enable_http2:
        # NOTE(review): this mutates the class-level _default dict shared by
        # all config_cls instances — presumably fine because only one config
        # is built per process; confirm before building multiple configs.
        rv._default["ports"]["h2"] = [9000]
    if kwargs.get("webtransport_h3"):
        rv._default["ports"]["webtransport-h3"] = [11000]

    if override_path and os.path.exists(override_path):
        with open(override_path) as f:
            override_obj = json.load(f)
        rv.update(override_obj)

    if kwargs.get("config_path"):
        other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
        if os.path.exists(other_path):
            with open(other_path) as f:
                override_obj = json.load(f)
            rv.update(override_obj)
        else:
            raise ValueError("Config path %s does not exist" % other_path)

    if kwargs.get("verbose"):
        rv.log_level = "debug"

    # Command-line paths override any config-file values, but must exist.
    overriding_path_args = [("doc_root", "Document root"),
                            ("ws_doc_root", "WebSockets document root")]
    for key, title in overriding_path_args:
        value = kwargs.get(key)
        if value is None:
            continue
        value = os.path.abspath(os.path.expanduser(value))
        if not os.path.exists(value):
            raise ValueError("%s path %s does not exist" % (title, value))
        setattr(rv, key, value)

    return rv
def get_parser():
    """Build the command-line parser for the serve entry point."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--latency", type=int,
                            help="Artificial latency to add before sending http responses, in ms")
    arg_parser.add_argument("--config", action="store", dest="config_path",
                            help="Path to external config file")
    arg_parser.add_argument("--doc_root", action="store", dest="doc_root",
                            help="Path to document root. Overrides config.")
    arg_parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root",
                            help="Path to WebSockets document root. Overrides config.")
    arg_parser.add_argument("--alias_file", action="store", dest="alias_file",
                            help="File with entries for aliases/multiple doc roots. In form of `/ALIAS_NAME/, DOC_ROOT\\n`")
    # --h2 is a hidden no-op (default behavior); --no-h2 turns HTTP/2 off.
    arg_parser.add_argument("--h2", action="store_true", dest="h2", default=None,
                            help=argparse.SUPPRESS)
    arg_parser.add_argument("--no-h2", action="store_false", dest="h2", default=None,
                            help="Disable the HTTP/2.0 server")
    arg_parser.add_argument("--webtransport-h3", action="store_true",
                            help="Enable WebTransport over HTTP/3 server")
    arg_parser.add_argument("--exit-after-start", action="store_true", help="Exit after starting servers")
    arg_parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
    arg_parser.set_defaults(report=False, is_wave=False)
    return arg_parser
class MpContext(object):
    """Fallback for multiprocessing.get_context(): forwards every attribute
    lookup to the top-level multiprocessing module."""

    def __getattr__(self, attr):
        # Delegate everything (Process, Queue, cpu_count, ...) to the module.
        return getattr(multiprocessing, attr)
def get_logger(log_level, log_handlers):
    """Return the root logger configured to log at *log_level*.

    If the logger already has handlers, *log_handlers* is ignored. Otherwise
    the given handlers are attached; with no handlers given at all, a stdout
    stream handler is installed. Typically called once per process.

    :param log_level: - A string representing a log level e.g. "info"
    :param log_handlers: - Optional list of Handler objects.
    """
    logger = logging.getLogger()
    logger.setLevel(getattr(logging, log_level.upper()))
    if logger.hasHandlers():
        return logger
    if log_handlers is not None:
        for handler in log_handlers:
            logger.addHandler(handler)
        return logger
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(
        logging.Formatter("[%(asctime)s %(processName)s] %(levelname)s - %(message)s"))
    logger.addHandler(stream_handler)
    return logger
def run(config_cls=ConfigBuilder, route_builder=None, mp_context=None, log_handlers=None,
        **kwargs):
    """Build config and routes, start every server, then block until a server
    process dies or the user interrupts. Returns the count of server
    subprocesses that exited with a non-zero status."""
    logger = get_logger("INFO", log_handlers)

    if mp_context is None:
        if hasattr(multiprocessing, "get_context"):
            mp_context = multiprocessing.get_context()
        else:
            # Very old Pythons: fall back to the module-delegating shim.
            mp_context = MpContext()

    with build_config(logger,
                      os.path.join(repo_root, "config.json"),
                      config_cls=config_cls,
                      **kwargs) as config:
        # This sets the right log level
        logger = get_logger(config.log_level, log_handlers)

        bind_address = config["bind_address"]

        # Each alias-file line is "/ALIAS_NAME/, DOC_ROOT".
        if kwargs.get("alias_file"):
            with open(kwargs["alias_file"], 'r') as alias_file:
                for line in alias_file:
                    alias, doc_root = [x.strip() for x in line.split(',')]
                    config["aliases"].append({
                        'url-path': alias,
                        'local-dir': doc_root,
                    })

        if route_builder is None:
            route_builder = get_route_builder
        routes = route_builder(logger, config.aliases, config).get_routes()

        if config["check_subdomains"]:
            check_subdomains(logger, config, routes, mp_context, log_handlers)

        stash_address = None
        if bind_address:
            stash_address = (config.server_host, get_port(""))
            logger.debug("Going to use port %d for stash" % stash_address[1])

        with stash.StashServer(stash_address, authkey=str(uuid.uuid4())):
            servers = start(logger, config, routes, mp_context, log_handlers, **kwargs)

            if not kwargs["exit_after_start"]:
                try:
                    # Periodically check if all the servers are alive
                    server_process_exited = False
                    while not server_process_exited:
                        for server in iter_servers(servers):
                            server.proc.join(1)
                            if not server.proc.is_alive():
                                server_process_exited = True
                                break
                except KeyboardInterrupt:
                    pass

            # Shut everything down and tally failures for the exit status.
            failed_subproc = 0
            for server in iter_servers(servers):
                subproc = server.proc
                if subproc.is_alive():
                    logger.info('Status of subprocess "%s": running', subproc.name)
                    server.stop(timeout=1)

                if server.proc.exitcode == 0:
                    logger.info('Status of subprocess "%s": exited correctly', subproc.name)
                else:
                    logger.warning('Status of subprocess "%s": failed. Exit with non-zero status: %d',
                                   subproc.name, subproc.exitcode)
                    failed_subproc += 1
            return failed_subproc
def main():
    """Entry point: parse the command line and run the servers."""
    args = vars(get_parser().parse_args())
    return run(**args)
|
runtests.py | #!/usr/bin/env python
from __future__ import print_function
import atexit
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
# Interpreter detection: a few test expectations differ between PyPy and CPython.
try:
    import platform
    IS_PYPY = platform.python_implementation() == 'PyPy'
    IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
    # Unknown implementation: assume CPython.
    IS_CPYTHON = True
    IS_PYPY = False

IS_PY2 = sys.version_info[0] < 3

from io import open as io_open
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO  # doesn't accept 'str' in Py2

try:
    import cPickle as pickle
except ImportError:
    import pickle

try:
    import threading
except ImportError:  # No threads, no problems
    threading = None

try:
    from collections import defaultdict
except ImportError:
    # Minimal stand-in for very old Pythons without collections.defaultdict.
    class defaultdict(object):
        def __init__(self, default_factory=lambda : None):
            self._dict = {}
            self.default_factory = default_factory
        def __getitem__(self, key):
            if key not in self._dict:
                self._dict[key] = self.default_factory()
            return self._dict[key]
        def __setitem__(self, key, value):
            self._dict[key] = value
        def __contains__(self, key):
            return key in self._dict
        def __repr__(self):
            return repr(self._dict)
        def __nonzero__(self):
            return bool(self._dict)

# skip_test() either raises unittest.SkipTest or, where that is unavailable,
# just reports the skip on stderr.
try:
    from unittest import SkipTest
except ImportError:
    class SkipTest(Exception):  # don't raise, only provided to allow except-ing it!
        pass
    def skip_test(reason):
        sys.stderr.write("Skipping test: %s\n" % reason)
else:
    def skip_test(reason):
        raise SkipTest(reason)

try:
    basestring
except NameError:
    basestring = str
# Whether sources get cythonized at all (disabled for plain-Python runs).
WITH_CYTHON = True
# Directory of a Py3-converted source tree, when one is used.
CY3_DIR = None

from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
from distutils import ccompiler

# Paths queued for deletion at interpreter exit (see _cleanup_files below).
_to_clean = []
@atexit.register
def _cleanup_files():
    """
    This is only used on Cygwin to clean up shared libraries that are unsafe
    to delete while the test suite is running.
    """
    for path in _to_clean:
        if os.path.isdir(path):
            shutil.rmtree(path, ignore_errors=True)
            continue
        # Best effort: the file may already be gone or still locked.
        try:
            os.remove(path)
        except OSError:
            pass
def get_distutils_distro(_cache=[]):
    """Return a (process-cached) distutils Distribution with config files
    parsed, excluding setup.cfg. The mutable default is used deliberately
    as a cache."""
    if _cache:
        return _cache[0]
    # late import to accommodate for setuptools override
    from distutils.dist import Distribution
    distutils_distro = Distribution()

    if sys.platform == 'win32':
        # TODO: Figure out why this hackery (see https://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
        # NOTE(review): the config files are intentionally located and parsed
        # twice here — presumably part of the win32 hackery above; confirm
        # before deduplicating.
        config_files = distutils_distro.find_config_files()
        try:
            config_files.remove('setup.cfg')
        except ValueError:
            pass
        distutils_distro.parse_config_files(config_files)

        cfgfiles = distutils_distro.find_config_files()
        try:
            cfgfiles.remove('setup.cfg')
        except ValueError:
            pass
        distutils_distro.parse_config_files(cfgfiles)

    _cache.append(distutils_distro)
    return distutils_distro
# Module each tag (or test name) requires to be importable; tests whose
# dependency cannot be imported are skipped.
EXT_DEP_MODULES = {
    'tag:numpy': 'numpy',
    'tag:numpy_old': 'numpy',
    'tag:pythran': 'pythran',
    'tag:setuptools': 'setuptools.sandbox',
    'tag:asyncio': 'asyncio',
    'tag:pstats': 'pstats',
    'tag:posix': 'posix',
    'tag:array': 'array',
    'tag:coverage': 'Cython.Coverage',
    'Coverage': 'Cython.Coverage',
    'tag:ipython': 'IPython.testing.globalipapp',
    'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
    'tag:test.support': 'test.support',  # support module for CPython unit tests
}
def patch_inspect_isfunction():
    """Monkey-patch inspect.isfunction so Cython's compiled functions
    ("cython_function_or_method") also count as functions."""
    import inspect
    original = inspect.isfunction

    def isfunction(obj):
        if original(obj):
            return True
        return type(obj).__name__ == 'cython_function_or_method'

    # Keep the original around so unpatch_inspect_isfunction() can restore it.
    isfunction._orig_isfunction = original
    inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
    """Restore the original inspect.isfunction; a no-op if not patched."""
    import inspect
    original = getattr(inspect.isfunction, '_orig_isfunction', None)
    if original is not None:
        inspect.isfunction = original
def def_to_cdef(source):
    '''
    Converts the module-level def methods into cdef methods, i.e.

        @decorator
        def foo([args]):
            """
            [tests]
            """
            [body]

    becomes

        def foo([args]):
            """
            [tests]
            """
            return foo_c([args])

        cdef foo_c([args]):
            [body]
    '''
    output = []
    skip = False
    # Matches a simple module-level def; argument lists with '(' ')' or '*'
    # inside are deliberately not matched.
    def_node = re.compile(r'def (\w+)\(([^()*]*)\):').match
    lines = iter(source.split('\n'))
    for line in lines:
        if not line.strip():
            output.append(line)
            continue

        if skip:
            # Currently skipping a decorated def's body; stop skipping at the
            # next line that is not indented.
            if line[0] != ' ':
                skip = False
            else:
                continue

        if line[0] == '@':
            # Decorated defs are dropped entirely (decorator plus body).
            skip = True
            continue

        m = def_node(line)
        if m:
            name = m.group(1)
            args = m.group(2)
            if args:
                # Strip C-style type declarations, keeping only the arg names.
                args_no_types = ", ".join(arg.split()[-1] for arg in args.split(','))
            else:
                args_no_types = ""
            output.append("def %s(%s):" % (name, args_no_types))
            # Copy a leading docstring (if any) into the def wrapper.
            line = next(lines)
            if '"""' in line:
                has_docstring = True
                output.append(line)
                for line in lines:
                    output.append(line)
                    if '"""' in line:
                        break
            else:
                has_docstring = False
            output.append(" return %s_c(%s)" % (name, args_no_types))
            output.append('')
            output.append("cdef %s_c(%s):" % (name, args))
            # Without a docstring, `line` is already the first body line and
            # must be emitted under the cdef.
            if not has_docstring:
                output.append(line)
        else:
            output.append(line)

    return '\n'.join(output)
def exclude_extension_in_pyver(*versions):
    """Return an EXT_EXTRAS hook that excludes the extension when the running
    Python's (major, minor) is one of *versions*; otherwise passes it through."""
    def check(ext):
        if sys.version_info[:2] in versions:
            return EXCLUDE_EXT
        return ext
    return check
def exclude_extension_on_platform(*platforms):
    """Return an EXT_EXTRAS hook that excludes the extension on any of
    *platforms* (compared against sys.platform)."""
    def check(ext):
        if sys.platform in platforms:
            return EXCLUDE_EXT
        return ext
    return check
def update_linetrace_extension(ext):
    """Enable Cython line tracing for *ext* by defining the CYTHON_TRACE macro."""
    macros = ext.define_macros
    macros.append(('CYTHON_TRACE', 1))
    return ext
def update_old_numpy_extension(ext):
    # Same numpy setup as update_numpy_extension, but without defining
    # NPY_NO_DEPRECATED_API, for tests that exercise the old numpy C-API.
    update_numpy_extension(ext, set_api17_macro=False)
def update_numpy_extension(ext, set_api17_macro=True):
    """Add numpy headers, the npymath library, and (optionally) the
    NPY_NO_DEPRECATED_API macro to *ext*."""
    import numpy
    # NOTE(review): numpy.distutils was removed in recent numpy releases —
    # presumably this file is pinned to an older numpy; confirm.
    from numpy.distutils.misc_util import get_info

    ext.include_dirs.append(numpy.get_include())

    if set_api17_macro:
        ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))

    # We need the npymath library for numpy.math.
    # This is typically a static-only library.
    for attr, value in get_info('npymath').items():
        getattr(ext, attr).extend(value)
def update_openmp_extension(ext):
    """Mark *ext* as an OpenMP build and attach the detected compiler/linker
    flags; returns EXCLUDE_EXT when OpenMP is not usable here."""
    ext.openmp = True
    if sys.platform == 'win32' and sys.version_info[:2] == (3, 4):
        # OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
        return EXCLUDE_EXT

    flags = OPENMP_CPP_COMPILER_FLAGS if ext.language == 'cpp' else OPENMP_C_COMPILER_FLAGS
    if flags:
        compile_flags, link_flags = flags
        ext.extra_compile_args.extend(compile_flags.split())
        ext.extra_link_args.extend(link_flags.split())
        return ext

    if sys.platform == 'win32':
        # Keep the extension on Windows even without detected flags
        # (matches get_openmp_compiler_flags()'s win32 handling).
        return ext

    return EXCLUDE_EXT
def update_cpp11_extension(ext):
    """
    update cpp11 extensions that will run on versions of gcc >4.8
    """
    gcc_version = get_gcc_version(ext.language)
    if gcc_version:
        if float(gcc_version.group(1)) > 4.8:
            ext.extra_compile_args.append("-std=c++11")
        return ext

    clang_version = get_clang_version(ext.language)
    if clang_version:
        ext.extra_compile_args.append("-std=c++11")
        if sys.platform == "darwin":
            # Clang on macOS additionally needs libc++ and a deployment target.
            ext.extra_compile_args.append("-stdlib=libc++")
            ext.extra_compile_args.append("-mmacosx-version-min=10.7")
        return ext

    # Neither gcc nor clang detected: don't attempt a C++11 build.
    return EXCLUDE_EXT
def get_cc_version(language):
    """
    Return the compiler's version banner ('cc -v' output) for the C or C++
    compiler, or '' if no compiler could be found or executed.
    """
    cc = sysconfig.get_config_var('CXX' if language == 'cpp' else 'CC')
    if not cc:
        cc = ccompiler.get_default_compiler()
    if not cc:
        return ''

    # For some reason, cc can be e.g. 'gcc -pthread'
    cc = cc.split()[0]

    # Force english output
    env = os.environ.copy()
    env['LC_MESSAGES'] = 'C'
    try:
        p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
    except EnvironmentError:
        # Be compatible with Python 3
        warnings.warn("Unable to find the %s compiler: %s: %s" %
                      (language, os.strerror(sys.exc_info()[1].errno), cc))
        return ''
    _, output = p.communicate()
    return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
    """Match 'gcc version X.Y' in the compiler banner; None if not gcc."""
    banner = get_cc_version(language)
    return re.compile(r"gcc version (\d+\.\d+)").search(banner)
def get_clang_version(language):
    """Match a clang version ('clang-X.Y' or 'clang version X.Y') in the
    compiler banner; None if not clang."""
    banner = get_cc_version(language)
    return re.compile(r"clang(?:-|\s+version\s+)(\d+\.\d+)").search(banner)
def get_openmp_compiler_flags(language):
    """
    As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't
    (currently) check for other compilers.

    returns a two-tuple of (CFLAGS, LDFLAGS) to build the OpenMP extension
    """
    gcc_version = get_gcc_version(language)

    if not gcc_version:
        if sys.platform == 'win32':
            # Assume MSVC, which takes /openmp with no extra link flags.
            return '/openmp', ''
        else:
            return None  # not gcc - FIXME: do something about other compilers

    # gcc defines "__int128_t", assume that at least all 64 bit architectures have it
    # (side effect: records int128 support in the module-level flag).
    global COMPILER_HAS_INT128
    COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60

    compiler_version = gcc_version.group(1)
    # NOTE: lexicographic component compare; returns None implicitly for
    # gcc older than 4.2 (callers treat a falsy result as "no OpenMP").
    if compiler_version and compiler_version.split('.') >= ['4', '2']:
        return '-fopenmp', '-fopenmp'
# Use the user's locale so compiler output decodes correctly; ignore broken locales.
try:
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    pass

COMPILER = None
# May be flipped to True as a side effect of get_openmp_compiler_flags().
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')

# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()

# Per-tag hooks that tweak (or exclude) the distutils Extension before building.
EXT_EXTRAS = {
    'tag:numpy' : update_numpy_extension,
    'tag:numpy_old' : update_old_numpy_extension,
    'tag:openmp': update_openmp_extension,
    'tag:cpp11': update_cpp11_extension,
    'tag:trace' : update_linetrace_extension,
    'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)),  # no %-bytes formatting
    'tag:no-macos': exclude_extension_on_platform('darwin'),
}
# TODO: use tags
VER_DEP_MODULES = {
    # tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
    # (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x

    # The next line should start (3,); but this is a dictionary, so
    # we can only have one (3,) key. Since 2.7 is supposed to be the
    # last 2.x release, things would have to change drastically for this
    # to be unsafe...
    (2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
                                           'run.test_raisefrom',
                                           'run.different_package_names',
                                           ]),
    (3,): (operator.ge, lambda x: x in ['run.non_future_division',
                                        'compile.extsetslice',
                                        'compile.extdelslice',
                                        'run.special_methods_T561_py2',
                                        ]),
    (3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
                                          'build.cythonize_pep420_namespace',
                                          'run.yield_from_py33',
                                          'pyximport.pyximport_namespace',
                                          'run.qualname',
                                          ]),
    (3,4): (operator.lt, lambda x: x in ['run.py34_signature',
                                         'run.test_unicode',  # taken from Py3.7, difficult to backport
                                         ]),
    (3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
                                             ]),
    (3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
                                         'run.py35_asyncio_async_def',
                                         'run.mod__spec__',
                                         'run.pep526_variable_annotations',  # typing module
                                         'run.test_exceptions',  # copied from Py3.7+
                                         ]),
}

# Build environment knobs taken from the process environment.
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
TEST_SUPPORT_DIR = 'testsupport'

BACKENDS = ['c', 'cpp']

# The UTF-8 BOM decoded as three latin-1 characters, for stripping from
# latin-1-decoded test files.
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
def memoize(f):
    """Cache f's results keyed by its positional-argument tuple.

    Arguments must be hashable; the cache is exposed as ``f._cache``.
    """
    sentinel = object()
    f._cache = {}

    def wrapper(*args):
        value = f._cache.get(args, sentinel)
        if value is sentinel:
            value = f(*args)
            f._cache[args] = value
        return value
    return wrapper
@memoize
def parse_tags(filepath):
    """Parse the leading '# name: value,...' directive comments of a test file
    into a {directive: [values]} defaultdict. Only the first comment block is
    read; parsing stops at the first non-comment line after any tag."""
    tags = defaultdict(list)
    parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
    with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
        for line in f:
            # ignore BOM-like bytes and whitespace
            line = line.lstrip(UTF8_BOM_BYTES).strip()
            if not line:
                if tags:
                    break  # assume all tags are in one block
                else:
                    continue
            if line[0] != '#':
                break
            parsed = parse_tag(line)
            if parsed:
                tag, values = parsed.groups()
                # Encoding declarations look like directives but are not.
                if tag in ('coding', 'encoding'):
                    continue
                if tag == 'tags':
                    tag = 'tag'
                    print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
                if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
                    print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
                values = values.split(',')
                # Drop empty entries produced by trailing/duplicated commas.
                tags[tag].extend(filter(None, [value.strip() for value in values]))
            elif tags:
                break  # assume all tags are in one block
    return tags
# Cached directory listing for directories known not to change during the run.
# (The previous lambda wrapper around os.listdir was redundant.)
list_unchanging_dir = memoize(os.listdir)
@memoize
def _list_pyregr_data_files(test_directory):
    """Return the support-data filenames a pyregr test directory needs,
    always including '__init__.py'."""
    is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
    data_files = [name for name in list_unchanging_dir(test_directory)
                  if is_data_file(name)]
    return ['__init__.py'] + data_files
def import_ext(module_name, file_path=None):
    """Import a freshly built extension module, either from an explicit shared
    object path or by name via the regular import machinery."""
    if file_path:
        # NOTE(review): the `imp` module is deprecated and removed in recent
        # Pythons — presumably fine for the versions this suite targets; confirm.
        import imp
        return imp.load_dynamic(module_name, file_path)
    else:
        try:
            from importlib import invalidate_caches
        except ImportError:
            pass
        else:
            # The module was just built; make sure the finders see it.
            invalidate_caches()
        return __import__(module_name, globals(), locals(), ['*'])
class build_ext(_build_ext):
    """distutils build_ext that tweaks compiler flags (ccache, C++ warnings,
    MSVC OpenMP) on a best-effort basis before delegating to the base class."""

    def build_extension(self, ext):
        try:
            try:  # Py2.7+ & Py3.2+
                compiler_obj = self.compiler_obj
            except AttributeError:
                compiler_obj = self.compiler
            if ext.language == 'c++':
                # -Wstrict-prototypes is C-only and makes g++ warn noisily.
                compiler_obj.compiler_so.remove('-Wstrict-prototypes')
            if CCACHE:
                compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
            if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
                ext.extra_compile_args.append('/openmp')
        except Exception:
            # Deliberately best-effort: flag tweaking must never break the build.
            pass
        _build_ext.build_extension(self, ext)
class ErrorWriter(object):
    """File-like sink that collects compiler output and reports the error and
    warning lines it contains as sorted "line:column: message" strings."""

    match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match

    def __init__(self, encoding=None):
        self.output = []
        self.encoding = encoding

    def write(self, value):
        if self.encoding:
            # Re-decode latin-1-read text in the real source encoding.
            value = value.encode('ISO-8859-1').decode(self.encoding)
        self.output.append(value)

    def _collect(self):
        results = {'errors': [], 'warnings': []}
        for line in ''.join(self.output).splitlines():
            match = self.match_error(line)
            if not match:
                continue
            is_warning, lineno, column, message = match.groups()
            bucket = results['warnings'] if is_warning else results['errors']
            bucket.append((int(lineno), int(column), message.strip()))
        return [["%d:%d: %s" % values for values in sorted(results[key])]
                for key in ('errors', 'warnings')]

    def geterrors(self):
        return self._collect()[0]

    def getwarnings(self):
        return self._collect()[1]

    def getall(self):
        return self._collect()

    def close(self):
        # Only present to satisfy the file-like interface; nothing to release.
        pass
class Stats(object):
    """Accumulates per-metric test counts and durations, keeping the top-N
    slowest tests per metric in a min-heap."""

    def __init__(self, top_n=8):
        self.top_n = top_n
        self.test_counts = defaultdict(int)
        self.test_times = defaultdict(float)
        self.top_tests = defaultdict(list)

    def _push_top(self, top, entry):
        # min-heap => pop smallest/shortest until longest times remain
        if len(top) >= self.top_n:
            heapq.heappushpop(top, entry)
        else:
            heapq.heappush(top, entry)

    def add_time(self, name, language, metric, t):
        self.test_counts[metric] += 1
        self.test_times[metric] += t
        self._push_top(self.top_tests[metric], (t, name, language))

    @contextmanager
    def time(self, name, language, metric):
        """Context manager that records the elapsed wall time under *metric*."""
        started = time.time()
        yield
        self.add_time(name, language, metric, time.time() - started)

    def update(self, stats):
        # type: (Stats) -> None
        """Merge another Stats object into this one."""
        for metric, t in stats.test_times.items():
            self.test_times[metric] += t
            self.test_counts[metric] += stats.test_counts[metric]
            top = self.top_tests[metric]
            for entry in stats.top_tests[metric]:
                self._push_top(top, entry)

    def print_stats(self, out=sys.stderr):
        """Write a per-metric timing summary, including the slowest tests."""
        if not self.test_times:
            return
        lines = ['Times:\n']
        for metric, t in sorted(self.test_times.items()):
            count = self.test_counts[metric]
            top = self.top_tests[metric]
            lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
                metric, t, count, t / count,
                ', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
        out.write(''.join(lines))
class TestBuilder(object):
    """Walks the test tree and assembles a unittest.TestSuite, choosing a test
    case class per file from its mode tag and applying include/exclude
    selectors."""

    def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
                 with_pyregr, languages, test_bugs, language_level,
                 common_utility_dir, pythran_dir=None,
                 default_mode='run', stats=None,
                 add_embedded_test=False):
        self.rootdir = rootdir
        self.workdir = workdir
        self.selectors = selectors
        self.exclude_selectors = exclude_selectors
        self.annotate = options.annotate_source
        self.cleanup_workdir = options.cleanup_workdir
        self.cleanup_sharedlibs = options.cleanup_sharedlibs
        self.cleanup_failures = options.cleanup_failures
        self.with_pyregr = with_pyregr
        self.cython_only = options.cython_only
        self.languages = languages
        self.test_bugs = test_bugs
        self.fork = options.fork
        self.language_level = language_level
        self.test_determinism = options.test_determinism
        self.common_utility_dir = common_utility_dir
        self.pythran_dir = pythran_dir
        self.default_mode = default_mode
        self.stats = stats
        self.add_embedded_test = add_embedded_test
        self.capture = options.capture

    def build_suite(self):
        """Build the full suite from every eligible subdirectory of rootdir."""
        suite = unittest.TestSuite()
        filenames = os.listdir(self.rootdir)
        filenames.sort()
        for filename in filenames:
            path = os.path.join(self.rootdir, filename)
            if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
                # 'pyregr' and 'broken' trees are opt-in.
                if filename == 'pyregr' and not self.with_pyregr:
                    continue
                if filename == 'broken' and not self.test_bugs:
                    continue
                suite.addTest(
                    self.handle_directory(path, filename))
        if sys.platform not in ['win32'] and self.add_embedded_test:
            # Non-Windows makefile.
            if [1 for selector in self.selectors if selector("embedded")] \
                and not [1 for selector in self.exclude_selectors if selector("embedded")]:
                suite.addTest(unittest.makeSuite(EmbedTest))
        return suite

    def handle_directory(self, path, context):
        """Build the sub-suite for one test directory (*context* is its name)."""
        workdir = os.path.join(self.workdir, context)
        if not os.path.exists(workdir):
            os.makedirs(workdir)

        suite = unittest.TestSuite()
        filenames = list_unchanging_dir(path)
        filenames.sort()
        for filename in filenames:
            filepath = os.path.join(path, filename)
            module, ext = os.path.splitext(filename)
            if ext not in ('.py', '.pyx', '.srctree'):
                continue
            if filename.startswith('.'):
                continue  # certain emacs backup files
            if context == 'pyregr':
                # pyregr files carry no Cython tag comments.
                tags = defaultdict(list)
            else:
                tags = parse_tags(filepath)
            fqmodule = "%s.%s" % (context, module)
            # Apply include selectors, then exclude selectors.
            if not [ 1 for match in self.selectors
                     if match(fqmodule, tags) ]:
                continue
            if self.exclude_selectors:
                if [1 for match in self.exclude_selectors
                        if match(fqmodule, tags)]:
                    continue

            # Mode priority: explicit 'mode' tag > pyregr context > default.
            mode = self.default_mode
            if tags['mode']:
                mode = tags['mode'][0]
            elif context == 'pyregr':
                mode = 'pyregr'

            if ext == '.srctree':
                # End-to-end source-tree tests; skip cpp-tagged ones when the
                # cpp backend is not enabled.
                if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
                    suite.addTest(EndToEndTest(filepath, workdir,
                                               self.cleanup_workdir, stats=self.stats,
                                               capture=self.capture))
                continue

            # Choose the test suite.
            if mode == 'pyregr':
                if not filename.startswith('test_'):
                    continue
                test_class = CythonPyregrTestCase
            elif mode == 'run':
                if module.startswith("test_"):
                    test_class = CythonUnitTestCase
                else:
                    test_class = CythonRunTestCase
            elif mode in ['compile', 'error']:
                test_class = CythonCompileTestCase
            else:
                raise KeyError('Invalid test mode: ' + mode)

            for test in self.build_tests(test_class, path, workdir,
                                         module, mode == 'error', tags):
                suite.addTest(test)
            if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
                # additionally test file in real Python
                # 'pureX.Y' tags set the minimum Python version for that run.
                min_py_ver = [
                    (int(pyver.group(1)), int(pyver.group(2)))
                    for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
                    if pyver
                ]
                if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
                    suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))

        return suite

    def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
        """Expand one test file into one test per (language, preparse,
        language_level) combination."""
        warning_errors = 'werror' in tags['tag']
        expect_warnings = 'warnings' in tags['tag']

        # Error tests only need a single backend.
        if expect_errors:
            if skip_c(tags) and 'cpp' in self.languages:
                languages = ['cpp']
            else:
                languages = self.languages[:1]
        else:
            languages = self.languages

        # Honour 'cpp'/'no-cpp' tags by dropping the other backend.
        if skip_c(tags) and 'c' in languages:
            languages = list(languages)
            languages.remove('c')
        elif 'no-cpp' in tags['tag'] and 'cpp' in self.languages:
            languages = list(languages)
            languages.remove('cpp')

        language_levels = [2, 3] if 'all_language_levels' in tags['tag'] else [None]

        pythran_dir = self.pythran_dir
        if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
            import pythran.config
            try:
                pythran_ext = pythran.config.make_extension(python=True)
            except TypeError:  # old pythran version syntax
                pythran_ext = pythran.config.make_extension()
            pythran_dir = pythran_ext['include_dirs'][0]

        preparse_list = tags.get('preparse', ['id'])
        tests = [ self.build_test(test_class, path, workdir, module, tags, language, language_level,
                                  expect_errors, expect_warnings, warning_errors, preparse,
                                  pythran_dir if language == "cpp" else None)
                  for language in languages
                  for preparse in preparse_list
                  for language_level in language_levels
        ]
        return tests

    def build_test(self, test_class, path, workdir, module, tags, language, language_level,
                   expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
        """Instantiate one test case in its own per-variant working directory."""
        language_workdir = os.path.join(workdir, language)
        if not os.path.exists(language_workdir):
            os.makedirs(language_workdir)
        workdir = os.path.join(language_workdir, module)
        # Keep variant outputs separate to avoid build collisions.
        if preparse != 'id':
            workdir += '_%s' % (preparse,)
        if language_level:
            workdir += '_cy%d' % (language_level,)
        return test_class(path, workdir, module, tags,
                          language=language,
                          preparse=preparse,
                          expect_errors=expect_errors,
                          expect_warnings=expect_warnings,
                          annotate=self.annotate,
                          cleanup_workdir=self.cleanup_workdir,
                          cleanup_sharedlibs=self.cleanup_sharedlibs,
                          cleanup_failures=self.cleanup_failures,
                          cython_only=self.cython_only,
                          fork=self.fork,
                          language_level=language_level or self.language_level,
                          warning_errors=warning_errors,
                          test_determinism=self.test_determinism,
                          common_utility_dir=self.common_utility_dir,
                          pythran_dir=pythran_dir,
                          stats=self.stats)
def skip_c(tags):
    """Return True if the C backend must be skipped for a test with these
    *tags*: either it is tagged 'cpp', or a distutils option forces
    language=c++."""
    if 'cpp' in tags['tag']:
        return True

    # We don't want to create a distutils key in the
    # dictionary so we check before looping.
    if 'distutils' in tags:
        for option in tags['distutils']:
            parts = option.split('=')
            if len(parts) == 2 and parts[0].strip() == 'language' and parts[1].strip() == 'c++':
                return True
    return False
def filter_stderr(stderr_bytes):
    """
    Filter annoying warnings from output.

    Drops MSVC's D9025 option-override lines
    (e.g. "cl : Command line warning D9025 : overriding '/Ox' with '/Od'").
    """
    marker = b"Command line warning D9025"
    if marker not in stderr_bytes:
        return stderr_bytes
    kept_lines = [line for line in stderr_bytes.splitlines() if marker not in line]
    return b'\n'.join(kept_lines)
class CythonCompileTestCase(unittest.TestCase):
    """Compiles one Cython test module in an isolated working directory.

    Runs cythonization (optionally checking expected errors/warnings) and,
    unless ``cython_only`` is set, builds the extension via distutils.
    Subclasses add importing and running the built module.
    """

    def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
                 expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
                 cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False,
                 fork=True, language_level=2, warning_errors=False,
                 test_determinism=False,
                 common_utility_dir=None, pythran_dir=None, stats=None):
        self.test_directory = test_directory
        self.tags = tags
        self.workdir = workdir
        self.module = module
        self.language = language
        self.preparse = preparse
        # Preparse variants get a distinct test name so results don't collide.
        self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
        self.expect_errors = expect_errors
        self.expect_warnings = expect_warnings
        self.annotate = annotate
        self.cleanup_workdir = cleanup_workdir
        self.cleanup_sharedlibs = cleanup_sharedlibs
        self.cleanup_failures = cleanup_failures
        self.cython_only = cython_only
        self.fork = fork
        self.language_level = language_level
        self.warning_errors = warning_errors
        self.test_determinism = test_determinism
        self.common_utility_dir = common_utility_dir
        self.pythran_dir = pythran_dir
        self.stats = stats
        unittest.TestCase.__init__(self)

    def shortDescription(self):
        """One-line description including language, language level and pythran mode."""
        return "compiling (%s%s%s) %s" % (
            self.language,
            "/cy2" if self.language_level == 2 else "/cy3" if self.language_level == 3 else "",
            "/pythran" if self.pythran_dir is not None else "",
            self.description_name()
        )

    def description_name(self):
        return self.name

    def setUp(self):
        """Save global compiler options, apply per-test ones, prepare the workdir."""
        from Cython.Compiler import Options
        self._saved_options = [
            (name, getattr(Options, name))
            for name in ('warning_errors', 'clear_to_none', 'error_on_unknown_names', 'error_on_uninitialized')
        ]
        self._saved_default_directives = list(Options.get_directive_defaults().items())
        Options.warning_errors = self.warning_errors
        if sys.version_info >= (3, 4):
            Options._directive_defaults['autotestdict'] = False

        if not os.path.exists(self.workdir):
            os.makedirs(self.workdir)
        # Make the workdir importable so the built module can be loaded.
        if self.workdir not in sys.path:
            sys.path.insert(0, self.workdir)

    def tearDown(self):
        """Restore global options and clean up generated files as configured."""
        from Cython.Compiler import Options
        for name, value in self._saved_options:
            setattr(Options, name, value)
        Options._directive_defaults = dict(self._saved_default_directives)
        unpatch_inspect_isfunction()

        try:
            sys.path.remove(self.workdir)
        except ValueError:
            pass
        try:
            del sys.modules[self.module]
        except KeyError:
            pass
        # Keep everything on failure unless cleanup_failures is set.
        cleanup = self.cleanup_failures or self.success
        cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
        cleanup_lib_files = self.cleanup_sharedlibs and cleanup
        is_cygwin = sys.platform == 'cygwin'

        if os.path.exists(self.workdir):
            if cleanup_c_files and cleanup_lib_files and not is_cygwin:
                shutil.rmtree(self.workdir, ignore_errors=True)
            else:
                for rmfile in os.listdir(self.workdir):
                    ext = os.path.splitext(rmfile)[1]
                    if not cleanup_c_files:
                        # Keep C, C++ files, header files, preprocessed sources
                        # and assembly sources (typically the .i and .s files
                        # are intentionally generated when -save-temps is given)
                        if ext in (".c", ".cpp", ".h", ".i", ".ii", ".s"):
                            continue
                        if ext == ".html" and rmfile.startswith(self.module):
                            continue
                    is_shared_obj = ext in (".so", ".dll")
                    if not cleanup_lib_files and is_shared_obj:
                        continue
                    try:
                        rmfile = os.path.join(self.workdir, rmfile)
                        if os.path.isdir(rmfile):
                            shutil.rmtree(rmfile, ignore_errors=True)
                        elif is_cygwin and is_shared_obj:
                            # Delete later
                            _to_clean.append(rmfile)
                        else:
                            os.remove(rmfile)
                    except IOError:
                        pass

                if cleanup_c_files and cleanup_lib_files and is_cygwin:
                    # Finally, remove the work dir itself
                    _to_clean.append(self.workdir)

        # '-again' dir is created by the determinism check in compile().
        if cleanup_c_files and os.path.exists(self.workdir + '-again'):
            shutil.rmtree(self.workdir + '-again', ignore_errors=True)

    def runTest(self):
        # self.success is read by tearDown() to decide whether to clean up.
        self.success = False
        self.runCompileTest()
        self.success = True

    def runCompileTest(self):
        """Compile the module; returns the built extension path (or None)."""
        return self.compile(
            self.test_directory, self.module, self.workdir,
            self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)

    def find_module_source_file(self, source_file):
        # Fall back by dropping the trailing character, e.g. '.pyx' -> '.py'.
        if not os.path.exists(source_file):
            source_file = source_file[:-1]
        return source_file

    def build_target_filename(self, module_name):
        """Name of the generated source file, e.g. 'mod.c' or 'mod.cpp'."""
        target = '%s.%s' % (module_name, self.language)
        return target

    def related_files(self, test_directory, module_name):
        """All files named '<module>_*.<ext>' that accompany the test module."""
        is_related = re.compile('%s_.*[.].*' % module_name).match
        return [filename for filename in list_unchanging_dir(test_directory)
                if is_related(filename)]

    def copy_files(self, test_directory, target_directory, file_list):
        """Copy (or symlink) support files into the target directory.

        When a preparse transformation is active, files are rewritten
        through the preparse function instead of copied verbatim.
        """
        if self.preparse and self.preparse != 'id':
            preparse_func = globals()[self.preparse]
            def copy(src, dest):
                with open(src) as fin:
                    with open(dest, 'w') as fout:
                        fout.write(preparse_func(fin.read()))
        else:
            # use symlink on Unix, copy on Windows
            try:
                copy = os.symlink
            except AttributeError:
                copy = shutil.copy

        join = os.path.join
        for filename in file_list:
            file_path = join(test_directory, filename)
            if os.path.exists(file_path):
                copy(file_path, join(target_directory, filename))

    def source_files(self, workdir, module_name, file_list):
        """Sources to hand to distutils: the generated file plus any related
        file that is not already materialized inside the workdir."""
        return ([self.build_target_filename(module_name)] +
                [filename for filename in file_list
                 if not os.path.isfile(os.path.join(workdir, filename))])

    def split_source_and_output(self, test_directory, module, workdir):
        """Split a test source into code and expected _ERRORS/_WARNINGS sections.

        Writes the code part into the workdir and returns
        (expected_errors, expected_warnings) as lists of strings.
        """
        source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
        from Cython.Utils import detect_opened_file_encoding
        with io_open(source_file, 'rb') as f:
            # encoding is passed to ErrorWriter but not used on the source
            # since it is sometimes deliberately wrong
            encoding = detect_opened_file_encoding(f, default=None)

        with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
            error_writer = warnings_writer = None
            out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
                          'w', encoding='ISO-8859-1')
            try:
                for line in source_and_output:
                    if line.startswith("_ERRORS"):
                        out.close()
                        out = error_writer = ErrorWriter(encoding=encoding)
                    elif line.startswith("_WARNINGS"):
                        out.close()
                        out = warnings_writer = ErrorWriter(encoding=encoding)
                    else:
                        out.write(line)
            finally:
                out.close()

        return (error_writer.geterrors() if error_writer else [],
                warnings_writer.geterrors() if warnings_writer else [])

    def run_cython(self, test_directory, module, targetdir, incdir, annotate,
                   extra_compile_options=None):
        """Run the Cython compiler on the test module, producing C/C++ source."""
        include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
        if incdir:
            include_dirs.append(incdir)

        if self.preparse == 'id':
            source = self.find_module_source_file(
                os.path.join(test_directory, module + '.pyx'))
        else:
            # Preparsed variants compile a transformed copy from the targetdir.
            self.copy_files(test_directory, targetdir, [module + '.pyx'])
            source = os.path.join(targetdir, module + '.pyx')
        target = os.path.join(targetdir, self.build_target_filename(module))

        if extra_compile_options is None:
            extra_compile_options = {}

        if 'allow_unknown_names' in self.tags['tag']:
            from Cython.Compiler import Options
            Options.error_on_unknown_names = False

        try:
            CompilationOptions
        except NameError:
            from Cython.Compiler.Options import CompilationOptions
            from Cython.Compiler.Main import compile as cython_compile
            from Cython.Compiler.Options import default_options
        common_utility_include_dir = self.common_utility_dir

        options = CompilationOptions(
            default_options,
            include_path = include_dirs,
            output_file = target,
            annotate = annotate,
            use_listing_file = False,
            cplus = self.language == 'cpp',
            np_pythran = self.pythran_dir is not None,
            language_level = self.language_level,
            generate_pxi = False,
            evaluate_tree_assertions = True,
            common_utility_include_dir = common_utility_include_dir,
            **extra_compile_options
            )
        cython_compile(source, options=options,
                       full_module_name=module)

    def run_distutils(self, test_directory, module, workdir, incdir,
                      extra_extension_args=None):
        """Build the generated C/C++ source into an extension module.

        Returns the full path of the built shared object.
        """
        cwd = os.getcwd()
        os.chdir(workdir)
        try:
            build_extension = build_ext(get_distutils_distro())
            build_extension.include_dirs = INCLUDE_DIRS[:]
            if incdir:
                build_extension.include_dirs.append(incdir)
            build_extension.finalize_options()
            if COMPILER:
                build_extension.compiler = COMPILER

            ext_compile_flags = CFLAGS[:]

            if build_extension.compiler == 'mingw32':
                ext_compile_flags.append('-Wno-format')
            if extra_extension_args is None:
                extra_extension_args = {}

            related_files = self.related_files(test_directory, module)
            self.copy_files(test_directory, workdir, related_files)

            from distutils.core import Extension
            extension = Extension(
                module,
                sources=self.source_files(workdir, module, related_files),
                extra_compile_args=ext_compile_flags,
                **extra_extension_args
                )

            if self.language == 'cpp':
                # Set the language now as the fixer might need it
                extension.language = 'c++'

            if 'distutils' in self.tags:
                # Apply '# distutils: ...' directives from the test source.
                from Cython.Build.Dependencies import DistutilsInfo
                from Cython.Utils import open_source_file
                pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
                with open_source_file(pyx_path) as f:
                    DistutilsInfo(f).apply(extension)

            if self.pythran_dir:
                from Cython.Build.Dependencies import update_pythran_extension
                update_pythran_extension(extension)

            # Compile with -DCYTHON_CLINE_IN_TRACEBACK=1 unless we have
            # the "traceback" tag
            if 'traceback' not in self.tags['tag']:
                extension.define_macros.append(("CYTHON_CLINE_IN_TRACEBACK", 1))

            for matcher, fixer in list(EXT_EXTRAS.items()):
                if isinstance(matcher, str):
                    # lazy init
                    del EXT_EXTRAS[matcher]
                    matcher = string_selector(matcher)
                    EXT_EXTRAS[matcher] = fixer
                if matcher(module, self.tags):
                    newext = fixer(extension)
                    if newext is EXCLUDE_EXT:
                        return skip_test("Test '%s' excluded due to tags '%s'" % (
                            self.name, ', '.join(self.tags.get('tag', ''))))
                    extension = newext or extension

            if self.language == 'cpp':
                extension.language = 'c++'
            if IS_PY2:
                workdir = str(workdir)  # work around type check in distutils that disallows unicode strings

            build_extension.extensions = [extension]
            build_extension.build_temp = workdir
            build_extension.build_lib  = workdir
            build_extension.run()
        finally:
            os.chdir(cwd)

        try:
            get_ext_fullpath = build_extension.get_ext_fullpath
        except AttributeError:
            def get_ext_fullpath(ext_name, self=build_extension):
                # copied from distutils.command.build_ext (missing in Py2.[45])
                fullname = self.get_ext_fullname(ext_name)
                modpath = fullname.split('.')
                filename = self.get_ext_filename(modpath[-1])
                if not self.inplace:
                    filename = os.path.join(*modpath[:-1]+[filename])
                    return os.path.join(self.build_lib, filename)
                package = '.'.join(modpath[0:-1])
                build_py = self.get_finalized_command('build_py')
                package_dir = os.path.abspath(build_py.get_package_dir(package))
                return os.path.join(package_dir, filename)

        return get_ext_fullpath(module)

    def compile(self, test_directory, module, workdir, incdir,
                expect_errors, expect_warnings, annotate):
        """Cythonize and optionally C-compile the module.

        Checks expected errors/warnings against actual compiler output and
        (when enabled) verifies deterministic code generation.  Returns the
        path of the built extension, or None when only errors were expected.
        """
        expected_errors = expected_warnings = errors = warnings = ()
        if expect_errors or expect_warnings:
            expected_errors, expected_warnings = self.split_source_and_output(
                test_directory, module, workdir)
            test_directory = workdir

        if WITH_CYTHON:
            old_stderr = sys.stderr
            try:
                # Capture compiler errors/warnings instead of printing them.
                sys.stderr = ErrorWriter()
                with self.stats.time(self.name, self.language, 'cython'):
                    self.run_cython(test_directory, module, workdir, incdir, annotate)
                errors, warnings = sys.stderr.getall()
            finally:
                sys.stderr = old_stderr
            if self.test_determinism and not expect_errors:
                # Compile a second time into a fresh dir and diff the output.
                workdir2 = workdir + '-again'
                os.mkdir(workdir2)
                self.run_cython(test_directory, module, workdir2, incdir, annotate)
                diffs = []
                for file in os.listdir(workdir2):
                    if (open(os.path.join(workdir, file)).read()
                            != open(os.path.join(workdir2, file)).read()):
                        diffs.append(file)
                        os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
                            workdir, file,
                            workdir2, file,
                            workdir2, file))
                if diffs:
                    self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))

        tostderr = sys.__stderr__.write
        if expected_warnings or (expect_warnings and warnings):
            self._match_output(expected_warnings, warnings, tostderr)
        if 'cerror' in self.tags['tag']:
            # Test expects a *C* compile error; Cython itself must succeed.
            if errors:
                tostderr("\n=== Expected C compile error ===\n")
                tostderr("\n=== Got Cython errors: ===\n")
                tostderr('\n'.join(errors))
                tostderr('\n\n')
                raise RuntimeError('should have generated extension code')
        elif errors or expected_errors:
            self._match_output(expected_errors, errors, tostderr)
            return None

        so_path = None
        if not self.cython_only:
            from Cython.Utils import captured_fd, print_bytes
            from distutils.errors import CompileError, LinkError
            show_output = True
            get_stderr = get_stdout = None
            try:
                with captured_fd(1) as get_stdout:
                    with captured_fd(2) as get_stderr:
                        with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
                            so_path = self.run_distutils(test_directory, module, workdir, incdir)
            except Exception as exc:
                if ('cerror' in self.tags['tag'] and
                        ((get_stderr and get_stderr()) or
                         isinstance(exc, (CompileError, LinkError)))):
                    show_output = False  # expected C compiler failure
                else:
                    raise
            else:
                if 'cerror' in self.tags['tag']:
                    raise RuntimeError('should have failed C compile')
            finally:
                if show_output:
                    stdout = get_stdout and get_stdout().strip()
                    if stdout:
                        print_bytes(
                            stdout, header_text="\n=== C/C++ compiler output: =========\n",
                            end=None, file=sys.__stderr__)
                    stderr = get_stderr and filter_stderr(get_stderr()).strip()
                    if stderr:
                        print_bytes(
                            stderr, header_text="\n=== C/C++ compiler error output: ===\n",
                            end=None, file=sys.__stderr__)
                    if stdout or stderr:
                        tostderr("\n====================================\n")
        return so_path

    def _match_output(self, expected_output, actual_output, write):
        """Compare expected vs. actual compiler messages line by line;
        on mismatch, dump both lists via `write` and re-raise."""
        try:
            for expected, actual in zip(expected_output, actual_output):
                self.assertEqual(expected, actual)
            if len(actual_output) < len(expected_output):
                expected = expected_output[len(actual_output)]
                self.assertEqual(expected, None)
            elif len(actual_output) > len(expected_output):
                unexpected = actual_output[len(expected_output)]
                self.assertEqual(None, unexpected)
        except AssertionError:
            write("\n=== Expected: ===\n")
            write('\n'.join(expected_output))
            write("\n\n=== Got: ===\n")
            write('\n'.join(actual_output))
            write('\n\n')
            raise
class CythonRunTestCase(CythonCompileTestCase):
    """Compiles a test module and then runs its doctests (possibly forked)."""

    def setUp(self):
        CythonCompileTestCase.setUp(self)
        from Cython.Compiler import Options
        Options.clear_to_none = False

    def description_name(self):
        return self.name if self.cython_only else "and running %s" % self.name

    def run(self, result=None):
        """Custom run() implementing the unittest result protocol manually,
        so compile and run phases share one result and thread cleanup is
        always checked."""
        if result is None:
            result = self.defaultTestResult()
        result.startTest(self)
        try:
            self.setUp()
            try:
                self.success = False
                ext_so_path = self.runCompileTest()
                # Snapshot counts to detect whether run_tests added failures.
                failures, errors, skipped = len(result.failures), len(result.errors), len(result.skipped)
                if not self.cython_only and ext_so_path is not None:
                    self.run_tests(result, ext_so_path)
                if failures == len(result.failures) and errors == len(result.errors):
                    # No new errors...
                    self.success = True
            finally:
                check_thread_termination()
        except SkipTest as exc:
            result.addSkip(self, str(exc))
            result.stopTest(self)
        except Exception:
            result.addError(self, sys.exc_info())
            result.stopTest(self)
        try:
            self.tearDown()
        except Exception:
            pass

    def run_tests(self, result, ext_so_path):
        self.run_doctests(self.module, result, ext_so_path)

    def run_doctests(self, module_or_name, result, ext_so_path):
        """Import the built module (if given by name) and run its doctests,
        optionally inside a forked child process."""
        def run_test(result):
            if isinstance(module_or_name, basestring):
                with self.stats.time(self.name, self.language, 'import'):
                    module = import_ext(module_or_name, ext_so_path)
            else:
                module = module_or_name
            tests = doctest.DocTestSuite(module)
            with self.stats.time(self.name, self.language, 'run'):
                tests.run(result)
        run_forked_test(result, run_test, self.shortDescription(), self.fork)
def run_forked_test(result, run_func, test_name, fork=True):
    """Run `run_func(result)`, forking a child process where possible.

    Forking keeps the tested extension module out of the parent process.
    The child pickles its partial test result into a temp file which the
    parent merges back.  Without fork support (or on Py3, where this path
    is disabled), the function simply runs in-process.
    """
    if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
        run_func(result)
        sys.stdout.flush()
        sys.stderr.flush()
        gc.collect()
        return

    # fork to make sure we do not keep the tested module loaded
    result_handle, result_file = tempfile.mkstemp()
    os.close(result_handle)
    child_id = os.fork()
    if not child_id:
        # Child process: run the test and write the pickled result.
        result_code = 0
        try:
            try:
                tests = partial_result = None
                try:
                    partial_result = PartialTestResult(result)
                    run_func(partial_result)
                    sys.stdout.flush()
                    sys.stderr.flush()
                    gc.collect()
                except Exception:
                    result_code = 1
                    if partial_result is not None:
                        if tests is None:
                            # importing failed, try to fake a test class
                            tests = _FakeClass(
                                failureException=sys.exc_info()[1],
                                _shortDescription=test_name,
                                module_name=None)
                        partial_result.addError(tests, sys.exc_info())
                if partial_result is not None:
                    with open(result_file, 'wb') as output:
                        pickle.dump(partial_result.data(), output)
            except:
                traceback.print_exc()
        finally:
            try: sys.stderr.flush()
            except: pass
            try: sys.stdout.flush()
            except: pass
            # _exit() avoids running parent-level cleanup in the child.
            os._exit(result_code)

    # Parent process: wait for the child and merge its result back.
    try:
        cid, result_code = os.waitpid(child_id, 0)
        module_name = test_name.split()[-1]
        # os.waitpid returns the child's result code in the
        # upper byte of result_code, and the signal it was
        # killed by in the lower byte
        if result_code & 255:
            raise Exception(
                "Tests in module '%s' were unexpectedly killed by signal %d, see test output for details." % (
                    module_name, result_code & 255))
        result_code >>= 8
        if result_code in (0, 1):
            try:
                with open(result_file, 'rb') as f:
                    PartialTestResult.join_results(result, pickle.load(f))
            except Exception:
                raise Exception(
                    "Failed to load test result from test in module '%s' after exit status %d,"
                    " see test output for details." % (module_name, result_code))
        if result_code:
            raise Exception(
                "Tests in module '%s' exited with status %d, see test output for details." % (
                    module_name, result_code))
    finally:
        try:
            os.unlink(result_file)
        except:
            pass
class PureDoctestTestCase(unittest.TestCase):
    """Runs the doctests of a plain Python module (no Cython compilation),
    optionally followed by a mypy check when the test carries a 'mypy' tag."""

    def __init__(self, module_name, module_path, tags, stats=None):
        self.tags = tags
        self.module_name = self.name = module_name
        self.module_path = module_path
        self.stats = stats
        unittest.TestCase.__init__(self, 'run')

    def shortDescription(self):
        return "running pure doctests in %s" % self.module_name

    def run(self, result=None):
        """Load the module under a private name, run its doctests, then
        unload it again so repeated runs don't interfere."""
        if result is None:
            result = self.defaultTestResult()
        loaded_module_name = 'pure_doctest__' + self.module_name
        result.startTest(self)
        try:
            self.setUp()

            import imp
            with self.stats.time(self.name, 'py', 'pyimport'):
                m = imp.load_source(loaded_module_name, self.module_path)
            try:
                with self.stats.time(self.name, 'py', 'pyrun'):
                    doctest.DocTestSuite(m).run(result)
            finally:
                del m
                if loaded_module_name in sys.modules:
                    del sys.modules[loaded_module_name]
                check_thread_termination()
        except Exception:
            result.addError(self, sys.exc_info())
            result.stopTest(self)
        try:
            self.tearDown()
        except Exception:
            pass

        if 'mypy' in self.tags['tag']:
            try:
                from mypy import api as mypy_api
            except ImportError:
                # mypy is optional; silently skip the check if unavailable.
                pass
            else:
                with self.stats.time(self.name, 'py', 'mypy'):
                    mypy_result = mypy_api.run([
                        self.module_path,
                        '--ignore-missing-imports',
                        '--follow-imports', 'skip',
                    ])
                if mypy_result[2]:
                    self.fail(mypy_result[0])
# Matches "private" attribute names: exactly one leading underscore (not dunders).
is_private_field = re.compile('^_[^_]').match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
# unittest's private _TextTestResult moved to unittest.runner in Py2.7/Py3.2.
try: # Py2.7+ and Py3.2+
    from unittest.runner import _TextTestResult
except ImportError:
    from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
    """Test result collected in a forked child process.

    Buffers its output in a StringIO and strips unpicklable private
    attributes so the result can be pickled and merged back into the
    parent's result object via join_results().
    """

    def __init__(self, base_result):
        _TextTestResult.__init__(
            self, self._StringIO(), True,
            base_result.dots + base_result.showAll*2)

    def strip_error_results(self, results):
        # Null out private attributes of recorded test cases (except the
        # description and the doctest reference, which is replaced by a
        # picklable fake) so pickling cannot fail on them.
        for test_case, error in results:
            for attr_name in filter(is_private_field, dir(test_case)):
                if attr_name == '_dt_test':
                    test_case._dt_test = _FakeClass(
                        name=test_case._dt_test.name)
                elif attr_name != '_shortDescription':
                    setattr(test_case, attr_name, None)

    def data(self):
        """Return the picklable state of this result."""
        self.strip_error_results(self.failures)
        self.strip_error_results(self.errors)
        return (self.failures, self.errors, self.skipped, self.testsRun,
                self.stream.getvalue())

    def join_results(result, data):
        """Static method for merging the result back into the main
        result object.
        """
        failures, errors, skipped, tests_run, output = data
        if output:
            result.stream.write(output)
        result.errors.extend(errors)
        result.skipped.extend(skipped)
        result.failures.extend(failures)
        result.testsRun += tests_run

    join_results = staticmethod(join_results)

    class _StringIO(StringIO):
        # _TextTestResult expects a stream with a writeln() method.
        def writeln(self, line):
            self.write("%s\n" % line)
class CythonUnitTestCase(CythonRunTestCase):
    """Compiles a module and runs the unittest test cases it defines."""

    def shortDescription(self):
        return "compiling (%s) tests in %s" % (self.language, self.description_name())

    def run_tests(self, result, ext_so_path):
        with self.stats.time(self.name, self.language, 'import'):
            extension_module = import_ext(self.module, ext_so_path)
        suite = unittest.defaultTestLoader.loadTestsFromModule(extension_module)
        with self.stats.time(self.name, self.language, 'run'):
            suite.run(result)
class CythonPyregrTestCase(CythonRunTestCase):
    """Runs CPython regression test modules compiled with Cython, redirecting
    the stdlib test helpers (run_unittest/run_doctest) into our own result."""

    def setUp(self):
        CythonRunTestCase.setUp(self)
        from Cython.Compiler import Options
        # pyregr tests rely on full CPython semantics.
        Options.error_on_unknown_names = False
        Options.error_on_uninitialized = False
        Options._directive_defaults.update(dict(
            binding=True, always_allow_keywords=True,
            set_initial_path="SOURCEFILE"))
        patch_inspect_isfunction()

    def related_files(self, test_directory, module_name):
        return _list_pyregr_data_files(test_directory)

    def _run_unittest(self, result, *classes):
        """Run tests from unittest.TestCase-derived classes."""
        valid_types = (unittest.TestSuite, unittest.TestCase)
        suite = unittest.TestSuite()
        for cls in classes:
            if isinstance(cls, str):
                if cls in sys.modules:
                    suite.addTest(unittest.findTestCases(sys.modules[cls]))
                else:
                    raise ValueError("str arguments must be keys in sys.modules")
            elif isinstance(cls, valid_types):
                suite.addTest(cls)
            else:
                suite.addTest(unittest.makeSuite(cls))
        with self.stats.time(self.name, self.language, 'run'):
            suite.run(result)

    def _run_doctest(self, result, module):
        self.run_doctests(module, result, None)

    def run_tests(self, result, ext_so_path):
        try:
            from test import support
        except ImportError: # Python2.x
            from test import test_support as support

        def run_test(result):
            def run_unittest(*classes):
                return self._run_unittest(result, *classes)
            def run_doctest(module, verbosity=None):
                return self._run_doctest(result, module)

            # Monkey-patch the stdlib test helpers to feed our result object;
            # restored in the finally block below.
            backup = (support.run_unittest, support.run_doctest)
            support.run_unittest = run_unittest
            support.run_doctest = run_doctest

            try:
                try:
                    sys.stdout.flush() # helps in case of crashes
                    with self.stats.time(self.name, self.language, 'import'):
                        module = import_ext(self.module, ext_so_path)
                    sys.stdout.flush() # helps in case of crashes
                    if hasattr(module, 'test_main'):
                        # help 'doctest.DocFileTest' find the module path through frame inspection
                        fake_caller_module_globals = {
                            'module': module,
                            '__name__': module.__name__,
                        }
                        call_tests = eval(
                            'lambda: module.test_main()',
                            fake_caller_module_globals, fake_caller_module_globals)
                        call_tests()
                        sys.stdout.flush() # helps in case of crashes
                except (unittest.SkipTest, support.ResourceDenied):
                    result.addSkip(self, 'ok')
            finally:
                support.run_unittest, support.run_doctest = backup

        run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
def __init__(self, cython_dir):
self.cython_dir = cython_dir
unittest.TestCase.__init__(self)
def runTest(self):
import pycodestyle
config_file = os.path.join(self.cython_dir, "setup.cfg")
if not os.path.exists(config_file):
config_file=os.path.join(os.path.dirname(__file__), "setup.cfg")
paths = []
for codedir in ['Cython', 'Demos', 'docs', 'pyximport', 'tests']:
paths += glob.glob(os.path.join(self.cython_dir, codedir + "/**/*.py"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file)
print("") # Fix the first line of the report.
result = style.check_files(paths)
self.assertEqual(result.total_errors, 0, "Found code style errors.")
# Debugger test suites are only collected when running on CPython.
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
    """Collect unittest test cases from 'Test*.py' files under `path`.

    Walks importable packages below `path`, imports modules from 'Tests'
    directories whose names pass the selectors, and adds their tests
    to `suite`.
    """
    def file_matches(filename):
        return filename.startswith("Test") and filename.endswith(".py")

    def package_matches(dirname):
        return dirname == "Tests"

    loader = unittest.TestLoader()

    if include_debugger:
        skipped_dirs = []
    else:
        skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]

    for dirpath, dirnames, filenames in os.walk(path):
        if dirpath != path and "__init__.py" not in filenames:
            # Not a package: remember it so subdirectories are skipped too.
            skipped_dirs.append(dirpath + os.path.sep)
            continue
        skip = False
        for dir in skipped_dirs:
            if dirpath.startswith(dir):
                skip = True
        if skip:
            continue
        parentname = os.path.split(dirpath)[-1]
        if package_matches(parentname):
            for f in filenames:
                if file_matches(f):
                    filepath = os.path.join(dirpath, f)[:-len(".py")]
                    # Convert the file path into a dotted module name.
                    modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
                    if not any(1 for match in selectors if match(modulename)):
                        continue
                    if any(1 for match in exclude_selectors if match(modulename)):
                        continue
                    module = __import__(modulename)
                    # __import__ returns the top-level package; walk down.
                    for x in modulename.split('.')[1:]:
                        module = getattr(module, x)
                    suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
    """Collect doctests from non-empty .py modules under `path`.

    Skips known-problematic packages/files, applies the include/exclude
    selectors, and adds a DocTestSuite per module to `suite`.
    """
    def package_matches(dirname):
        if dirname == 'Debugger' and not include_debugger:
            return False
        return dirname not in ("Mac", "Distutils", "Plex", "Tempita")

    def file_matches(filename):
        filename, ext = os.path.splitext(filename)
        # Modules that must not be imported outside of gdb or at all.
        blacklist = ['libcython', 'libpython', 'test_libcython_in_gdb',
                     'TestLibCython']
        return (ext == '.py' and not
                '~' in filename and not
                '#' in filename and not
                filename.startswith('.') and not
                filename in blacklist)

    import doctest
    for dirpath, dirnames, filenames in os.walk(path):
        for dir in list(dirnames):
            if not package_matches(dir):
                # Prune the walk in place.
                dirnames.remove(dir)
        for f in filenames:
            if file_matches(f):
                if not f.endswith('.py'): continue
                filepath = os.path.join(dirpath, f)
                if os.path.getsize(filepath) == 0: continue
                filepath = filepath[:-len(".py")]
                modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
                if not [ 1 for match in selectors if match(modulename) ]:
                    continue
                if [ 1 for match in exclude_selectors if match(modulename) ]:
                    continue
                if 'in_gdb' in modulename:
                    # These should only be imported from gdb.
                    continue
                module = __import__(modulename)
                # __import__ returns the top-level package; walk down.
                for x in modulename.split('.')[1:]:
                    module = getattr(module, x)
                if hasattr(module, "__doc__") or hasattr(module, "__test__"):
                    try:
                        suite.addTest(doctest.DocTestSuite(module))
                    except ValueError: # no tests
                        pass
class EndToEndTest(unittest.TestCase):
    """
    This is a test of build/*.srctree files, where srctree defines a full
    directory structure and its header gives a list of commands to run.
    """
    cython_root = os.path.dirname(os.path.abspath(__file__))

    def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None,
                 capture=True):
        self.name = os.path.splitext(os.path.basename(treefile))[0]
        self.treefile = treefile
        self.workdir = os.path.join(workdir, self.name)
        self.cleanup_workdir = cleanup_workdir
        self.stats = stats
        # When False, subprocess output goes straight to the console.
        self.capture = capture
        cython_syspath = [self.cython_root]
        for path in sys.path:
            if path.startswith(self.cython_root) and path not in cython_syspath:
                # Py3 installation and refnanny build prepend their
                # fixed paths to sys.path => prefer that over the
                # generic one (cython_root itself goes last)
                cython_syspath.append(path)
        self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
        unittest.TestCase.__init__(self)

    def shortDescription(self):
        return "End-to-end %s" % self.name

    def setUp(self):
        """Unpack the srctree into the workdir and chdir into it."""
        from Cython.TestUtils import unpack_source_tree
        _, self.commands = unpack_source_tree(self.treefile, self.workdir)
        self.old_dir = os.getcwd()
        os.chdir(self.workdir)
        if self.workdir not in sys.path:
            sys.path.insert(0, self.workdir)

    def tearDown(self):
        if self.cleanup_workdir:
            # Retry a few times; files may still be in use briefly.
            for trial in range(5):
                try:
                    shutil.rmtree(self.workdir)
                except OSError:
                    time.sleep(0.1)
                else:
                    break
        os.chdir(self.old_dir)

    def _try_decode(self, content):
        # Subprocess output may not be UTF-8; fall back to Latin-1.
        try:
            return content.decode()
        except UnicodeDecodeError:
            return content.decode('iso-8859-1')

    def runTest(self):
        """Run each command from the srctree header in a subprocess;
        fail (and dump all captured output) on the first non-zero exit."""
        self.success = False
        # Expand the CYTHON/CYTHONIZE/PYTHON placeholders in the commands.
        commands = (self.commands
            .replace("CYTHONIZE", "PYTHON %s" % os.path.join(self.cython_root, 'cythonize.py'))
            .replace("CYTHON", "PYTHON %s" % os.path.join(self.cython_root, 'cython.py'))
            .replace("PYTHON", sys.executable))
        old_path = os.environ.get('PYTHONPATH')
        env = dict(os.environ)
        new_path = self.cython_syspath
        if old_path:
            new_path = new_path + os.pathsep + old_path
        env['PYTHONPATH'] = new_path
        cmd = []
        out = []
        err = []
        for command_no, command in enumerate(filter(None, commands.splitlines()), 1):
            with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
                                 'etoe-build' if ' setup.py ' in command else 'etoe-run'):
                if self.capture:
                    p = subprocess.Popen(command,
                                         stderr=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         shell=True,
                                         env=env)
                    _out, _err = p.communicate()
                    res = p.returncode
                else:
                    p = subprocess.call(command,
                                        shell=True,
                                        env=env)
                    _out, _err = b'', b''
                    res = p
                cmd.append(command)
                out.append(_out)
                err.append(_err)
            if res != 0:
                for c, o, e in zip(cmd, out, err):
                    sys.stderr.write("%s\n%s\n%s\n\n" % (
                        c, self._try_decode(o), self._try_decode(e)))
            self.assertEqual(0, res, "non-zero exit status")
        self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
    """Builds and runs the embedding demo (Demos/embed) via its Makefile
    to verify that Python can be embedded in a C main program."""

    working_dir = "Demos/embed"

    def setUp(self):
        self.old_dir = os.getcwd()
        os.chdir(self.working_dir)
        os.system(
            "make PYTHON='%s' clean > /dev/null" % sys.executable)

    def tearDown(self):
        try:
            os.system(
                "make PYTHON='%s' clean > /dev/null" % sys.executable)
        except:
            pass
        os.chdir(self.old_dir)

    def test_embed(self):
        """Locate libpython and run the Makefile's 'test' target."""
        libname = sysconfig.get_config_var('LIBRARY')
        libdir = sysconfig.get_config_var('LIBDIR')
        # Search a few fallback locations for the static/shared libpython.
        if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
            libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
            if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
                libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
                if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
                    # report the error for the original directory
                    libdir = sysconfig.get_config_var('LIBDIR')
        cython = 'cython.py'
        if sys.version_info[0] >=3 and CY3_DIR:
            cython = os.path.join(CY3_DIR, cython)
        cython = os.path.abspath(os.path.join('..', '..', cython))
        self.assertEqual(0, os.system(
            "make PYTHON='%s' CYTHON='%s' LIBDIR1='%s' test > make.output" % (sys.executable, cython, libdir)))
        try:
            os.remove('make.output')
        except OSError:
            pass
class MissingDependencyExcluder(object):
    """Excludes tests whose external dependency module fails to import.

    `deps` maps a selector string (see string_selector) to the name of the
    module the matching tests require.
    """
    def __init__(self, deps):
        # deps: { matcher func : module name }
        self.exclude_matchers = []
        for matcher, module_name in deps.items():
            try:
                __import__(module_name)
            except ImportError:
                self.exclude_matchers.append(string_selector(matcher))
        self.tests_missing_deps = []

    def __call__(self, testname, tags=None):
        if any(match(testname, tags) for match in self.exclude_matchers):
            self.tests_missing_deps.append(testname)
            return True
        return False
class VersionDependencyExcluder(object):
    """Excludes tests that cannot run on the current Python version.

    `deps` maps a version tuple to a (compare, matcher) pair; when
    compare(sys.version_info, version) holds, the matcher is activated.
    """
    def __init__(self, deps):
        # deps: { version : matcher func }
        from sys import version_info
        self.exclude_matchers = [
            matcher
            for ver, (compare, matcher) in deps.items()
            if compare(version_info, ver)
        ]
        self.tests_missing_deps = []

    def __call__(self, testname, tags=None):
        for match in self.exclude_matchers:
            if match(testname):
                self.tests_missing_deps.append(testname)
                return True
        return False
class FileListExcluder(object):
    """Excludes tests listed in a file (one name per line, '#' starts a
    comment line; only the first whitespace-separated token counts)."""
    def __init__(self, list_file, verbose=False):
        self.verbose = verbose
        self.excludes = {}
        self._list_file = os.path.relpath(list_file)
        with open(list_file) as f:
            for raw_line in f:
                entry = raw_line.strip()
                if entry and not entry.startswith('#'):
                    self.excludes[entry.split()[0]] = True

    def __call__(self, testname, tags=None):
        # Match either the full dotted name or its last component.
        excluded = (testname in self.excludes
                    or testname.split('.')[-1] in self.excludes)
        if excluded and self.verbose:
            print("Excluding %s because it's listed in %s"
                  % (testname, self._list_file))
        return excluded
class TagsSelector(object):
    """Selects tests whose tag list for `tag` contains `value`."""
    def __init__(self, tag, value):
        self.tag = tag
        self.value = value

    def __call__(self, testname, tags=None):
        # Without tag information nothing can match.
        return tags is not None and self.value in tags[self.tag]
class RegExSelector(object):
    """Selects tests whose name matches a case-insensitive regular expression."""
    def __init__(self, pattern_string):
        try:
            compiled = re.compile(pattern_string, re.I|re.U)
        except re.error:
            print('Invalid pattern: %r' % pattern_string)
            raise
        self.regex_matches = compiled.search

    def __call__(self, testname, tags=None):
        return self.regex_matches(testname)
def string_selector(s):
    """Build a selector from a string: 'tag:value' gives a TagsSelector,
    anything else is treated as a regex pattern."""
    if ':' not in s:
        return RegExSelector(s)
    tag, value = s.split(':', 1)
    return TagsSelector(tag, value)
class ShardExcludeSelector(object):
    # This is an exclude selector so it can override the (include) selectors.
    # It may not provide uniform distribution (in time or count), but is a
    # deterministic partition of the tests which is important.
    def __init__(self, shard_num, shard_count):
        self.shard_num = shard_num
        self.shard_count = shard_count

    def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=IS_PY2):
        # Cannot use simple hash() here as shard processes might use different
        # hash seeds.  CRC32 is fast and simple, but might return negative
        # values in Py2, hence the sign mask there.
        if _is_py2:
            hashval = _hash(testname) & 0x7fffffff
        else:
            hashval = _hash(testname.encode())
        return hashval % self.shard_count != self.shard_num
class PendingThreadsError(RuntimeError):
    """Raised when left-over threads are still running after a test."""
    pass
# Threads already reported once; with ignore_seen=True they are not reported again.
threads_seen = []


def check_thread_termination(ignore_seen=True):
    """Raise PendingThreadsError if test code left extra threads running.

    Each stray thread gets up to 2 seconds to finish.  The current thread and
    the 'time_stamper' helper thread are always ignored.
    """
    if threading is None: # no threading enabled in CPython
        return
    current = threading.current_thread()
    blocking_threads = []
    for t in threading.enumerate():
        if not t.is_alive() or t == current or t.name == 'time_stamper':
            continue
        t.join(timeout=2)
        if t.is_alive():
            if not ignore_seen:
                blocking_threads.append(t)
                continue
            # Report each left-over thread only once across calls.
            for seen in threads_seen:
                if t is seen:
                    break
            else:
                threads_seen.append(t)
                blocking_threads.append(t)
    if not blocking_threads:
        return
    sys.stderr.write("warning: left-over threads found after running test:\n")
    for t in blocking_threads:
        sys.stderr.write('...%s\n' % repr(t))
    raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
    """Run *cmd* and return its combined stdout/stderr as text ('' on OS errors)."""
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        stdout, _ = proc.communicate()
    except OSError:
        # e.g. the executable does not exist
        return ''
    return stdout.decode('UTF-8')
def get_version():
    """Return the Cython version string, annotated with the git HEAD commit and
    an 'uncommitted changes' marker when running from a modified checkout."""
    from Cython.Compiler.Version import version as cython_version
    full_version = cython_version
    top = os.path.dirname(os.path.abspath(__file__))
    if not os.path.exists(os.path.join(top, '.git')):
        return full_version
    # Query git from the checkout directory, restoring the cwd afterwards.
    old_dir = os.getcwd()
    try:
        os.chdir(top)
        head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
        version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
        diff = subprocess_output(['git', 'diff', '--stat']).strip()
    finally:
        os.chdir(old_dir)
    if head_commit != version_commit:
        full_version += " " + head_commit
    if diff:
        full_version += ' + uncommitted changes'
    return full_version
# Keep references to the real streams: tests may replace sys.stdout/sys.stderr.
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr


def flush_and_terminate(status):
    """Flush the original output streams, then hard-exit with *status*.

    Uses os._exit() instead of sys.exit() so that left-over (non-daemon)
    threads cannot keep the process alive.
    """
    try:
        _orig_stdout.flush()
        _orig_stderr.flush()
    finally:
        os._exit(status)
def main():
    """Command line entry point: parse options, then run the tests,
    either in this process or sharded over a multiprocessing pool."""
    global DISTDIR, WITH_CYTHON
    # DISTDIR: directory containing this script (and the 'tests' directory).
    DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))

    # '--debug...' arguments that name a DebugFlags attribute enable that
    # compiler debug flag and are not passed on to the option parser.
    from Cython.Compiler import DebugFlags
    args = []
    for arg in sys.argv[1:]:
        if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
            setattr(DebugFlags, arg[2:].replace('-', '_'), True)
        else:
            args.append(arg)

    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--no-cleanup", dest="cleanup_workdir",
                      action="store_false", default=True,
                      help="do not delete the generated C files (allows passing --no-cython on next run)")
    parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
                      action="store_false", default=True,
                      help="do not delete the generated shared library files (allows manual module experimentation)")
    parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
                      action="store_false", default=True,
                      help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
    parser.add_option("--no-cython", dest="with_cython",
                      action="store_false", default=True,
                      help="do not run the Cython compiler, only the C compiler")
    parser.add_option("--compiler", dest="compiler", default=None,
                      help="C compiler type")
    backend_list = ','.join(BACKENDS)
    parser.add_option("--backends", dest="backends", default=backend_list,
                      help="select backends to test (default: %s)" % backend_list)
    parser.add_option("--no-c", dest="use_c",
                      action="store_false", default=True,
                      help="do not test C compilation backend")
    parser.add_option("--no-cpp", dest="use_cpp",
                      action="store_false", default=True,
                      help="do not test C++ compilation backend")
    parser.add_option("--no-unit", dest="unittests",
                      action="store_false", default=True,
                      help="do not run the unit tests")
    parser.add_option("--no-doctest", dest="doctests",
                      action="store_false", default=True,
                      help="do not run the doctests")
    parser.add_option("--no-file", dest="filetests",
                      action="store_false", default=True,
                      help="do not run the file based tests")
    parser.add_option("--no-pyregr", dest="pyregr",
                      action="store_false", default=True,
                      help="do not run the regression tests of CPython in tests/pyregr/")
    parser.add_option("--no-examples", dest="examples",
                      action="store_false", default=True,
                      help="Do not run the documentation tests in the examples directory.")
    parser.add_option("--no-code-style", dest="code_style",
                      action="store_false", default=True,
                      help="Do not run the code style (PEP8) checks.")
    parser.add_option("--cython-only", dest="cython_only",
                      action="store_true", default=False,
                      help="only compile pyx to c, do not run C compiler or run the tests")
    parser.add_option("--no-refnanny", dest="with_refnanny",
                      action="store_false", default=True,
                      help="do not regression test reference counting")
    parser.add_option("--no-fork", dest="fork",
                      action="store_false", default=True,
                      help="do not fork to run tests")
    parser.add_option("--sys-pyregr", dest="system_pyregr",
                      action="store_true", default=False,
                      help="run the regression tests of the CPython installation")
    parser.add_option("-x", "--exclude", dest="exclude",
                      action="append", metavar="PATTERN",
                      help="exclude tests matching the PATTERN")
    parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
                      type=int, default=1,
                      help="shard this run into several parallel runs")
    parser.add_option("--shard_num", dest="shard_num", metavar="K",
                      type=int, default=-1,
                      help="test only this single shard")
    parser.add_option("--profile", dest="profile",
                      action="store_true", default=False,
                      help="enable profiling of the tests")
    parser.add_option("-C", "--coverage", dest="coverage",
                      action="store_true", default=False,
                      help="collect source coverage data for the Compiler")
    parser.add_option("--coverage-xml", dest="coverage_xml",
                      action="store_true", default=False,
                      help="collect source coverage data for the Compiler in XML format")
    parser.add_option("--coverage-html", dest="coverage_html",
                      action="store_true", default=False,
                      help="collect source coverage data for the Compiler in HTML format")
    parser.add_option("-A", "--annotate", dest="annotate_source",
                      action="store_true", default=True,
                      help="generate annotated HTML versions of the test source files")
    parser.add_option("--no-annotate", dest="annotate_source",
                      action="store_false",
                      help="do not generate annotated HTML versions of the test source files")
    parser.add_option("-v", "--verbose", dest="verbosity",
                      action="count", default=0,
                      help="display test progress, pass twice to print test names")
    parser.add_option("-T", "--ticket", dest="tickets",
                      action="append",
                      help="a bug ticket number to run the respective test in 'tests/*'")
    parser.add_option("-3", dest="language_level",
                      action="store_const", const=3, default=2,
                      help="set language level to Python 3 (useful for running the CPython regression tests)'")
    parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
                      help="write test results in XML to directory DIR")
    parser.add_option("--exit-ok", dest="exit_ok", default=False,
                      action="store_true",
                      help="exit without error code even on test failures")
    parser.add_option("--failfast", dest="failfast", default=False,
                      action="store_true",
                      help="stop on first failure or error")
    parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
                      help=("Directory to look for the file based "
                            "tests (the ones which are deactivated with '--no-file'."))
    parser.add_option("--examples-dir", dest="examples_dir",
                      default=os.path.join(DISTDIR, 'docs', 'examples'),
                      help="Directory to look for documentation example tests")
    parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
                      help="working directory")
    parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
                      help="Cython installation directory (default: use local source version)")
    parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
                      help="configure for easier use with a debugger (e.g. gdb)")
    parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
                      help="use pyximport to automatically compile imported .pyx and .py files")
    parser.add_option("--watermark", dest="watermark", default=None,
                      help="deterministic generated by string")
    parser.add_option("--use_common_utility_dir", default=False, action="store_true")
    parser.add_option("--use_formal_grammar", default=False, action="store_true")
    parser.add_option("--test_determinism", default=False, action="store_true",
                      help="test whether Cython's output is deterministic")
    parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
                      help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
    parser.add_option("--no-capture", dest="capture", default=True, action="store_false",
                      help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")

    options, cmd_args = parser.parse_args(args)

    if options.with_cython and sys.version_info[0] >= 3:
        sys.path.insert(0, options.cython_dir)

    # requires glob with the wildcard.
    if sys.version_info < (3, 5) or cmd_args:
        options.code_style = False

    WITH_CYTHON = options.with_cython

    # Coverage collection only happens in the main (unsharded) process.
    coverage = None
    if options.coverage or options.coverage_xml or options.coverage_html:
        if not WITH_CYTHON:
            options.coverage = options.coverage_xml = options.coverage_html = False
        elif options.shard_num == -1:
            print("Enabling coverage analysis")
            from coverage import coverage as _coverage
            coverage = _coverage(branch=True)
            coverage.erase()
            coverage.start()

    if options.xml_output_dir:
        shutil.rmtree(options.xml_output_dir, ignore_errors=True)

    if options.capture:
        keep_alive_interval = 10
    else:
        keep_alive_interval = None
    if options.shard_count > 1 and options.shard_num == -1:
        # Fan the shards out over a process pool and merge their results.
        import multiprocessing
        pool = multiprocessing.Pool(options.shard_count)
        tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
        errors = []
        # NOTE: create process pool before time stamper thread to avoid forking issues.
        total_time = time.time()
        stats = Stats()
        with time_stamper_thread(interval=keep_alive_interval):
            for shard_num, shard_stats, return_code in pool.imap_unordered(runtests_callback, tasks):
                if return_code != 0:
                    errors.append(shard_num)
                    sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
                sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
                stats.update(shard_stats)
        pool.close()
        pool.join()
        total_time = time.time() - total_time
        sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
        if errors:
            sys.stderr.write("Errors for shards %s\n" % ", ".join([str(e) for e in errors]))
            return_code = 1
        else:
            return_code = 0
    else:
        with time_stamper_thread(interval=keep_alive_interval):
            _, stats, return_code = runtests(options, cmd_args, coverage)

    if coverage:
        if options.shard_count > 1 and options.shard_num == -1:
            coverage.combine()
        coverage.stop()

    stats.print_stats(sys.stderr)
    if coverage:
        save_coverage(coverage, options)

    sys.stderr.write("ALL DONE\n")
    sys.stderr.flush()

    try:
        check_thread_termination(ignore_seen=False)
    except PendingThreadsError:
        # normal program exit won't kill the threads, do it the hard way here
        flush_and_terminate(return_code)
    else:
        sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10):
    """
    Print regular time stamps into the build logs to find slow tests.
    @param interval: time interval in seconds (falsy/negative disables stamping)
    """
    if not interval or interval < 0:
        # Do nothing
        yield
        return

    try:
        _xrange = xrange
    except NameError:
        _xrange = range

    import threading
    import datetime
    from time import sleep

    # The stamper wakes 4 times per second to check the stop flag.
    interval = _xrange(interval * 4)
    now = datetime.datetime.now
    stop = False

    # We capture stderr in some places.
    # => make sure we write to the real (original) stderr of the test runner.
    stderr = os.dup(2)
    def write(s):
        os.write(stderr, s if type(s) is bytes else s.encode('ascii'))

    def time_stamper():
        while True:
            for _ in interval:
                if stop:
                    return
                sleep(1./4)
            write('\n#### %s\n' % now())

    thread = threading.Thread(target=time_stamper, name='time_stamper')
    thread.setDaemon(True)  # Py2 ...
    thread.start()
    try:
        yield
    finally:
        # Signal the stamper (via the closure variable) and wait for it.
        stop = True
        thread.join()
        os.close(stderr)
def configure_cython(options):
    """Apply global Cython compiler settings derived from the test options."""
    global CompilationOptions, pyrex_default_options, cython_compile
    from Cython.Compiler.Options import \
        CompilationOptions, \
        default_options as pyrex_default_options
    from Cython.Compiler.Options import _directive_defaults as directive_defaults
    from Cython.Compiler import Errors
    Errors.LEVEL = 0  # show all warnings
    from Cython.Compiler import Options
    Options.generate_cleanup_code = 3  # complete cleanup code
    from Cython.Compiler import DebugFlags
    DebugFlags.debug_temp_code_comments = 1
    pyrex_default_options['formal_grammar'] = options.use_formal_grammar
    if options.profile:
        directive_defaults['profile'] = True
    if options.watermark:
        import Cython.Compiler.Version
        Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
    """Write out the collected coverage data in every requested report format."""
    reports = [
        (options.coverage, lambda: coverage.report(show_missing=0)),
        (options.coverage_xml, lambda: coverage.xml_report(outfile="coverage-report.xml")),
        (options.coverage_html, lambda: coverage.html_report(directory="coverage-report-html")),
    ]
    for wanted, emit in reports:
        if wanted:
            emit()
def runtests_callback(args):
    """Multiprocessing pool entry point: run one shard of the test suite.

    *args* is a (options, cmd_args, shard_num) tuple.
    """
    options, cmd_args, shard_num = args[0], args[1], args[2]
    options.shard_num = shard_num
    return runtests(options, cmd_args)
def runtests(options, cmd_args, coverage=None):
    """Build and run the selected test suites for one shard.

    Returns a (shard_num, stats, error_status) tuple; error_status is 0 on
    success (and always 0 when --exit-ok was given).
    """
    WITH_CYTHON = options.with_cython
    ROOTDIR = os.path.abspath(options.root_dir)
    WORKDIR = os.path.abspath(options.work_dir)

    if WITH_CYTHON:
        configure_cython(options)

    xml_output_dir = options.xml_output_dir
    if options.shard_num > -1:
        # Give each shard its own work directory and XML output directory.
        WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
        if xml_output_dir:
            xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)

    # RUN ALL TESTS!
    UNITTEST_MODULE = "Cython"
    UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
    if WITH_CYTHON:
        # Clean out stale build artefacts, keeping shared support directories.
        if os.path.exists(WORKDIR):
            for path in os.listdir(WORKDIR):
                if path in ("support", "Cy3"): continue
                shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
    if not os.path.exists(WORKDIR):
        os.makedirs(WORKDIR)

    if options.shard_num <= 0:
        sys.stderr.write("Python %s\n" % sys.version)
        sys.stderr.write("\n")
        if WITH_CYTHON:
            sys.stderr.write("Running tests against Cython %s\n" % get_version())
        else:
            sys.stderr.write("Running tests without Cython.\n")

    if options.for_debugging:
        # Keep build output around and stay in-process for debugger use.
        options.cleanup_workdir = False
        options.cleanup_sharedlibs = False
        options.fork = False
        if WITH_CYTHON and include_debugger:
            from Cython.Compiler.Options import default_options as compiler_default_options
            compiler_default_options['gdb_debug'] = True
            compiler_default_options['output_dir'] = os.getcwd()

    if IS_PYPY:
        if options.with_refnanny:
            sys.stderr.write("Disabling refnanny in PyPy\n")
            options.with_refnanny = False

    if options.with_refnanny:
        # Build the refnanny extension and enable it for all compilations.
        from pyximport.pyxbuild import pyx_to_dll
        libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
                             build_in_temp=True,
                             pyxbuild_dir=os.path.join(WORKDIR, "support"))
        sys.path.insert(0, os.path.split(libpath)[0])
        CFLAGS.append("-DCYTHON_REFNANNY=1")

    if xml_output_dir and options.fork:
        # doesn't currently work together
        sys.stderr.write("Disabling forked testing to support XML test output\n")
        options.fork = False

    if WITH_CYTHON:
        sys.stderr.write("Using Cython language level %d.\n" % options.language_level)

    test_bugs = False
    if options.tickets:
        for ticket_number in options.tickets:
            test_bugs = True
            cmd_args.append('ticket:%s' % ticket_number)
    if not test_bugs:
        for selector in cmd_args:
            if selector.startswith('bugs'):
                test_bugs = True

    selectors = [ string_selector(r) for r in cmd_args ]
    verbose_excludes = selectors or options.verbosity >= 2
    if not selectors:
        # No explicit selection: include everything.
        selectors = [ lambda x, tags=None: True ]

    # Check which external modules are not present and exclude tests
    # which depends on them (by prefix)
    missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
    version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
    exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit

    try:
        import IPython.core.release
        if list(IPython.core.release._ver) < [1, 0, 0]:
            raise ImportError
    except (ImportError, AttributeError, TypeError):
        exclude_selectors.append(RegExSelector('IPython'))

    try:
        raise ImportError("Jedi typer is currently broken, see GH#1845")
        import jedi
        if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
            raise ImportError
    except (ImportError, AttributeError, TypeError):
        exclude_selectors.append(RegExSelector('Jedi'))

    if options.exclude:
        exclude_selectors += [ string_selector(r) for r in options.exclude ]

    if not COMPILER_HAS_INT128 or not IS_CPYTHON:
        exclude_selectors += [RegExSelector('int128')]

    if options.shard_num > -1:
        exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))

    if not test_bugs:
        # Exclude known-broken tests listed in the platform's bug files.
        bug_files = [
            ('bugs.txt', True),
            ('pypy_bugs.txt', IS_PYPY),
            ('windows_bugs.txt', sys.platform == 'win32'),
            ('cygwin_bugs.txt', sys.platform == 'cygwin')
        ]
        exclude_selectors += [
            FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
                             verbose=verbose_excludes)
            for bugs_file_name, condition in bug_files if condition
        ]

    global COMPILER
    if options.compiler:
        COMPILER = options.compiler

    selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
    backends = []
    for backend in selected_backends:
        if backend == 'c' and not options.use_c:
            continue
        elif backend == 'cpp' and not options.use_cpp:
            continue
        elif backend not in BACKENDS:
            sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
                backend, ','.join(BACKENDS)))
            sys.exit(1)
        backends.append(backend)
    if options.shard_num <= 0:
        sys.stderr.write("Backends: %s\n" % ','.join(backends))
    languages = backends

    if 'TRAVIS' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
        bugs_file_name = 'travis_macos_cpp_bugs.txt'
        exclude_selectors += [
            FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
                             verbose=verbose_excludes)
        ]

    if options.use_common_utility_dir:
        common_utility_dir = os.path.join(WORKDIR, 'utility_code')
        if not os.path.exists(common_utility_dir):
            os.makedirs(common_utility_dir)
    else:
        common_utility_dir = None

    sys.stderr.write("\n")

    test_suite = unittest.TestSuite()
    stats = Stats()

    if options.unittests:
        collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)

    if options.doctests:
        collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)

    if options.filetests and languages:
        filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
                                options, options.pyregr, languages, test_bugs,
                                options.language_level, common_utility_dir,
                                options.pythran_dir, add_embedded_test=True, stats=stats)
        test_suite.addTest(filetests.build_suite())

    if options.examples and languages:
        # Documentation examples are only compiled, not executed.
        for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
            filetests = TestBuilder(subdirectory, WORKDIR, selectors, exclude_selectors,
                                    options, options.pyregr, languages, test_bugs,
                                    options.language_level, common_utility_dir,
                                    options.pythran_dir,
                                    default_mode='compile', stats=stats)
            test_suite.addTest(filetests.build_suite())

    if options.system_pyregr and languages:
        sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
        if not os.path.isdir(sys_pyregr_dir):
            sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
        if os.path.isdir(sys_pyregr_dir):
            filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
                                    options, True, languages, test_bugs,
                                    sys.version_info[0], common_utility_dir, stats=stats)
            sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
            test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))

    if options.code_style and options.shard_num <= 0:
        try:
            import pycodestyle
        except ImportError:
            # Hack to make the exclusion visible.
            missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
        else:
            test_suite.addTest(TestCodeFormat(options.cython_dir))

    if xml_output_dir:
        from Cython.Tests.xmlrunner import XMLTestRunner
        if not os.path.exists(xml_output_dir):
            try:
                os.makedirs(xml_output_dir)
            except OSError:
                pass # concurrency issue?
        test_runner = XMLTestRunner(output=xml_output_dir,
                                    verbose=options.verbosity > 0)
        if options.failfast:
            sys.stderr.write("--failfast not supported with XML runner\n")
    else:
        text_runner_options = {}
        if options.failfast:
            text_runner_options['failfast'] = True
        test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)

    if options.pyximport_py:
        from pyximport import pyximport
        pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
                          load_py_module_on_import_failure=True, inplace=True)

    try:
        gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
    except AttributeError:
        pass # not available on PyPy

    result = test_runner.run(test_suite)

    if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
        shutil.rmtree(common_utility_dir)

    if missing_dep_excluder.tests_missing_deps:
        sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
        for test in missing_dep_excluder.tests_missing_deps:
            sys.stderr.write("   %s\n" % test)

    if options.with_refnanny:
        import refnanny
        sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))

    if options.exit_ok:
        return options.shard_num, stats, 0
    else:
        return options.shard_num, stats, not result.wasSuccessful()
if __name__ == '__main__':
    # Report uncaught errors, then make sure stray test threads cannot keep
    # the process alive before exiting with a failure status.
    try:
        main()
    except Exception:
        traceback.print_exc()
        try:
            check_thread_termination(ignore_seen=False)
        except PendingThreadsError:
            # normal program exit won't kill the threads, do it the hard way here
            flush_and_terminate(1)
        sys.exit(1)
|
wgmine.py | import time
import argparse
import base64
from multiprocessing import Process
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
# benchmark cryptography: 110000K/s
# benchmark donna: 48000K/s
def mine(name):
    """Generate X25519 key pairs forever, printing the base64 public and
    private keys of every pair whose public key starts with *name*
    (case-sensitive).  Never returns."""
    prefix = name.encode("ascii")
    while True:
        private = X25519PrivateKey.generate()
        encoded_public = base64.b64encode(
            private.public_key().public_bytes(
                serialization.Encoding.Raw, serialization.PublicFormat.Raw
            )
        )
        if not encoded_public.startswith(prefix):
            continue
        encoded_private = base64.b64encode(
            private.private_bytes(
                serialization.Encoding.Raw,
                serialization.PrivateFormat.Raw,
                serialization.NoEncryption(),
            )
        )
        print(encoded_public.decode("ascii"), encoded_private.decode("ascii"))
def parse_arguments():
    """Define and parse the command line interface."""
    parser = argparse.ArgumentParser()
    parser.add_argument("name", type=str, help="name to search (start of pubkey)")
    parser.add_argument(
        "-m", "--multiprocessing",
        type=int,
        default=6,
        help="number of processes to spawn",
    )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_arguments()
    processes = []
    s = time.perf_counter()
    try:
        # Start ALL workers before joining any of them.  The original code
        # joined inside the spawn loop, which blocked forever on the first
        # (never-terminating) miner and left the remaining -m processes
        # unspawned, so only one core was ever used.
        for i in range(args.multiprocessing):
            p = Process(target=mine, args=(args.name,))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
    except KeyboardInterrupt:
        pass
    finally:
        e = time.perf_counter()
        print(f"Elapsed: {e - s}s")
        # Make sure no miner outlives the parent.
        for p in processes:
            p.kill()
|
ui_utils.py | # -*- coding: utf-8 -*-
import collections
import logging
import os
import platform
import queue
import re
import signal
import subprocess
import sys
import textwrap
import threading
import time
import tkinter as tk
import tkinter.font
import traceback
import warnings
from tkinter import filedialog, messagebox, ttk
from typing import Callable, List, Optional, Tuple, Union # @UnusedImport
from _tkinter import TclError
from thonny import get_workbench, misc_utils, tktextext
from thonny.common import TextRange
from thonny.languages import get_button_padding, tr
from thonny.misc_utils import (
running_on_linux,
running_on_mac_os,
running_on_rpi,
running_on_windows,
)
from thonny.tktextext import TweakableText
PARENS_REGEX = re.compile(r"[\(\)\{\}\[\]]")
logger = logging.getLogger(__name__)
class CommonDialog(tk.Toplevel):
    """Base toplevel dialog providing focus handling and padding helpers."""

    def __init__(self, master=None, cnf={}, **kw):
        super().__init__(master=master, cnf=cnf, **kw)
        self.bind("<FocusIn>", self._unlock_on_focus_in, True)

    def _unlock_on_focus_in(self, event):
        # If the dialog receives focus while not mapped, deiconify it and
        # restore the keyboard focus to the widget that had it.
        if not self.winfo_ismapped():
            focussed_widget = self.focus_get()
            self.deiconify()
            if focussed_widget:
                focussed_widget.focus_set()

    def get_padding(self):
        # Outer padding of the dialog in pixels (2 em-s).
        return ems_to_pixels(2)

    def get_internal_padding(self):
        # Padding between widgets, a quarter of the outer padding.
        return self.get_padding() // 4
class CommonDialogEx(CommonDialog):
    """CommonDialog with a themed main frame and Escape/close-button handling."""

    def __init__(self, master=None, cnf={}, **kw):
        super().__init__(master=master, cnf=cnf, **kw)

        # Need to fill the dialog with a frame to gain theme support
        self.main_frame = ttk.Frame(self)
        self.main_frame.grid(row=0, column=0, sticky="nsew")
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

        self.bind("<Escape>", self.on_close, True)
        self.protocol("WM_DELETE_WINDOW", self.on_close)

    def on_close(self, event=None):
        self.destroy()
class QueryDialog(CommonDialogEx):
    """Dialog asking the user for a single string value.

    The result is available via get_result(): the entered string after OK,
    or None after Cancel / closing the dialog.
    """

    def __init__(
        self,
        master,
        title: str,
        prompt: str,
        initial_value: str = "",
        options: Optional[List[str]] = None,
        entry_width: Optional[int] = None,
    ):
        """
        :param master: parent widget
        :param title: window title
        :param prompt: label text shown above the entry
        :param initial_value: pre-filled entry content
        :param options: when given, a combobox with these choices is shown
            instead of a plain entry
        :param entry_width: width of the entry/combobox in characters
        """
        super().__init__(master)

        # NB! Using None as the default instead of the original mutable
        # default argument ([]); semantics for callers are unchanged.
        if options is None:
            options = []

        self.var = tk.StringVar(value=initial_value)
        self.result = None

        margin = self.get_padding()
        spacing = margin // 2

        self.title(title)

        self.prompt_label = ttk.Label(self.main_frame, text=prompt)
        self.prompt_label.grid(row=1, column=1, columnspan=2, padx=margin, pady=(margin, spacing))

        if options:
            self.entry_widget = ttk.Combobox(
                self.main_frame, textvariable=self.var, values=options, height=15, width=entry_width
            )
        else:
            self.entry_widget = ttk.Entry(self.main_frame, textvariable=self.var, width=entry_width)
        # Both Return keys accept the value.
        self.entry_widget.bind("<Return>", self.on_ok, True)
        self.entry_widget.bind("<KP_Enter>", self.on_ok, True)
        self.entry_widget.grid(
            row=3, column=1, columnspan=2, sticky="we", padx=margin, pady=(0, margin)
        )

        self.ok_button = ttk.Button(
            self.main_frame, text=tr("OK"), command=self.on_ok, default="active"
        )
        self.ok_button.grid(row=5, column=1, padx=(margin, spacing), pady=(0, margin), sticky="e")
        self.cancel_button = ttk.Button(self.main_frame, text=tr("Cancel"), command=self.on_cancel)
        self.cancel_button.grid(row=5, column=2, padx=(0, margin), pady=(0, margin), sticky="e")

        self.main_frame.columnconfigure(1, weight=1)
        self.entry_widget.focus_set()

    def on_ok(self, event=None):
        self.result = self.var.get()
        self.destroy()

    def on_cancel(self, event=None):
        self.result = None
        self.destroy()

    def get_result(self) -> Optional[str]:
        return self.result
def ask_string(
    title: str,
    prompt: str,
    initial_value: str = "",
    options: Optional[List[str]] = None,
    entry_width: Optional[int] = None,
    master=None,
):
    """Show a QueryDialog and return the entered string, or None if cancelled.

    NB! The default for *options* is None instead of the original mutable
    default argument ([]); an empty list is passed on, so callers see no change.
    """
    dlg = QueryDialog(
        master,
        title,
        prompt,
        initial_value=initial_value,
        options=options if options is not None else [],
        entry_width=entry_width,
    )
    show_dialog(dlg, master)
    return dlg.get_result()
class CustomMenubar(ttk.Frame):
    """A menubar built from ttk Labels that post tk menus on click."""

    def __init__(self, master):
        ttk.Frame.__init__(self, master, style="CustomMenubar.TFrame")
        self._menus = []
        self._opened_menu = None

        ttk.Style().map(
            "CustomMenubarLabel.TLabel",
            background=[
                ("!active", lookup_style_option("Menubar", "background", "gray")),
                ("active", lookup_style_option("Menubar", "activebackground", "LightYellow")),
            ],
            foreground=[
                ("!active", lookup_style_option("Menubar", "foreground", "black")),
                ("active", lookup_style_option("Menubar", "activeforeground", "black")),
            ],
        )

    def add_cascade(self, label, menu):
        """Add a labelled entry that pops up *menu* below itself when clicked."""
        label_widget = ttk.Label(
            self,
            style="CustomMenubarLabel.TLabel",
            text=label,
            padding=[6, 3, 6, 2],
            font="TkDefaultFont",
        )

        # Only the first label gets left padding.
        if len(self._menus) == 0:
            padx = (6, 0)
        else:
            padx = 0
        label_widget.grid(row=0, column=len(self._menus), padx=padx)

        def enter(event):
            label_widget.state(("active",))

            # Don't know how to open this menu when another menu is open
            # another tk_popup just doesn't work unless old menu is closed by click or Esc
            # https://stackoverflow.com/questions/38081470/is-there-a-way-to-know-if-tkinter-optionmenu-dropdown-is-active
            # unpost doesn't work in Win and Mac: https://www.tcl.tk/man/tcl8.5/TkCmd/menu.htm#M62
            # print("ENTER", menu, self._opened_menu)
            if self._opened_menu is not None:
                self._opened_menu.unpost()
                click(event)

        def leave(event):
            label_widget.state(("!active",))

        def click(event):
            try:
                # print("Before")
                self._opened_menu = menu
                # Post the menu directly below the label.
                menu.tk_popup(
                    label_widget.winfo_rootx(),
                    label_widget.winfo_rooty() + label_widget.winfo_height(),
                )
            finally:
                # print("After")
                self._opened_menu = None

        label_widget.bind("<Enter>", enter, True)
        label_widget.bind("<Leave>", leave, True)
        label_widget.bind("<1>", click, True)
        self._menus.append(menu)
class AutomaticPanedWindow(tk.PanedWindow):
"""
Enables inserting panes according to their position_key-s.
Automatically adds/removes itself to/from its master AutomaticPanedWindow.
Fixes some style glitches.
"""
    def __init__(self, master, position_key=None, preferred_size_in_pw=None, **kwargs):
        # position_key orders this pane among siblings when inserted with
        # pos="auto"; preferred_size_in_pw is the desired size inside the
        # parent paned window (None means "take the leftover space").
        tk.PanedWindow.__init__(self, master, border=0, **kwargs)
        self._pane_minsize = 100
        self.position_key = position_key
        self._restoring_pane_sizes = False

        self._last_window_size = (0, 0)
        self._full_size_not_final = True
        self._configure_binding = self.bind("<Configure>", self._on_window_resize, True)
        self._update_appearance_binding = self.bind(
            "<<ThemeChanged>>", self._update_appearance, True
        )
        self.bind("<B1-Motion>", self._on_mouse_dragged, True)
        self._update_appearance()

        # should be in the end, so that it can be detected when
        # constructor hasn't completed yet
        self.preferred_size_in_pw = preferred_size_in_pw
def insert(self, pos, child, **kw):
kw.setdefault("minsize", self._pane_minsize)
if pos == "auto":
# According to documentation I should use self.panes()
# but this doesn't return expected widgets
for sibling in sorted(
self.pane_widgets(),
key=lambda p: p.position_key if hasattr(p, "position_key") else 0,
):
if (
not hasattr(sibling, "position_key")
or sibling.position_key == None
or sibling.position_key > child.position_key
):
pos = sibling
break
else:
pos = "end"
if isinstance(pos, tk.Widget):
kw["before"] = pos
self.add(child, **kw)
    def add(self, child, **kw):
        # Enforce the minimum pane size, then refresh visibility and sizes.
        kw.setdefault("minsize", self._pane_minsize)
        tk.PanedWindow.add(self, child, **kw)
        self._update_visibility()
        self._check_restore_preferred_sizes()
    def remove(self, child):
        # Remove the pane, then refresh visibility and sizes.
        tk.PanedWindow.remove(self, child)
        self._update_visibility()
        self._check_restore_preferred_sizes()
    def forget(self, child):
        # Forget the pane, then refresh visibility and sizes.
        tk.PanedWindow.forget(self, child)
        self._update_visibility()
        self._check_restore_preferred_sizes()
    def destroy(self):
        # Unbind the handlers registered in __init__ before tearing down.
        self.unbind("<Configure>", self._configure_binding)
        self.unbind("<<ThemeChanged>>", self._update_appearance_binding)
        tk.PanedWindow.destroy(self)
    def is_visible(self):
        # Inside another AutomaticPanedWindow, membership among its panes
        # decides visibility; otherwise fall back to the Tk mapping state.
        if not isinstance(self.master, AutomaticPanedWindow):
            return self.winfo_ismapped()
        else:
            return self in self.master.pane_widgets()
    def pane_widgets(self):
        """Return the actual child widgets of this paned window."""
        result = []
        for pane in self.panes():
            # pane is not the widget but some kind of reference object
            assert not isinstance(pane, tk.Widget)
            result.append(self.nametowidget(str(pane)))
        return result
    def _on_window_resize(self, event):
        # Ignore the degenerate sizes reported during window construction.
        if event.width < 10 or event.height < 10:
            return

        window = self.winfo_toplevel()
        window_size = (window.winfo_width(), window.winfo_height())
        initializing = hasattr(window, "initializing") and window.initializing

        # Only react to genuine size changes after initialization, and not
        # while we are in the middle of restoring pane sizes ourselves.
        if (
            not initializing
            and not self._restoring_pane_sizes
            and (window_size != self._last_window_size or self._full_size_not_final)
        ):
            self._check_restore_preferred_sizes()
            self._last_window_size = window_size
def _on_mouse_dragged(self, event):
    """Record the sizes the user chose by dragging a sash."""
    if event.widget is not self or self._restoring_pane_sizes:
        return
    self._update_preferred_sizes()
def _update_preferred_sizes(self):
    """Remember the current on-screen size of each pane that has opted in
    (i.e. whose preferred_size_in_pw is not None)."""
    for pane in self.pane_widgets():
        if getattr(pane, "preferred_size_in_pw", None) is not None:
            if self.cget("orient") == "horizontal":
                current_size = pane.winfo_width()
            else:
                current_size = pane.winfo_height()

            # sizes below ~20 px mean layout hasn't settled yet; skip them
            if current_size > 20:
                pane.preferred_size_in_pw = current_size

                # paneconfig width/height effectively puts
                # unexplainable maxsize to some panes
                # if self.cget("orient") == "horizontal":
                #     self.paneconfig(pane, width=current_size)
                # else:
                #     self.paneconfig(pane, height=current_size)
                #
            # else:
            #     self.paneconfig(pane, width=1000, height=1000)
def _check_restore_preferred_sizes(self):
    """Restore pane sizes unless the main window is still initializing.

    The _restoring_pane_sizes flag suppresses re-entrant updates caused
    by the sash movements performed during restoration.
    """
    if getattr(self.winfo_toplevel(), "initializing", False):
        return

    self._restoring_pane_sizes = True
    try:
        self._restore_preferred_sizes()
    finally:
        self._restoring_pane_sizes = False
def _restore_preferred_sizes(self):
    """Place the sashes so each pane regains its preferred size.

    At most one pane may lack a preferred size; it receives all leftover
    space. Bails out silently while children are still being constructed
    or the window is not yet realized.
    """
    total_preferred_size = 0
    panes_without_preferred_size = []

    panes = self.pane_widgets()
    for pane in panes:
        if not hasattr(pane, "preferred_size_in_pw"):
            # child isn't fully constructed yet
            return

        if pane.preferred_size_in_pw is None:
            panes_without_preferred_size.append(pane)
        else:
            total_preferred_size += pane.preferred_size_in_pw
            # Sizes are applied via sash placement below instead of
            # paneconfig width/height, because paneconfig effectively puts
            # an unexplainable maxsize to some panes (and zooming/unzooming
            # changes the supposedly fixed panes).

    assert len(panes_without_preferred_size) <= 1

    size = self._get_size()
    if size is None:
        return

    # was: self._get_size() - total_preferred_size -- a redundant second
    # query of the value already fetched (and None-checked) above
    leftover_size = size - total_preferred_size
    used_size = 0
    for i, pane in enumerate(panes[:-1]):
        used_size += pane.preferred_size_in_pw or leftover_size
        self._place_sash(i, used_size)
        used_size += int(str(self.cget("sashwidth")))
def _get_size(self):
    """Return the extent along the pane axis, or None while unrealized."""
    if self.cget("orient") == tk.HORIZONTAL:
        extent = self.winfo_width()
    else:
        extent = self.winfo_height()

    # values below 20 px mean geometry hasn't been computed yet
    return None if extent < 20 else extent
def _place_sash(self, i, distance):
    """Position sash *i* at *distance* pixels along the pane axis."""
    if self.cget("orient") == tk.HORIZONTAL:
        coords = (distance, 0)
    else:
        coords = (0, distance)
    self.sash_place(i, *coords)
def _update_visibility(self):
    """Attach or detach self from a parent AutomaticPanedWindow based on
    whether it currently holds any panes."""
    if not isinstance(self.master, AutomaticPanedWindow):
        return

    has_content = len(self.panes()) > 0
    if not has_content and self.is_visible():
        self.master.forget(self)
    if has_content and not self.is_visible():
        self.master.insert("auto", self)
def _update_appearance(self, event=None):
    """Sync sash width and background color with the active ttk theme."""
    sash_width = lookup_style_option("Sash", "sashthickness", ems_to_pixels(0.6))
    self.configure(sashwidth=sash_width)
    self.configure(background=lookup_style_option("TPanedWindow", "background"))
class ClosableNotebook(ttk.Notebook):
    """ttk.Notebook whose tabs carry a close button and a right-click
    context menu (Close / Close others / Close all)."""

    def __init__(self, master, style="ButtonNotebook.TNotebook", **kw):
        super().__init__(master, style=style, **kw)

        self.tab_menu = self.create_tab_menu()
        # index of the tab under the most recent right-click
        self._popup_index = None
        # index whose close button is currently being pressed
        self.pressed_index = None

        # NOTE(review): "_letf_btn_press" looks like a typo for "_left_...";
        # the binding is internal, so renaming should be safe -- verify no
        # external references before doing so.
        self.bind("<ButtonPress-1>", self._letf_btn_press, True)
        self.bind("<ButtonRelease-1>", self._left_btn_release, True)
        if running_on_mac_os():
            # macOS: secondary button is 2; Ctrl+click is the conventional
            # fallback for one-button setups
            self.bind("<ButtonPress-2>", self._right_btn_press, True)
            self.bind("<Control-Button-1>", self._right_btn_press, True)
        else:
            self.bind("<ButtonPress-3>", self._right_btn_press, True)

        # self._check_update_style()

    def create_tab_menu(self):
        """Create the context menu shown when right-clicking a tab."""
        menu = tk.Menu(self.winfo_toplevel(), tearoff=False, **get_style_configuration("Menu"))
        menu.add_command(label=tr("Close"), command=self._close_tab_from_menu)
        menu.add_command(label=tr("Close others"), command=self._close_other_tabs)
        menu.add_command(label=tr("Close all"), command=self.close_tabs)
        return menu

    def _letf_btn_press(self, event):
        # arm the close button when the press lands on it
        try:
            elem = self.identify(event.x, event.y)
            index = self.index("@%d,%d" % (event.x, event.y))

            if "closebutton" in elem:
                self.state(["pressed"])
                self.pressed_index = index
        except Exception:
            # may fail, if clicked outside of tab
            return

    def _left_btn_release(self, event):
        # complete the close gesture only when the release happens on the
        # same tab's close button that was pressed
        if not self.instate(["pressed"]):
            return

        try:
            elem = self.identify(event.x, event.y)
            index = self.index("@%d,%d" % (event.x, event.y))
        except Exception:
            # may fail, when mouse is dragged
            return
        else:
            if "closebutton" in elem and self.pressed_index == index:
                self.close_tab(index)

            self.state(["!pressed"])
        finally:
            self.pressed_index = None

    def _right_btn_press(self, event):
        # remember which tab was clicked, then show the context menu
        try:
            index = self.index("@%d,%d" % (event.x, event.y))
            self._popup_index = index
            self.tab_menu.tk_popup(*self.winfo_toplevel().winfo_pointerxy())
        except Exception:
            logging.exception("Opening tab menu")

    def _close_tab_from_menu(self):
        self.close_tab(self._popup_index)

    def _close_other_tabs(self):
        self.close_tabs(self._popup_index)

    def close_tabs(self, except_index=None):
        """Close all tabs, optionally keeping the one at *except_index*.

        Iterates in reverse so that earlier removals don't shift the
        indices still to be processed.
        """
        for tab_index in reversed(range(len(self.winfo_children()))):
            if except_index is not None and tab_index == except_index:
                continue
            else:
                self.close_tab(tab_index)

    def close_tab(self, index):
        """Close the tab at *index*, delegating to the child's close()
        when it provides one (so it can do its own cleanup or veto)."""
        child = self.get_child_by_index(index)
        if hasattr(child, "close"):
            child.close()
        else:
            self.forget(index)
            child.destroy()

    def get_child_by_index(self, index):
        """Return the widget displayed in tab *index*, or None."""
        tab_id = self.tabs()[index]
        if tab_id:
            return self.nametowidget(tab_id)
        else:
            return None

    def get_current_child(self):
        """Return the widget of the selected tab, or None."""
        child_id = self.select()
        if child_id:
            return self.nametowidget(child_id)
        else:
            return None

    def focus_set(self):
        # delegate focus to the selected child (e.g. an editor) when present
        editor = self.get_current_child()
        if editor:
            editor.focus_set()
        else:
            super().focus_set()

    def _check_update_style(self):
        # Currently unused -- the call in __init__ is commented out.
        style = ttk.Style()
        if "closebutton" in style.element_names():
            # It's done already
            return

        # respect if required images have been defined already
        # NOTE(review): this checks for "img_close" but the images created
        # below are named "img_tab_close" -- verify before re-enabling.
        if "img_close" not in self.image_names():
            img_dir = os.path.join(os.path.dirname(__file__), "res")
            ClosableNotebook._close_img = tk.PhotoImage(
                "img_tab_close", file=os.path.join(img_dir, "tab_close.gif")
            )
            ClosableNotebook._close_active_img = tk.PhotoImage(
                "img_tab_close_active", file=os.path.join(img_dir, "tab_close_active.gif")
            )

        style.element_create(
            "closebutton",
            "image",
            "img_tab_close",
            ("active", "pressed", "!disabled", "img_tab_close_active"),
            ("active", "!disabled", "img_tab_close_active"),
            border=8,
            sticky="",
        )

        style.layout(
            "ButtonNotebook.TNotebook.Tab",
            [
                (
                    "Notebook.tab",
                    {
                        "sticky": "nswe",
                        "children": [
                            (
                                "Notebook.padding",
                                {
                                    "side": "top",
                                    "sticky": "nswe",
                                    "children": [
                                        (
                                            "Notebook.focus",
                                            {
                                                "side": "top",
                                                "sticky": "nswe",
                                                "children": [
                                                    (
                                                        "Notebook.label",
                                                        {"side": "left", "sticky": ""},
                                                    ),
                                                    (
                                                        "Notebook.closebutton",
                                                        {"side": "left", "sticky": ""},
                                                    ),
                                                ],
                                            },
                                        )
                                    ],
                                },
                            )
                        ],
                    },
                )
            ],
        )

    def _check_remove_padding(self, kw):
        # Windows themes produce 1-pixel padding to the bottom of the pane
        # Don't know how to get rid of it using themes
        if "padding" not in kw and ttk.Style().theme_use().lower() in (
            "windows",
            "xpnative",
            "vista",
        ):
            kw["padding"] = (0, 0, 0, -1)

    def add(self, child, **kw):
        self._check_remove_padding(kw)
        super().add(child, **kw)

    def insert(self, pos, child, **kw):
        self._check_remove_padding(kw)
        super().insert(pos, child, **kw)
class AutomaticNotebook(ClosableNotebook):
    """
    Enables inserting views according to their position keys.
    Remember its own position key. Automatically updates its visibility.
    """

    def __init__(self, master, position_key, preferred_size_in_pw=None):
        if get_workbench().in_simple_mode():
            style = "TNotebook"
        else:
            style = "ButtonNotebook.TNotebook"
        super().__init__(master, style=style, padding=0)
        self.position_key = position_key

        # should be in the end, so that it can be detected when
        # constructor hasn't completed yet
        self.preferred_size_in_pw = preferred_size_in_pw

    def add(self, child, **kw):
        super().add(child, **kw)
        self._update_visibility()

    def insert(self, pos, child, **kw):
        """Insert *child*; ``pos == "auto"`` orders tabs by position_key."""
        if pos == "auto":
            for sibling in map(self.nametowidget, self.tabs()):
                if (
                    not hasattr(sibling, "position_key")
                    or sibling.position_key is None  # was "== None" (PEP 8: identity check)
                    or sibling.position_key > child.position_key
                ):
                    pos = sibling
                    break
            else:
                pos = "end"

        super().insert(pos, child, **kw)
        self._update_visibility()

    def hide(self, tab_id):
        super().hide(tab_id)
        self._update_visibility()

    def forget(self, tab_id):
        # forget() may be called for a tab that is already gone
        if tab_id in self.tabs() or tab_id in self.winfo_children():
            super().forget(tab_id)
        self._update_visibility()

    def is_visible(self):
        return self in self.master.pane_widgets()

    def get_visible_child(self):
        """Return the widget of the selected tab, or None."""
        for child in self.winfo_children():
            if str(child) == str(self.select()):
                return child

        return None

    def _update_visibility(self):
        # show self in the parent paned window only while it has tabs
        if not isinstance(self.master, AutomaticPanedWindow):
            return

        if len(self.tabs()) == 0 and self.is_visible():
            self.master.remove(self)

        if len(self.tabs()) > 0 and not self.is_visible():
            self.master.insert("auto", self)
class TreeFrame(ttk.Frame):
    """A ttk.Treeview in a frame, with a vertical scrollbar and an
    optional status bar row underneath."""

    def __init__(
        self,
        master,
        columns,
        displaycolumns="#all",
        show_scrollbar=True,
        show_statusbar=False,
        borderwidth=0,
        relief="flat",
        **tree_kw
    ):
        ttk.Frame.__init__(self, master, borderwidth=borderwidth, relief=relief)
        # http://wiki.tcl.tk/44444#pagetoc50f90d9a
        self.vert_scrollbar = ttk.Scrollbar(
            self, orient=tk.VERTICAL, style=scrollbar_style("Vertical")
        )
        # the scrollbar widget exists either way; it's just not shown
        # when show_scrollbar is False
        if show_scrollbar:
            self.vert_scrollbar.grid(
                row=0, column=1, sticky=tk.NSEW, rowspan=2 if show_statusbar else 1
            )

        self.tree = ttk.Treeview(
            self,
            columns=columns,
            displaycolumns=displaycolumns,
            yscrollcommand=self.vert_scrollbar.set,
            **tree_kw
        )
        self.tree["show"] = "headings"
        self.tree.grid(row=0, column=0, sticky=tk.NSEW)
        self.vert_scrollbar["command"] = self.tree.yview
        # let the tree cell absorb all extra space
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.tree.bind("<<TreeviewSelect>>", self.on_select, "+")
        self.tree.bind("<Double-Button-1>", self.on_double_click, "+")

        if show_statusbar:
            self.statusbar = ttk.Frame(self)
            self.statusbar.grid(row=1, column=0, sticky="nswe")
        else:
            self.statusbar = None

    def _clear_tree(self):
        # deleting top-level items removes their descendants too
        for child_id in self.tree.get_children():
            self.tree.delete(child_id)

    def clear(self):
        self._clear_tree()

    def on_select(self, event):
        """Hook for subclasses: called when the selection changes."""
        pass

    def on_double_click(self, event):
        """Hook for subclasses: called on double-click."""
        pass
def scrollbar_style(orientation):
    """Return an explicit scrollbar style name for macOS dark themes, else None.

    On macOS, ttk.Scrollbar uses native rendering unless the style attribute
    is set (see http://wiki.tcl.tk/44444#pagetoc50f90d9a), and native
    rendering doesn't look good in dark themes.
    """
    needs_explicit_style = running_on_mac_os() and get_workbench().uses_dark_ui_theme()
    return orientation + ".TScrollbar" if needs_explicit_style else None
def sequence_to_accelerator(sequence):
    """Translates Tk event sequence to customary shortcut string
    for showing in the menu (e.g. "<Control-s>" -> "Ctrl+S")."""
    if not sequence:
        return ""

    if not sequence.startswith("<"):
        return sequence

    accelerator = (
        sequence.strip("<>").replace("Key-", "").replace("KeyPress-", "").replace("Control", "Ctrl")
    )

    # Tweaking individual parts
    parts = accelerator.split("-")
    # tkinter shows shift with capital letter, but in shortcuts it's customary
    # to include it explicitly
    if len(parts[-1]) == 1 and parts[-1].isupper() and "Shift" not in parts:
        # was: "not 'Shift' in parts" (PEP 8 E713)
        parts.insert(-1, "Shift")

    # even when shift is not required, it's customary to show shortcut with
    # capital letter
    if len(parts[-1]) == 1:
        parts[-1] = parts[-1].upper()

    accelerator = "+".join(parts)

    # Post processing
    accelerator = (
        accelerator.replace("Minus", "-")
        .replace("minus", "-")
        .replace("Plus", "+")
        .replace("plus", "+")
    )

    return accelerator
def get_zoomed(toplevel):
    """Return True when *toplevel* is maximized ("zoomed")."""
    if "-zoomed" in toplevel.wm_attributes():
        # Linux exposes zoomed state as a wm attribute
        return bool(toplevel.wm_attributes("-zoomed"))
    # Windows and macOS report it via the window state
    return toplevel.wm_state() == "zoomed"
def set_zoomed(toplevel, value):
    """Maximize (truthy *value*) or restore *toplevel*, per platform."""
    if "-zoomed" in toplevel.wm_attributes():
        # Linux
        toplevel.wm_attributes("-zoomed", str(int(value)))
    else:
        # Win/Mac
        toplevel.wm_state("zoomed" if value else "normal")
class EnhancedTextWithLogging(tktextext.EnhancedText):
    """EnhancedText which publishes TextInsert/TextDelete workbench events,
    annotated with hints about whether the edit is trivial for syntax
    coloring and for paren matching."""

    def __init__(self, master=None, style="Text", tag_current_line=False, cnf={}, **kw):
        super().__init__(
            master=master, style=style, tag_current_line=tag_current_line, cnf=cnf, **kw
        )
        self._last_event_changed_line_count = False

    def direct_insert(self, index, chars, tags=None, **kw):
        """Insert and emit a TextInsert event with triviality hints."""
        # try removing line numbers
        # TODO: shouldn't it take place only on paste?
        # TODO: does it occur when opening a file with line numbers in it?
        # if self._propose_remove_line_numbers and isinstance(chars, str):
        #     chars = try_remove_linenumbers(chars, self)
        concrete_index = self.index(index)
        line_before = self.get(concrete_index + " linestart", concrete_index + " lineend")
        self._last_event_changed_line_count = "\n" in chars
        result = tktextext.EnhancedText.direct_insert(self, index, chars, tags=tags, **kw)
        line_after = self.get(concrete_index + " linestart", concrete_index + " lineend")
        trivial_for_coloring, trivial_for_parens = self._is_trivial_edit(
            chars, line_before, line_after
        )
        get_workbench().event_generate(
            "TextInsert",
            index=concrete_index,
            text=chars,
            tags=tags,
            text_widget=self,
            trivial_for_coloring=trivial_for_coloring,
            trivial_for_parens=trivial_for_parens,
        )

        return result

    def direct_delete(self, index1, index2=None, **kw):
        """Delete and emit a TextDelete event with triviality hints."""
        try:
            # index1 may be eg "sel.first" and it doesn't make sense *after* deletion
            concrete_index1 = self.index(index1)
            if index2 is not None:
                concrete_index2 = self.index(index2)
            else:
                concrete_index2 = None

            chars = self.get(index1, index2)
            self._last_event_changed_line_count = "\n" in chars
            line_before = self.get(
                concrete_index1 + " linestart",
                (concrete_index1 if concrete_index2 is None else concrete_index2) + " lineend",
            )

            return tktextext.EnhancedText.direct_delete(self, index1, index2=index2, **kw)
        finally:
            line_after = self.get(
                concrete_index1 + " linestart",
                (concrete_index1 if concrete_index2 is None else concrete_index2) + " lineend",
            )
            trivial_for_coloring, trivial_for_parens = self._is_trivial_edit(
                chars, line_before, line_after
            )
            get_workbench().event_generate(
                "TextDelete",
                index1=concrete_index1,
                index2=concrete_index2,
                text_widget=self,
                trivial_for_coloring=trivial_for_coloring,
                trivial_for_parens=trivial_for_parens,
            )

    def _is_trivial_edit(self, chars, line_before, line_after):
        """Classify an edit as (trivial_for_coloring, trivial_for_parens).

        The line is taken after the edit for insertion and before the edit
        for deletion.
        """
        if not chars.strip():
            # linebreaks, including with automatic indent
            # check it doesn't break a triple-quote
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = trivial_for_coloring
        elif len(chars) > 1:
            # paste, cut, load or something like this
            trivial_for_coloring = False
            trivial_for_parens = False
        elif chars == "#":
            # NB! was checking "''''" (four quotes) -- fixed to the triple
            # quote used by every other branch of this method
            trivial_for_coloring = "'''" not in line_before and '"""' not in line_before
            trivial_for_parens = trivial_for_coloring and not re.search(PARENS_REGEX, line_before)
        elif chars in "()[]{}":
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = False
        elif chars == "'":
            trivial_for_coloring = "'''" not in line_before and "'''" not in line_after
            trivial_for_parens = False  # can put parens into open string
        elif chars == '"':
            trivial_for_coloring = '"""' not in line_before and '"""' not in line_after
            trivial_for_parens = False  # can put parens into open string
        elif chars == "\\":
            # can shorten closing quote
            trivial_for_coloring = '"""' not in line_before and '"""' not in line_after
            trivial_for_parens = False
        else:
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = trivial_for_coloring

        return trivial_for_coloring, trivial_for_parens
class SafeScrollbar(ttk.Scrollbar):
    """Scrollbar whose set() swallows (but prints) exceptions, so a broken
    scroll update can't take down the whole application."""

    def __init__(self, master=None, **kw):
        super().__init__(master=master, **kw)

    def set(self, first, last):
        try:
            ttk.Scrollbar.set(self, first, last)
        except Exception:
            traceback.print_exc()
class AutoScrollbar(SafeScrollbar):
    # http://effbot.org/zone/tkinter-autoscrollbar.htm
    # a vert_scrollbar that hides itself if it's not needed. only
    # works if you use the grid geometry manager.
    def __init__(self, master=None, **kw):
        super().__init__(master=master, **kw)

    def set(self, first, last):
        if float(first) <= 0.0 and float(last) >= 1.0:
            # entire content visible -- hide the scrollbar
            self.grid_remove()
        elif float(first) > 0.001 or float(last) < 0.009:
            # with >0 and <1 it occasionally made scrollbar wobble back and forth
            # NOTE(review): 0.009 looks suspicious (0.999 would mirror 0.001);
            # confirm against the wobble issue above before changing
            self.grid()
        ttk.Scrollbar.set(self, first, last)

    def pack(self, **kw):
        # grid-only widget, see class comment
        raise tk.TclError("cannot use pack with this widget")

    def place(self, **kw):
        # grid-only widget, see class comment
        raise tk.TclError("cannot use place with this widget")
def update_entry_text(entry, text):
    """Replace the content of *entry* with *text*, restoring its state
    afterwards (entries can't be edited while disabled/readonly)."""
    saved_state = entry.cget("state")
    entry.config(state="normal")
    entry.delete(0, "end")
    entry.insert(0, text)
    entry.config(state=saved_state)
class VerticallyScrollableFrame(ttk.Frame):
    # http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
    """Frame scrollable vertically via an embedded canvas; put content
    into self.interior."""

    def __init__(self, master):
        ttk.Frame.__init__(self, master)

        # set up scrolling with canvas
        vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
        self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)
        vscrollbar.config(command=self.canvas.yview)
        self.canvas.xview_moveto(0)
        self.canvas.yview_moveto(0)
        self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
        vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

        # actual content goes into self.interior, embedded in the canvas
        self.interior = ttk.Frame(self.canvas)
        self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
        self.bind("<Configure>", self._configure_interior, "+")
        self.bind("<Expose>", self._expose, "+")

    def _expose(self, event):
        self.update_idletasks()
        self.update_scrollbars()

    def _configure_interior(self, event):
        self.update_scrollbars()

    def update_scrollbars(self):
        # update the scrollbars to match the size of the inner frame
        size = (self.canvas.winfo_width(), self.interior.winfo_reqheight())
        self.canvas.config(scrollregion="0 0 %s %s" % size)

        if (
            self.interior.winfo_reqwidth() != self.canvas.winfo_width()
            and self.canvas.winfo_width() > 10
        ):
            # update the interior's width to fit canvas
            # print("CAWI", self.canvas.winfo_width())
            self.canvas.itemconfigure(self.interior_id, width=self.canvas.winfo_width())
class ScrollableFrame(ttk.Frame):
    # http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
    """Frame scrollable in both directions via an embedded canvas; put
    content into self.interior."""

    def __init__(self, master):
        ttk.Frame.__init__(self, master)

        # set up scrolling with canvas
        vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
        hscrollbar = ttk.Scrollbar(self, orient=tk.HORIZONTAL)
        # NOTE(review): the canvas gets no xscrollcommand, so the horizontal
        # scrollbar's thumb is never updated by the canvas -- confirm
        # whether this is intended
        self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)
        vscrollbar.config(command=self.canvas.yview)
        hscrollbar.config(command=self.canvas.xview)
        self.canvas.xview_moveto(0)
        self.canvas.yview_moveto(0)
        self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
        vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
        hscrollbar.grid(row=1, column=0, sticky=tk.NSEW)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

        self.interior = ttk.Frame(self.canvas)
        self.interior.columnconfigure(0, weight=1)
        self.interior.rowconfigure(0, weight=1)
        self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
        self.bind("<Configure>", self._configure_interior, "+")
        self.bind("<Expose>", self._expose, "+")

    def _expose(self, event):
        self.update_idletasks()
        self._configure_interior(event)

    def _configure_interior(self, event):
        # update the scrollbars to match the size of the inner frame
        size = (self.canvas.winfo_reqwidth(), self.interior.winfo_reqheight())
        self.canvas.config(scrollregion="0 0 %s %s" % size)
class ThemedListbox(tk.Listbox):
    """tk.Listbox which takes its colors from the active ttk theme and
    follows theme changes."""

    def __init__(self, master=None, cnf={}, **kw):
        super().__init__(master=master, cnf=cnf, **kw)

        self._ui_theme_change_binding = self.bind(
            "<<ThemeChanged>>", self._reload_theme_options, True
        )
        self._reload_theme_options()

    def _reload_theme_options(self, event=None):
        """Copy relevant style options from the ttk theme onto this widget."""
        style = ttk.Style()

        states = []
        if self["state"] == "disabled":
            states.append("disabled")

        # Following crashes when a combobox is focused
        # if self.focus_get() == self:
        #     states.append("focus")

        opts = {}
        for key in [
            "background",
            "foreground",
            "highlightthickness",
            "highlightcolor",
            "highlightbackground",
        ]:
            value = style.lookup(self.get_style_name(), key, states)
            if value:
                opts[key] = value

        self.configure(opts)

    def get_style_name(self):
        # subclasses may override to look up a different style
        return "Listbox"

    def destroy(self):
        self.unbind("<<ThemeChanged>>", self._ui_theme_change_binding)
        super().destroy()
class ToolTip:
    """Taken from http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml"""

    def __init__(self, widget, options):
        self.widget = widget
        self.tipwindow = None  # the Toplevel while the tip is showing
        self.id = None
        self.x = self.y = 0
        self.options = options  # label options (colors, relief, padding...)

    def showtip(self, text):
        "Display text in tooltip window"
        self.text = text
        if self.tipwindow or not self.text:
            return
        # position the tip below/right of the widget's insert cursor
        x, y, _, cy = self.widget.bbox("insert")
        x = x + self.widget.winfo_rootx() + 27
        y = y + cy + self.widget.winfo_rooty() + self.widget.winfo_height() + 2
        self.tipwindow = tw = tk.Toplevel(self.widget)
        if running_on_mac_os():
            try:
                # Must be the first thing to do after creating window
                # https://wiki.tcl-lang.org/page/MacWindowStyle
                tw.tk.call(
                    "::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates"
                )
                if get_tk_version_info() >= (8, 6, 10) and running_on_mac_os():
                    tw.wm_overrideredirect(1)
            except tk.TclError:
                pass
        else:
            tw.wm_overrideredirect(1)
        tw.wm_geometry("+%d+%d" % (x, y))
        if running_on_mac_os():
            # TODO: maybe it's because of Tk 8.5, not because of Mac
            tw.wm_transient(self.widget)
        label = tk.Label(tw, text=self.text, **self.options)
        label.pack()
        # get_workbench().bind("WindowFocusOut", self.hidetip, True)

    def hidetip(self, event=None):
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()
        # get_workbench().unbind("WindowFocusOut", self.hidetip)
def create_tooltip(widget, text, **kw):
    """Attach a hover tooltip showing *text* to *widget*.

    Styling comes from the "Tooltip" style configuration; keyword
    arguments override individual options.
    """
    options = get_style_configuration("Tooltip").copy()
    defaults = {
        "background": "#ffffe0",
        "foreground": "#000000",
        "relief": "solid",
        "borderwidth": 1,
        "padx": 1,
        "pady": 0,
    }
    for key, value in defaults.items():
        options.setdefault(key, value)
    options.update(kw)

    tooltip = ToolTip(widget, options)

    def on_enter(event):
        tooltip.showtip(text)

    def on_leave(event):
        tooltip.hidetip()

    widget.bind("<Enter>", on_enter)
    widget.bind("<Leave>", on_leave)
class NoteBox(CommonDialog):
    """Borderless tooltip-like popup for showing (optionally clickable)
    annotations next to a target widget."""

    def __init__(self, master=None, max_default_width=300, **kw):
        super().__init__(master=master, highlightthickness=0, **kw)

        self._max_default_width = max_default_width

        self.wm_overrideredirect(True)
        if running_on_mac_os():
            # TODO: maybe it's because of Tk 8.5, not because of Mac
            self.wm_transient(master)
        try:
            # For Mac OS
            self.tk.call(
                "::tk::unsupported::MacWindowStyle", "style", self._w, "help", "noActivates"
            )
        except tk.TclError:
            pass

        self._current_chars = ""  # plain-text content currently shown
        self._click_bindings = {}  # click tag name -> binding id

        self.padx = 5
        self.pady = 5
        self.text = TweakableText(
            self,
            background="#ffffe0",
            borderwidth=1,
            relief="solid",
            undo=False,
            read_only=True,
            font="TkDefaultFont",
            highlightthickness=0,
            padx=self.padx,
            pady=self.pady,
            wrap="word",
        )

        self.text.grid(row=0, column=0, sticky="nsew")
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

        self.text.bind("<Escape>", self.close, True)

        # tk._default_root.bind_all("<1>", self._close_maybe, True)
        # tk._default_root.bind_all("<Key>", self.close, True)

        # created hidden; shown via place()/show_note()
        self.withdraw()

    def clear(self):
        """Remove all content together with its click bindings."""
        for tag in self._click_bindings:
            self.text.tag_unbind(tag, "<1>", self._click_bindings[tag])
            self.text.tag_remove(tag, "1.0", "end")

        self.text.direct_delete("1.0", "end")
        self._current_chars = ""
        self._click_bindings.clear()

    def set_content(self, *items):
        """Replace content; each item is either a plain string or a
        (chars, *tags[, click_handler]) tuple/list."""
        self.clear()

        for item in items:
            if isinstance(item, str):
                self.text.direct_insert("1.0", item)
                self._current_chars = item
            else:
                assert isinstance(item, (list, tuple))
                chars, *props = item
                if len(props) > 0 and callable(props[-1]):
                    # last prop is a click handler; the rest are tags
                    tags = tuple(props[:-1])
                    click_handler = props[-1]
                else:
                    tags = tuple(props)
                    click_handler = None

                self.append_text(chars, tags, click_handler)

        self.text.see("1.0")

    def append_text(self, chars, tags=(), click_handler=None):
        """Append *chars* with *tags*, optionally binding a click handler."""
        tags = tuple(tags)

        if click_handler is not None:
            click_tag = "click_%d" % len(self._click_bindings)
            tags = tags + (click_tag,)
            binding = self.text.tag_bind(click_tag, "<1>", click_handler, True)
            self._click_bindings[click_tag] = binding

        self.text.direct_insert("end", chars, tags)
        self._current_chars += chars

    def place(self, target, focus=None):
        """Show the note just below *target*.

        *focus* narrows the anchor area: a TextRange (when *target* is a
        tk.Text), an (x, y, w, h) tuple/list, or None for the whole widget.
        """
        # Compute the area that will be described by this Note
        focus_x = target.winfo_rootx()
        focus_y = target.winfo_rooty()
        focus_height = target.winfo_height()

        if isinstance(focus, TextRange):
            assert isinstance(target, tk.Text)
            topleft = target.bbox("%d.%d" % (focus.lineno, focus.col_offset))
            if focus.end_col_offset == 0:
                # NOTE(review): both %d-s get end_lineno - 1 here; check
                # whether the second one was meant to be a column
                botright = target.bbox(
                    "%d.%d lineend" % (focus.end_lineno - 1, focus.end_lineno - 1)
                )
            else:
                botright = target.bbox("%d.%d" % (focus.end_lineno, focus.end_col_offset))

            if topleft and botright:
                focus_x += topleft[0]
                focus_y += topleft[1]
                focus_height = botright[1] - topleft[1] + botright[3]

        elif isinstance(focus, (list, tuple)):
            focus_x += focus[0]
            focus_y += focus[1]
            focus_height = focus[3]

        elif focus is None:
            pass

        else:
            raise TypeError("Unsupported focus")

        # Compute dimensions of the note
        font = self.text["font"]
        if isinstance(font, str):
            font = tk.font.nametofont(font)

        lines = self._current_chars.splitlines()
        max_line_width = 0
        for line in lines:
            max_line_width = max(max_line_width, font.measure(line))

        width = min(max_line_width, self._max_default_width) + self.padx * 2 + 2
        # first set a provisional height, then measure the wrapped line count
        self.wm_geometry("%dx%d+%d+%d" % (width, 100, focus_x, focus_y + focus_height))
        self.update_idletasks()
        line_count = int(float(self.text.index("end")))
        line_height = font.metrics()["linespace"]

        self.wm_geometry(
            "%dx%d+%d+%d" % (width, line_count * line_height, focus_x, focus_y + focus_height)
        )

        # TODO: detect the situation when note doesn't fit under
        # the focus box and should be placed above

        self.deiconify()

    def show_note(self, *content_items: Union[str, List], target=None, focus=None) -> None:
        """Convenience wrapper: set content and place the note."""
        self.set_content(*content_items)
        self.place(target, focus)

    def _close_maybe(self, event):
        # close on clicks outside the note itself
        if event.widget not in [self, self.text]:
            self.close(event)

    def close(self, event=None):
        self.withdraw()
def get_widget_offset_from_toplevel(widget):
    """Return (x, y) of *widget* relative to its toplevel window, by
    accumulating offsets up the master chain."""
    total_x = 0
    total_y = 0
    toplevel = widget.winfo_toplevel()
    current = widget
    while current != toplevel:
        total_x += current.winfo_x()
        total_y += current.winfo_y()
        current = current.master
    return total_x, total_y
class EnhancedVar(tk.Variable):
    """tk.Variable which tracks whether it has been written to and can
    notify a listener on every write."""

    def __init__(self, master=None, value=None, name=None, modification_listener=None):
        # tk.Variable would silently accept any first argument; fail fast instead
        if master is not None and not isinstance(master, (tk.Widget, tk.Wm)):
            raise TypeError("First positional argument 'master' must be None, Widget or Wm")
        super().__init__(master=master, value=value, name=name)

        self.modified = False  # becomes True on first write
        self.modification_listener = modification_listener
        if sys.version_info < (3, 6):
            # trace() is deprecated in favor of trace_add() since 3.6
            self.trace("w", self._on_write)
        else:
            self.trace_add("write", self._on_write)

    def _on_write(self, *args):
        self.modified = True
        if self.modification_listener:
            try:
                self.modification_listener()
            except Exception:
                # Otherwise whole process will be brought down
                # because for some reason Tk tries to call non-existing method
                # on variable
                get_workbench().report_exception()
class EnhancedStringVar(EnhancedVar, tk.StringVar):
    """EnhancedVar holding a string value."""

    pass
class EnhancedIntVar(EnhancedVar, tk.IntVar):
    """EnhancedVar holding an int value."""

    pass
class EnhancedBooleanVar(EnhancedVar, tk.BooleanVar):
    """EnhancedVar holding a boolean value."""

    pass
class EnhancedDoubleVar(EnhancedVar, tk.DoubleVar):
    """EnhancedVar holding a float value."""

    pass
def create_string_var(value, modification_listener=None) -> EnhancedStringVar:
    """Creates a tk.StringVar with "modified" attribute
    showing whether the variable has been modified after creation"""
    return EnhancedStringVar(
        master=None, value=value, name=None, modification_listener=modification_listener
    )
def create_int_var(value, modification_listener=None) -> EnhancedIntVar:
    """See create_string_var"""
    return EnhancedIntVar(
        master=None, value=value, name=None, modification_listener=modification_listener
    )
def create_double_var(value, modification_listener=None) -> EnhancedDoubleVar:
    """See create_string_var"""
    return EnhancedDoubleVar(
        master=None, value=value, name=None, modification_listener=modification_listener
    )
def create_boolean_var(value, modification_listener=None) -> EnhancedBooleanVar:
    """See create_string_var"""
    return EnhancedBooleanVar(
        master=None, value=value, name=None, modification_listener=modification_listener
    )
def shift_is_pressed(event_state):
    """Test the Shift bit of a Tk event state bitmask (truthy when set).

    http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
    http://stackoverflow.com/q/32426250/261181
    """
    SHIFT_MASK = 0x0001
    return event_state & SHIFT_MASK
def control_is_pressed(event_state):
    """Test the Control bit of a Tk event state bitmask (truthy when set).

    http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
    http://stackoverflow.com/q/32426250/261181
    """
    CONTROL_MASK = 0x0004
    return event_state & CONTROL_MASK
def sequence_to_event_state_and_keycode(sequence: str) -> Optional[Tuple[int, int]]:
    """Translate a Tk binding sequence into (event_state, keycode).

    Remember handlers for certain shortcuts which require
    different treatment on non-latin keyboards. Only <Control-letter> and
    <Control-Shift-letter> with latin letters are supported; returns None
    for everything else.
    """
    # guard against empty string too (original indexed sequence[0] blindly)
    if not sequence or sequence[0] != "<":
        return None

    parts = sequence.strip("<").strip(">").split("-")
    # support only latin letters for now
    if parts[-1].lower() not in list("abcdefghijklmnopqrstuvwxyz"):
        return None

    letter = parts.pop(-1)
    if "Key" in parts:
        parts.remove("Key")
    if "key" in parts:
        parts.remove("key")

    modifiers = {part.lower() for part in parts}

    if letter.isupper():
        modifiers.add("shift")

    if modifiers not in [{"control"}, {"control", "shift"}]:
        # don't support others for now
        return None

    event_state = 0
    # http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
    # https://stackoverflow.com/questions/32426250/python-documentation-and-or-lack-thereof-e-g-keyboard-event-state
    for modifier in modifiers:
        if modifier == "shift":
            event_state |= 0x0001
        elif modifier == "control":
            event_state |= 0x0004
        else:
            # unsupported modifier
            return None

    # for latin letters keycode is same as its ascii code
    return (event_state, ord(letter.upper()))
def select_sequence(win_version, mac_version, linux_version=None):
    """Pick the binding sequence matching the current platform.

    Falls back to the Windows variant when no Linux-specific sequence is
    supplied (or the platform is unrecognized).
    """
    if running_on_mac_os():
        return mac_version
    if running_on_linux() and linux_version:
        return linux_version
    return win_version
def try_remove_linenumbers(text, master):
    """Offer to strip leading line numbers from *text*; return the
    (possibly cleaned) text.

    Any error is printed and results in returning the text unchanged.
    """
    try:
        if not has_line_numbers(text):
            return text
        wants_removal = messagebox.askyesno(
            title="Remove linenumbers",
            message="Do you want to remove linenumbers from pasted text?",
            default=messagebox.YES,
            master=master,
        )
        return remove_line_numbers(text) if wants_removal else text
    except Exception:
        traceback.print_exc()
        return text
def has_line_numbers(text):
    """Heuristic: True when a multi-line *text* has a line number prefix
    on every line."""
    lines = text.splitlines()
    # generator instead of a throwaway list inside all() (was: all([...]))
    return len(lines) > 2 and all(len(split_after_line_number(line)) == 2 for line in lines)
def split_after_line_number(s):
    """Split a leading line number ("NN" or "NN.") off of *s*.

    Returns [number_part, rest] when a prefix is found, otherwise [s].
    """
    parts = re.split(r"(^\s*\d+\.?)", s)
    if len(parts) == 1:
        # no line number prefix
        return parts

    # the anchored pattern can match only once, right at the start
    assert len(parts) == 3 and parts[0] == ""
    return parts[1:]
def remove_line_numbers(s):
    """Strip the leading line number from every line of *s*.

    Returns *s* unchanged when any line lacks a number prefix.
    """
    stripped_lines = []
    for line in s.splitlines():
        pieces = split_after_line_number(line)
        if len(pieces) != 2:
            # at least one line has no number -- leave the text alone
            return s
        stripped_lines.append(pieces[1])

    return textwrap.dedent(("\n".join(stripped_lines)) + "\n")
def center_window(win, master=None):
    """Deprecated alias of assign_geometry, kept for backward compatibility."""
    # for backward compat
    return assign_geometry(win, master)
def assign_geometry(win, master=None, min_left=0, min_top=0):
    """Size and position *win* relative to *master* (default: app root).

    Uses the size stored in workbench options when present (then both size
    and position are applied); otherwise only centers the window over
    master, clamped to (min_left, min_top).
    """
    if master is None:
        master = tk._default_root

    size = get_workbench().get_option(get_size_option_name(win))
    if size:
        width, height = size
        saved_size = True
    else:
        fallback_width = 600
        fallback_height = 400
        # need to wait until size is computed
        # (unfortunately this causes dialog to jump)
        if getattr(master, "initializing", False):
            # can't get reliable positions when main window is not in mainloop yet
            width = fallback_width
            height = fallback_height
        else:
            if not running_on_linux():
                # better to avoid in Linux because it causes ugly jump
                win.update_idletasks()
            # looks like it doesn't take window border into account
            width = win.winfo_width()
            height = win.winfo_height()

            if width < 10:
                # ie. size measurement is not correct
                width = fallback_width
                height = fallback_height

        saved_size = False

    left = master.winfo_rootx() + master.winfo_width() // 2 - width // 2
    top = master.winfo_rooty() + master.winfo_height() // 2 - height // 2
    left = max(left, min_left)
    top = max(top, min_top)

    if saved_size:
        win.geometry("%dx%d+%d+%d" % (width, height, left, top))
    else:
        win.geometry("+%d+%d" % (left, top))
class WaitingDialog(CommonDialog):
    """Dialog which polls an async result every 500 ms and closes itself
    when the result is ready."""

    def __init__(self, master, async_result, description, title="Please wait!", timeout=None):
        self._async_result = async_result
        super().__init__(master)
        if misc_utils.running_on_mac_os():
            self.configure(background="systemSheetBackground")
        self.title(title)
        self.resizable(height=tk.FALSE, width=tk.FALSE)
        # self.protocol("WM_DELETE_WINDOW", self._close)
        self.desc_label = ttk.Label(self, text=description, wraplength=300)
        self.desc_label.grid(padx=20, pady=20)
        self.update_idletasks()

        self.timeout = timeout  # seconds; None disables the timeout
        self.start_time = time.time()
        self.after(500, self._poll)

    def _poll(self):
        if self._async_result.ready():
            self._close()
        elif self.timeout and time.time() - self.start_time > self.timeout:
            # NOTE(review): this raise happens inside a Tk after-callback,
            # so it won't propagate to the code awaiting the result --
            # confirm whether that's the intent
            raise TimeoutError()
        else:
            self.after(500, self._poll)
            # append a dot to show progress
            self.desc_label["text"] = self.desc_label["text"] + "."

    def _close(self):
        self.destroy()
def run_with_waiting_dialog(master, action, args=(), description="Working"):
    """Run *action(*args)* on a worker thread while showing a modal WaitingDialog.

    Blocks until the action completes; returns its result (or re-raises the
    exception the action raised, via AsyncResult.get).
    """
    # http://stackoverflow.com/a/14299004/261181
    from multiprocessing.pool import ThreadPool

    pool = ThreadPool(processes=1)
    try:
        async_result = pool.apply_async(action, args)
        dlg = WaitingDialog(master, async_result, description=description)
        show_dialog(dlg, master)
        return async_result.get()
    finally:
        # the original leaked the pool's worker thread; shut it down once
        # the result has been retrieved
        pool.close()
class FileCopyDialog(CommonDialog):
    """Modal dialog that copies one file on a background thread while showing
    a progress bar, closing itself when the copy completes."""

    def __init__(self, master, source, destination, description=None, fsync=True):
        self._source = source
        self._destination = destination
        self._old_bytes_copied = 0
        self._bytes_copied = 0
        self._fsync = fsync  # when True, fsync after each written chunk
        self._done = False
        self._cancelled = False
        self._closed = False
        super().__init__(master)

        main_frame = ttk.Frame(self)  # To get styled background
        main_frame.grid(row=0, column=0, sticky="nsew")
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)

        self.title(tr("Copying"))

        if description is None:
            description = tr("Copying\n %s\nto\n %s") % (source, destination)
        label = ttk.Label(main_frame, text=description)
        label.grid(row=0, column=0, columnspan=2, sticky="nw", padx=15, pady=15)

        # progress is measured in bytes of the source file
        self._bar = ttk.Progressbar(main_frame, maximum=os.path.getsize(source), length=200)
        self._bar.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=15, pady=0)

        self._cancel_button = ttk.Button(main_frame, text=tr("Cancel"), command=self._cancel)
        self._cancel_button.grid(row=2, column=1, sticky="ne", padx=15, pady=15)
        self._bar.focus_set()

        main_frame.columnconfigure(0, weight=1)

        self._update_progress()

        self.bind("<Escape>", self._cancel, True)  # escape-close only if process has completed
        self.protocol("WM_DELETE_WINDOW", self._cancel)
        self._start()

    def _start(self):
        # Copy in 16 KB chunks on a daemon thread; _bytes_copied is polled
        # by the UI timer in _update_progress.
        def work():
            self._copy_progess = 0  # NOTE(review): misspelled name, not read anywhere in this view

            with open(self._source, "rb") as fsrc:
                with open(self._destination, "wb") as fdst:
                    while True:
                        buf = fsrc.read(16 * 1024)
                        if not buf:
                            break

                        fdst.write(buf)
                        fdst.flush()
                        if self._fsync:
                            os.fsync(fdst)
                        self._bytes_copied += len(buf)

            self._done = True

        threading.Thread(target=work, daemon=True).start()

    def _update_progress(self):
        # UI-thread timer: advance the bar and close once the worker finished.
        if self._done:
            if not self._closed:
                self._close()
            return

        self._bar.step(self._bytes_copied - self._old_bytes_copied)
        self._old_bytes_copied = self._bytes_copied

        self.after(100, self._update_progress)

    def _close(self):
        self.destroy()
        self._closed = True

    def _cancel(self, event=None):
        # NOTE(review): the worker loop never checks _cancelled, so the copy
        # keeps running in the background after the dialog closes -- confirm.
        self._cancelled = True
        self._close()
class ChoiceDialog(CommonDialogEx):
    """Modal radiobutton chooser; the selection ends up in self.result
    (None when cancelled or when nothing was selected)."""

    def __init__(
        self,
        master=None,
        title="Choose one",
        question: str = "Choose one:",
        choices=[],
        initial_choice_index=None,
    ) -> None:
        super().__init__(master=master)

        self.title(title)
        self.resizable(False, False)

        self.main_frame.columnconfigure(0, weight=1)

        row = 0
        question_label = ttk.Label(self.main_frame, text=question)
        question_label.grid(row=row, column=0, columnspan=2, sticky="w", padx=20, pady=20)
        row += 1

        # NOTE(review): tk.StringVar("") passes "" as the *master* argument,
        # relying on tkinter's fallback to the default root -- confirm.
        self.var = tk.StringVar("")
        if initial_choice_index is not None:
            self.var.set(choices[initial_choice_index])
        for choice in choices:
            rb = ttk.Radiobutton(self.main_frame, text=choice, variable=self.var, value=choice)
            rb.grid(row=row, column=0, columnspan=2, sticky="w", padx=20)
            row += 1

        ok_button = ttk.Button(self.main_frame, text=tr("OK"), command=self._ok, default="active")
        ok_button.grid(row=row, column=0, sticky="e", pady=20)
        cancel_button = ttk.Button(self.main_frame, text=tr("Cancel"), command=self._cancel)
        cancel_button.grid(row=row, column=1, sticky="e", padx=20, pady=20)

        self.bind("<Escape>", self._cancel, True)
        self.bind("<Return>", self._ok, True)
        self.protocol("WM_DELETE_WINDOW", self._cancel)

    def _ok(self):
        self.result = self.var.get()
        if not self.result:
            self.result = None

        self.destroy()

    def _cancel(self):
        self.result = None
        self.destroy()
class LongTextDialog(CommonDialog):
    """Dialog showing a long read-only text with a copy-to-clipboard button."""

    def __init__(self, title, text_content, parent=None):
        if parent is None:
            parent = tk._default_root
        super().__init__(master=parent)
        self.title(title)

        main_frame = ttk.Frame(self)
        main_frame.grid(row=0, column=0, sticky="nsew")
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

        default_font = tk.font.nametofont("TkDefaultFont")
        self._text = tktextext.TextFrame(
            main_frame,
            read_only=True,
            wrap="none",
            font=default_font,
            width=80,
            height=10,
            relief="sunken",
            borderwidth=1,
        )
        self._text.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=20, pady=20)
        # direct_insert bypasses the read-only protection to fill the content
        self._text.text.direct_insert("1.0", text_content)
        self._text.text.see("1.0")

        copy_button = ttk.Button(
            main_frame, command=self._copy, text=tr("Copy to clipboard"), width=20
        )
        copy_button.grid(row=2, column=0, sticky="w", padx=20, pady=(0, 20))

        close_button = ttk.Button(
            main_frame, command=self._close, text=tr("Close"), default="active"
        )
        close_button.grid(row=2, column=1, sticky="w", padx=20, pady=(0, 20))
        close_button.focus_set()

        main_frame.columnconfigure(0, weight=1)
        main_frame.rowconfigure(1, weight=1)

        self.protocol("WM_DELETE_WINDOW", self._close)
        self.bind("<Escape>", self._close, True)

    def _copy(self, event=None):
        # Replace the clipboard content with the full text
        self.clipboard_clear()
        self.clipboard_append(self._text.text.get("1.0", "end"))

    def _close(self, event=None):
        self.destroy()
def ask_one_from_choices(
    master=None,
    title="Choose one",
    question: str = "Choose one:",
    choices=[],
    initial_choice_index=None,
):
    """Show a modal ChoiceDialog and return the selected choice (None on cancel)."""
    dlg = ChoiceDialog(master, title, question, choices, initial_choice_index)
    show_dialog(dlg, master)
    return dlg.result
def get_busy_cursor():
    """Name of the platform's busy-pointer cursor."""
    if running_on_windows():
        return "wait"
    if running_on_mac_os():
        return "spinning"
    return "watch"
def get_tk_version_str():
    """Return Tk's "patchlevel" version string, e.g. "8.6.12"."""
    return tk._default_root.tk.call("info", "patchlevel")
def get_tk_version_info():
    """Tk patchlevel as a tuple of ints; unparsable components become 0."""

    def _to_int(part):
        try:
            return int(part)
        except Exception:
            return 0

    return tuple(_to_int(part) for part in get_tk_version_str().split("."))
def get_style_configuration(style_name, default=None):
    """Return the ttk configuration dict of *style_name*, or *default* when
    the style has no configuration.

    NB! style.configure seems to reuse the returned dict --
    don't change it without copying first.
    """
    style = ttk.Style()
    result = style.configure(style_name)
    if result is not None:
        return result
    # return a fresh dict per call: the previous "default={}" handed the
    # same mutable dict to every caller, so one caller's mutation leaked
    # into all later calls
    return {} if default is None else default
def lookup_style_option(style_name, option_name, default=None):
    """Look up a ttk style option, mapping the strings "True"/"False" to booleans."""
    value = ttk.Style().lookup(style_name, option_name)
    if value is None or value == "":
        return default
    return {"True": True, "False": False}.get(value, value)
def scale(value):
    """Delegate scaling of *value* to the workbench's scale()."""
    return get_workbench().scale(value)
def open_path_in_system_file_manager(path):
    """Open *path* with the OS file manager (open / xdg-open / explorer)."""
    if running_on_mac_os():
        # http://stackoverflow.com/a/3520693/261181
        # -R doesn't allow showing hidden folders
        subprocess.Popen(["open", path])
    elif running_on_linux():
        subprocess.Popen(["xdg-open", path])
    else:
        assert running_on_windows()
        subprocess.Popen(["explorer", path])
def _get_dialog_provider():
    """Pick the file-dialog backend: Zenity on Linux when installed (and not
    opted out via the file.avoid_zenity option), otherwise tkinter.filedialog."""
    if platform.system() != "Linux" or get_workbench().get_option("file.avoid_zenity"):
        return filedialog

    import shutil

    if shutil.which("zenity"):
        return _ZenityDialogProvider

    # fallback
    return filedialog
def asksaveasfilename(**options):
    """Save-file dialog via the selected provider, with parent/master normalized."""
    # https://tcl.tk/man/tcl8.6/TkCmd/getSaveFile.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().asksaveasfilename(**options)
def askopenfilename(**options):
    """Single-file open dialog via the selected provider, with parent/master normalized."""
    # https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().askopenfilename(**options)
def askopenfilenames(**options):
    """Multi-file open dialog via the selected provider, with parent/master normalized."""
    # https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().askopenfilenames(**options)
def askdirectory(**options):
    """Directory-chooser dialog via the selected provider, with parent/master normalized."""
    # https://tcl.tk/man/tcl8.6/TkCmd/chooseDirectory.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().askdirectory(**options)
def _check_dialog_parent(options):
    """Normalize the "parent"/"master" entries in Tk dialog *options*.

    Resolves both to toplevel widgets (falling back to the default root),
    logs a warning when they disagree or are missing, and removes both
    entries on macOS.
    """
    if options.get("parent") and options.get("master"):
        parent = options["parent"].winfo_toplevel()
        master = options["master"].winfo_toplevel()
        if parent is not master:
            logger.warning(
                "Dialog with different parent/master toplevels:\n%s",
                "".join(traceback.format_stack()),
            )
    elif options.get("parent"):
        parent = options["parent"].winfo_toplevel()
        master = options["parent"].winfo_toplevel()
    elif options.get("master"):
        parent = options["master"].winfo_toplevel()
        master = options["master"].winfo_toplevel()
    else:
        logger.warning("Dialog without parent:\n%s", "".join(traceback.format_stack()))
        parent = tk._default_root
        master = tk._default_root

    options["parent"] = parent
    options["master"] = master

    if running_on_mac_os():
        # used to require master/parent (https://bugs.python.org/issue34927)
        # but this is deprecated in Catalina (https://github.com/thonny/thonny/issues/840)
        # TODO: Consider removing this when upgrading from Tk 8.6.8
        del options["master"]
        del options["parent"]
class _ZenityDialogProvider:
    """File dialogs implemented by shelling out to Zenity, mirroring the
    subset of the tkinter.filedialog API used in this module."""

    # https://www.writebash.com/bash-gui/zenity-create-file-selection-dialog-224.html
    # http://linux.byexamples.com/archives/259/a-complete-zenity-dialog-examples-1/
    # http://linux.byexamples.com/archives/265/a-complete-zenity-dialog-examples-2/

    # another possibility is to use PyGobject: https://github.com/poulp/zenipy

    @classmethod
    def askopenfilename(cls, **options):
        """Return the selected path, or None when cancelled / zenity failed."""
        args = cls._convert_common_options("Open file", **options)
        return cls._call(args)

    @classmethod
    def askopenfilenames(cls, **options):
        """Return the list of selected paths ([] when cancelled / zenity failed)."""
        args = cls._convert_common_options("Open files", **options)
        result = cls._call(args + ["--multiple"])
        if not result:
            # _call returns None on cancel/failure; the original crashed
            # here with AttributeError (None.split)
            return []
        return result.split("|")

    @classmethod
    def asksaveasfilename(cls, **options):
        """Return the chosen target path (with defaultextension applied) or None."""
        args = cls._convert_common_options("Save as", **options)
        args.append("--save")
        if options.get("confirmoverwrite", True):
            args.append("--confirm-overwrite")

        filename = cls._call(args)
        if not filename:
            return None

        if "defaultextension" in options and "." not in os.path.basename(filename):
            filename += options["defaultextension"]

        return filename

    @classmethod
    def askdirectory(cls, **options):
        """Return the selected directory path or None."""
        args = cls._convert_common_options("Select directory", **options)
        args.append("--directory")
        return cls._call(args)

    @classmethod
    def _convert_common_options(cls, default_title, **options):
        # Translate Tk-style dialog options into zenity CLI arguments
        args = ["--file-selection", "--title=%s" % options.get("title", default_title)]

        filename = _options_to_zenity_filename(options)
        if filename:
            args.append("--filename=%s" % filename)

        parent = options.get("parent", options.get("master", None))
        if parent is not None:
            args.append("--modal")
            args.append("--attach=%s" % hex(parent.winfo_id()))

        for desc, pattern in options.get("filetypes", ()):
            # zenity requires star before extension
            pattern = pattern.replace(" .", " *.")
            if pattern.startswith("."):
                pattern = "*" + pattern

            if pattern == "*.*":
                # ".*" was provided to make the pattern safe for Tk dialog
                # not required with Zenity
                pattern = "*"

            args.append("--file-filter=%s | %s" % (desc, pattern))

        return args

    @classmethod
    def _call(cls, args):
        # Run zenity and return its stripped stdout, or None on failure/cancel
        args = ["zenity", "--name=Thonny", "--class=Thonny"] + args
        result = subprocess.run(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
        )

        if result.returncode == 0:
            return result.stdout.strip()
        else:
            # TODO: log problems
            print(result.stderr, file=sys.stderr)
            # could check stderr, but it may contain irrelevant warnings
            return None
def _options_to_zenity_filename(options):
if options.get("initialdir"):
if options.get("initialfile"):
return os.path.join(options["initialdir"], options["initialfile"])
else:
return options["initialdir"] + os.path.sep
return None
def register_latin_shortcut(
    registry, sequence: str, handler: Callable, tester: Optional[Callable]
) -> None:
    """Register (handler, tester) under the (state, keycode) of *sequence*."""
    key = sequence_to_event_state_and_keycode(sequence)
    if key is not None:
        registry.setdefault(key, []).append((handler, tester))
def handle_mistreated_latin_shortcuts(registry, event):
    """Invoke Ctrl+LatinLetter shortcut handlers for events coming from
    non-Latin keyboard layouts, where Tk's normal binding does not fire."""
    # tries to handle Ctrl+LatinLetter shortcuts
    # given from non-Latin keyboards
    # See: https://bitbucket.org/plas/thonny/issues/422/edit-keyboard-shortcuts-ctrl-c-ctrl-v-etc

    # only consider events with Control held
    if not event.state & 0x04:
        return

    if running_on_mac_os():
        return

    # consider only part of the state,
    # because at least on Windows, Ctrl-shortcuts' state
    # has something extra
    simplified_state = 0x04
    if shift_is_pressed(event.state):
        simplified_state |= 0x01

    # print(simplified_state, event.keycode)
    if (simplified_state, event.keycode) in registry:
        if event.keycode != ord(event.char) and event.keysym in (None, "??"):
            # keycode and char doesn't match,
            # this means non-latin keyboard
            for handler, tester in registry[(simplified_state, event.keycode)]:
                if tester is None or tester():
                    handler()
def show_dialog(dlg, master=None, geometry=True, min_left=0, min_top=0):
    """Show *dlg* modally over *master* and block until it is closed.

    *geometry* may be True (auto-place via assign_geometry), a Tk geometry
    string, or falsy (leave placement unchanged).
    """
    if getattr(dlg, "closed", False):
        return

    if master is None:
        master = getattr(dlg, "parent", None) or getattr(dlg, "master", None) or tk._default_root

    master = master.winfo_toplevel()

    get_workbench().event_generate("WindowFocusOut")
    # following order seems to give most smooth appearance
    focused_widget = master.focus_get()
    dlg.transient(master.winfo_toplevel())

    if geometry:
        # dlg.withdraw() # unfortunately inhibits size calculations in assign_geometry
        if isinstance(geometry, str):
            dlg.geometry(geometry)
        else:
            assign_geometry(dlg, master, min_left, min_top)

    # dlg.wm_deiconify()
    dlg.lift()
    dlg.focus_set()
    try:
        dlg.grab_set()
    except TclError as e:
        print("Can't grab:", e, file=sys.stderr)

    # block until the dialog window is destroyed
    master.winfo_toplevel().wait_window(dlg)
    dlg.grab_release()

    # restore focus and grab to the master window
    master.winfo_toplevel().lift()
    master.winfo_toplevel().focus_force()
    master.winfo_toplevel().grab_set()
    if running_on_mac_os():
        master.winfo_toplevel().grab_release()

    if focused_widget is not None:
        try:
            focused_widget.focus_force()
        except TclError:
            pass
def popen_with_ui_thread_callback(*Popen_args, on_completion, poll_delay=0.1, **Popen_kwargs):
    """Start a subprocess, drain its stdout/stderr on daemon threads, and call
    *on_completion(proc, out_lines, err_lines)* via Tk's event loop when it exits.

    Returns the Popen object immediately.
    """
    if "encoding" not in Popen_kwargs:
        # make the child emit UTF-8 so the text-mode pipes decode reliably
        if "env" not in Popen_kwargs:
            Popen_kwargs["env"] = os.environ.copy()
        Popen_kwargs["env"]["PYTHONIOENCODING"] = "utf-8"
        if sys.version_info >= (3, 6):
            Popen_kwargs["encoding"] = "utf-8"

    proc = subprocess.Popen(*Popen_args, **Popen_kwargs)

    # Need to read in thread in order to avoid blocking because
    # of full pipe buffer (see https://bugs.python.org/issue1256)
    out_lines = []
    err_lines = []

    def read_stream(stream, target_list):
        while True:
            line = stream.readline()
            if line:
                target_list.append(line)
            else:
                break

    t_out = threading.Thread(target=read_stream, daemon=True, args=(proc.stdout, out_lines))
    t_err = threading.Thread(target=read_stream, daemon=True, args=(proc.stderr, err_lines))
    t_out.start()
    t_err.start()

    def poll():
        # Runs on the UI thread via Tk's after-mechanism
        if proc.poll() is not None:
            # process exited: give the reader threads a moment to drain
            t_out.join(3)
            t_err.join(3)
            on_completion(proc, out_lines, err_lines)
            return

        tk._default_root.after(int(poll_delay * 1000), poll)

    poll()

    return proc
class MenuEx(tk.Menu):
    """tk.Menu whose items can carry a "tester" callable; each time the menu
    is posted, items whose tester returns falsy are disabled."""

    def __init__(self, target):
        self._testers = {}  # item label -> tester callable (or None)
        super().__init__(
            target, tearoff=False, postcommand=self.on_post, **get_style_configuration("Menu")
        )

    def on_post(self, *args):
        self.update_item_availability()

    def update_item_availability(self):
        # NOTE(review): self.index("end") is None for an empty menu, which
        # would make range() raise here -- confirm menus are never empty
        # when posted.
        for i in range(self.index("end") + 1):
            item_data = self.entryconfigure(i)
            if "label" in item_data:
                tester = self._testers.get(item_data["label"])

                if tester and not tester():
                    self.entryconfigure(i, state=tk.DISABLED)
                else:
                    self.entryconfigure(i, state=tk.NORMAL)

    def add(self, itemType, cnf={}, **kw):
        # Accepts an extra "tester" option, removed before delegating to Tk
        cnf = cnf or kw
        tester = cnf.get("tester")
        if "tester" in cnf:
            del cnf["tester"]

        super().add(itemType, cnf)

        itemdata = self.entryconfigure(self.index("end"))
        labeldata = itemdata.get("label")
        if labeldata:
            self._testers[labeldata] = tester
class TextMenu(MenuEx):
    """Context menu for a text widget with Cut/Copy/Paste/Select All items."""

    def __init__(self, target):
        self.text = target
        MenuEx.__init__(self, target)
        self.add_basic_items()
        self.add_extra_items()

    def add_basic_items(self):
        self.add_command(label=tr("Cut"), command=self.on_cut, tester=self.can_cut)
        self.add_command(label=tr("Copy"), command=self.on_copy, tester=self.can_copy)
        self.add_command(label=tr("Paste"), command=self.on_paste, tester=self.can_paste)

    def add_extra_items(self):
        # Hook for subclasses; default appends a separator and Select All
        self.add_separator()
        self.add_command(label=tr("Select All"), command=self.on_select_all)

    def on_cut(self):
        self.text.event_generate("<<Cut>>")

    def on_copy(self):
        self.text.event_generate("<<Copy>>")

    def on_paste(self):
        self.text.event_generate("<<Paste>>")

    def on_select_all(self):
        self.text.event_generate("<<SelectAll>>")

    def can_cut(self):
        return self.get_selected_text() and not self.selection_is_read_only()

    def can_copy(self):
        return self.get_selected_text()

    def can_paste(self):
        return not self.selection_is_read_only()

    def get_selected_text(self):
        # Empty string when there is no selection
        try:
            return self.text.get("sel.first", "sel.last")
        except TclError:
            return ""

    def selection_is_read_only(self):
        if hasattr(self.text, "is_read_only"):
            return self.text.is_read_only()

        return False
def create_url_label(master, url, text=None):
    """Clickable label that opens *url* in the default web browser."""
    import webbrowser

    return create_action_label(master, text or url, lambda _: webbrowser.open(url))
def create_action_label(master, text, click_handler, **kw):
    """Underlined hyperlink-style label that calls *click_handler* on left-click."""
    url_font = tkinter.font.nametofont("TkDefaultFont").copy()
    url_font.configure(underline=1)
    url_label = ttk.Label(
        master, text=text, style="Url.TLabel", cursor="hand2", font=url_font, **kw
    )
    url_label.bind("<Button-1>", click_handler)
    return url_label
def get_size_option_name(window):
    """Configuration key under which *window*'s size is persisted."""
    return "layout.%s_size" % type(window).__name__
def get_default_theme():
    """Name of the preferred UI theme for the current platform."""
    if running_on_windows():
        return "Windows"
    elif running_on_rpi():
        return "Raspberry Pi"
    else:
        return "Enhanced Clam"
def get_default_basic_theme():
    """Name of the fallback basic ttk theme for the current platform."""
    return "xpnative" if running_on_windows() else "clam"
EM_WIDTH = None  # cached pixel width of "m" in the default font (computed lazily)


def ems_to_pixels(x):
    """Convert a length given in em-s to pixels, using the default font's "m" width."""
    global EM_WIDTH
    if EM_WIDTH is None:
        EM_WIDTH = tkinter.font.nametofont("TkDefaultFont").measure("m")
    return int(EM_WIDTH * x)
_btn_padding = None  # cached result of get_button_padding(), used by tr_btn


def set_text_if_different(widget, text):
    """Assign widget["text"] only when the value actually differs."""
    if widget["text"] != text:
        widget["text"] = text
def tr_btn(s):
    """Translates button caption, adds padding to make sure text fits"""
    global _btn_padding
    if _btn_padding is None:
        # compute the padding once and cache it at module level
        _btn_padding = get_button_padding()
    return _btn_padding + tr(s) + _btn_padding
def add_messagebox_parent_checker():
    """Monkeypatch tkinter.messagebox functions so that every call gets its
    parent/master options normalized by _check_dialog_parent first."""

    def wrap_with_parent_checker(original):
        def wrapper(*args, **options):
            _check_dialog_parent(options)
            return original(*args, **options)

        return wrapper

    from tkinter import messagebox

    for name in [
        "showinfo",
        "showwarning",
        "showerror",
        "askquestion",
        "askokcancel",
        "askyesno",
        "askyesnocancel",
        "askretrycancel",
    ]:
        fun = getattr(messagebox, name)
        setattr(messagebox, name, wrap_with_parent_checker(fun))
if __name__ == "__main__":
    # ad-hoc manual smoke test: just create a root window
    root = tk.Tk()
|
crontab_autocheck.py | import requests, datetime, math, threading
# Current quarter label, e.g. "2024Q2" (year + "Q" + quarter index 1..4)
quarter = str(datetime.datetime.now().year)+"Q"+str(math.ceil(datetime.datetime.now().month/3.))
# Data-quality service endpoint that triggers rule execution
url = "http://dataquality.utrustfintech.com/check/rule_execute"
def post_rule_execute(company, quarter):
    """POST a rule-execution request for *company* and *quarter* to the
    data-quality service (module-level *url*)."""
    data = {'company': company, 'username': 'crontab', 'quarter': quarter}
    # fire-and-forget: the response is not used, so don't bind it to a variable
    requests.post(url, data)
# Fire one rule-execution request per company, all in parallel
companies = ('ycxt', 'yczc', 'gdzdb', 'ycjk', 'fdct', 'zyyc', 'jz')
threads = [threading.Thread(target=post_rule_execute, args=(company, quarter))
           for company in companies]
for thread in threads:
    thread.start()
# wait until all checks have finished
for thread in threads:
    thread.join()
|
dctp.py | import socket
import time
from threading import Thread, RLock
import json as _json
from utils import print_warning, print_info
def send_status_code(status, status_text):
    """Build the standard status payload used in DCTP requests and responses."""
    return dict(status=status, status_text=status_text)
class Request():
    """Incoming DCTP message: routing fields are lifted out of the JSON
    payload into attributes; the remainder stays in self.json."""

    def __init__(self, json, data=b''):
        for field in ('id_worker', 'id_client', 'method'):
            if field in json:
                setattr(self, field, json.pop(field))
        self.json = json
        self.data = data
class Response():
    """DCTP reply: mandatory status/status_text, optional routing fields,
    the remaining JSON payload in self.json."""

    def __init__(self, json, data=b''):
        self.status = json.pop('status')
        self.status_text = json.pop('status_text')
        for field in ('id_worker', 'id_client'):
            if field in json:
                setattr(self, field, json.pop(field))
        self.json = json
        self.data = data
class ClientDCTP():
    """DCTP client: keeps two TCP sockets to the server (one per direction)
    and dispatches server-issued requests to handlers registered via @method."""

    def __init__(self, client_name, reconnect=False):
        # one socket per direction, keyed by these labels
        self._type_connection = ['server to client', 'client to server']
        self._lock_obj = RLock()
        self._client_name = client_name
        self._dict_methods_call = {}  # method name -> handler
        self._socks = {}
        self._stoping = False
        self._connected = False
        self._reconnect = reconnect   # run an auto-reconnect loop when True
        self.receiver_thread = None

    @property
    def client_name(self):
        return self._client_name

    def _close_socks(self):
        # Best-effort close of both sockets; marks the client as disconnected.
        self._connected = False
        for type_connect in self._socks:
            try:
                self._socks[type_connect].close()
            except:
                pass

    def disconnect(self):
        """Stop the client and wait until the receiver thread has exited."""
        self._stoping = True
        self._close_socks()
        while self.receiver_thread and self.receiver_thread.is_alive():
            time.sleep(0.1)

    def is_connected(self):
        return self._connected

    def _reconnect_loop(self):
        # Background loop: re-establish the connection whenever it drops.
        while not self._stoping:
            if not self.is_connected():
                self._connect()
            time.sleep(1)

    def _connect(self):
        # Open both sockets and register this client on each; failures are
        # swallowed (the reconnect loop will retry).
        try:
            for type_connect in self._type_connection:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((self._ip, self._port))

                # register the client
                json = _json.dumps({'id_worker': self._client_name, 'type': type_connect})
                sock.send(len(json).to_bytes(4, "big") + bytes(json, 'utf-8'))

                # wait for the registration confirmation from the server
                data = self._recv_data(sock)
                if data[0]['status'] != 0:
                    return False
                self._socks[type_connect] = sock
                if type_connect == 'server to client':
                    # start a thread receiving incoming requests from the server
                    self.receiver_thread = Thread(target=self._receiver, args=[sock])
                    self.receiver_thread.start()
            self._connected = True
        except:
            pass

    # Establish the connection
    def connect(self, ip, port):
        """Connect to *ip*:*port*, optionally keeping the link alive via a
        reconnect thread."""
        self.disconnect()
        self._stoping = False
        self._ip = ip
        self._port = port
        if self._reconnect:
            reconnect_thread = Thread(target=self._reconnect_loop)
            reconnect_thread.start()
        else:
            self._connect()

    def _send_data(self, sock, id_client, json, data):
        # Frame layout: 4-byte id length + id + 4-byte json length + json
        # + 4-byte data length + data (lengths are big-endian)
        sock.send(len(id_client).to_bytes(4, "big") + bytes(id_client, 'utf-8') +
                  len(json).to_bytes(4, "big") + bytes(json, 'utf-8') + len(data).to_bytes(4, "big") + data)

    def _recv_data(self, sock):
        # Returns (parsed json, decoded data) or None on any receive error.
        try:
            # receive the data
            length_json = int.from_bytes(sock.recv(4), 'big')
            length_data = int.from_bytes(sock.recv(4), 'big')
            return _json.loads(sock.recv(length_json).decode('utf-8')), sock.recv(length_data).decode('utf-8')
        except:
            return

    def request(self, method, id_client=None, json={}, data=b''):
        """Send *method* to the server and return its Response.

        NOTE(review): json={} is a shared mutable default and receives
        json['method'] below, so callers relying on the default share
        one dict -- confirm this is acceptable.
        """
        if id_client is None:
            id_client = self.client_name
        if type(data) != bytes:
            return Response(send_status_code(100, 'Parameter data is not bytes'))
        json['method'] = method
        json = _json.dumps(json)
        try:
            with self._lock_obj:
                sock = self._socks['client to server']
                # send the request to the server
                self._send_data(sock, id_client, json, data)
                # receive the reply from the server
                return Response(*self._recv_data(sock))
        except:
            self._close_socks()
            return Response(send_status_code(100, 'Request break connection'))

    def _receiver(self, sock):
        # Thread body: handle requests pushed by the server until the
        # connection breaks or the client is stopped.
        while True:
            # wait for a request from the server
            request = self._recv_data(sock)
            if request is None:
                print(f'Client {self._client_name} connection break.')
                return
            if self._stoping:
                break
            request = Request(*request)
            data = b''
            if request.method in self._dict_methods_call:
                response = self._dict_methods_call[request.method](request)
                if request.method == 'stop':
                    break
                # prepare the reply to the server
                if type(response) == bytes:
                    data = response
                    response = None
                # add a default return status (0) when the handler did not set one
                if response is None or 'status' not in response.keys():
                    response = send_status_code(0, "success")
            else:
                response = send_status_code(100, "not method in request")
            # send the reply to the server
            json = _json.dumps(response)
            sock.send(len(json).to_bytes(4, "big") + len(data).to_bytes(4, "big") + bytes(json, 'utf-8') + data)
        print_info(f'Client {self._client_name} disconnect {self._ip}:{self._port}')
        self._close_socks()

    def method(self, name_method):
        # Decorator: store handlers for server-issued methods by name.
        # NOTE(review): the inner decorator returns None, so the decorated
        # name gets rebound to None at the definition site -- confirm intended.
        def decorator(func):
            self._dict_methods_call[name_method] = func
        return decorator
class ServerDCTP(Thread):
    """DCTP server thread: accepts worker connections on a TCP port, pairs
    their two sockets by connection type, and dispatches incoming requests
    to handlers registered via the @method decorator."""

    def __init__(self, port=10_000):
        Thread.__init__(self)
        self.stoping = False
        self.count_current_work = 0   # number of handler calls in flight
        self._workers = {}            # id_worker -> {connection type: socket}
        self._clients = {}
        self._port = port
        self._dict_methods_call = {}  # method name -> handler
        self.lock_obj = RLock()
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self._port < 10_000:
            self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # bind the requested port, or probe upwards until a free one is found
        # NOTE(review): 65635 below looks like a typo for 65535 (max port) -- confirm
        while True:
            try:
                self._sock.bind(('', self._port))
                break
            except:
                if self._port < 10000:
                    raise Exception(f'Error do not start server port {self._port}')
                self._port = (self._port + 1) % 65635
        print_info(f'Server DCTP started port: {self._port}')
        self._sock.listen(100)

    def stop(self):
        """Ask the server to stop and wait for in-flight handlers to finish."""
        self.stoping = True
        while self.count_current_work != 0:
            time.sleep(0.1)
        self._sock.close()

    def close_worker(self, id_worker):
        """Remove the worker and best-effort close both of its sockets."""
        worker = self._workers.pop(id_worker)
        for type_sock in worker:
            try:
                worker[type_sock].close()
            except:
                pass

    def run(self):
        # Accept loop: register each connecting worker socket.
        while True:
            try:
                response = None
                # wait for a client connection
                worker_sock, _ = self._sock.accept()
                if self.stoping:
                    return
                worker_sock.settimeout(5)
                try:
                    # read the message length
                    length_response = int.from_bytes(worker_sock.recv(4), 'big')
                    # read the message itself
                    request = _json.loads(worker_sock.recv(length_response).decode('utf-8'))
                except socket.timeout:
                    worker_sock.close()
                    continue
                if not request:
                    worker_sock.close()
                    # NOTE(review): no "continue" here -- the closed socket is
                    # still used below, relying on the socket.error handler.
                try:
                    # check whether this client is already connected
                    if request["id_worker"] in self._workers and \
                            request["type"] in self._workers[request["id_worker"]]:
                        self._send_data(worker_sock, send_status_code(110, f'client already connect'))
                        print_warning(f'client {request["id_worker"]} already connect')
                        # leftover debug output
                        print(6666666666666666666666, self._workers[request["id_worker"]])
                        self.close_worker(request["id_worker"])
                        #continue
                    worker_sock.settimeout(None)
                    # validate the connection via the connect_valid_client decorator hook
                    valid_connect = True
                    if "connect_valid_client" in self._dict_methods_call:
                        valid_connect = self._dict_methods_call["connect_valid_client"](
                            Request(json={"id_worker": request["id_worker"]}))
                        if type(valid_connect) is not bool:
                            raise Exception(f'Method "connect_valid_client" return {type(valid_connect)} '
                                            f'should be boolean argument')
                    # register the client
                    if valid_connect:
                        if request["id_worker"] not in self._workers.keys():
                            print_info(f'Client "{request["id_worker"]}" connected. port:{self._port}')
                        self._workers[request['id_worker']] = self._workers.get(request['id_worker'], {})
                        self._workers[request['id_worker']][request['type']] = worker_sock
                        if request['type'] == "client to server":
                            receiver_thread = Thread(target=self._receiver,
                                                     args=(worker_sock, request['id_worker']))
                            receiver_thread.start()
                            if "on_connected" in self._dict_methods_call:
                                self._dict_methods_call["on_connected"](Request(json={"id_worker":
                                                                                          request["id_worker"]}))
                        self._send_data(worker_sock, send_status_code(0, "success"))
                    else:
                        self._send_data(worker_sock,
                                        send_status_code(120, f'client {request["id_worker"]} is not valid'))
                        print_warning(f'client {request["id_worker"]} is not valid')
                except socket.error:
                    # leftover debug output
                    print(99999999999, worker_sock)
                    if request and "id_worker" in request and request["id_worker"] in self._workers:
                        self.close_worker(request["id_worker"])
            except Exception as e:
                #except socket.error:
                if self.stoping:
                    break
                # leftover debug output
                print(888888888888, self._sock)
                print(888888888888, e)

    @staticmethod
    def _send_data(sock, json, data=b''):
        # Frame layout: 4-byte json length + 4-byte data length + json + data
        json = _json.dumps(json)
        sock.send(len(json).to_bytes(4, "big") + len(data).to_bytes(4, "big") + bytes(json, 'utf-8') + data)

    def _receiver(self, sock, id_worker):
        lock_obj = RLock()
        # wait for requests and dispatch to the matching method
        while True:
            try:
                len_id_client = int.from_bytes(sock.recv(4), 'big')
                with lock_obj:
                    id_client = sock.recv(len_id_client).decode('utf-8')
                    request = sock.recv(int.from_bytes(sock.recv(4), 'big'))
                    data = sock.recv(int.from_bytes(sock.recv(4), 'big'))
                request = _json.loads(request.decode('utf-8'))
                if self.stoping:
                    return
                request.update({'id_worker': id_worker, 'id_client': id_client})
                self.count_current_work += 1
                try:
                    response = self._dict_methods_call[request['method']](Request(json=request, data=data))
                except Exception as e:
                    print(e)
                    time.sleep(0.1)
                    response = send_status_code(100, f'method {request["method"]} not ready or does not exist')
                self.count_current_work -= 1
                if response is None:
                    response = {}
                response.update({'id_worker': id_worker, 'id_client': id_client})
                if 'status' not in response.keys():
                    response.update(send_status_code(0, "success"))
                self._send_data(sock, json=response)
            except:
                if id_worker in self._workers:
                    self.close_worker(id_worker)
                    if 'on_disconnected' in self._dict_methods_call:
                        self._dict_methods_call['on_disconnected'](Request(json={"id_worker": id_worker}))
                break

    def request(self, id_worker, method, json={}, data=b'', timeout=10):
        # send a request to the client and receive the reply
        # NOTE(review): json={} is a shared mutable default and receives
        # json['method'] below -- confirm this is acceptable.
        if id_worker not in self._workers:
            return Response(send_status_code(100, f'Client {id_worker} is not connect'))
        if type(data) != bytes:
            return Response(send_status_code(100, 'Parameter data is not bytes'))
        json['method'] = method
        json = _json.dumps(json)
        try:
            # send the request
            sock = self._workers[id_worker]['server to client']
            with self.lock_obj:
                sock.send(len(json).to_bytes(4, "big") + len(data).to_bytes(4, "big") + bytes(json, 'utf-8') + data)
                # receive the reply
                sock.settimeout(timeout)
                length_json = int.from_bytes(sock.recv(4), 'big')
                length_data = int.from_bytes(sock.recv(4), 'big')
                response = Response(_json.loads(sock.recv(length_json).decode('utf-8')),
                                    sock.recv(length_data).decode('utf-8'))
                sock.settimeout(None)
                return response
        except:
            # connection broke
            if id_worker in self._workers:
                self.close_worker(id_worker)
            #raise Exception(f'Client connection break {id_worker} port {self._port}')
            return Response(send_status_code(100, 'Request break connection'))

    def method(self, name_method: str):
        # Decorator: store method handlers by name.
        # NOTE(review): the inner decorator returns None, so the decorated
        # name gets rebound to None at the definition site -- confirm intended.
        def decorator(func):
            self._dict_methods_call[name_method] = func
        return decorator

    @property
    def current_port(self):
        # Actual bound port (may differ from the requested one after probing)
        return self._port

    def get_workers(self):
        # Tuple of currently registered worker ids
        return tuple(self._workers.keys())

    def get_count_workers(self):
        return len(self._workers)
__init__.py | import argparse
import gettext
import os
import sys
import threading
import time
from collections import defaultdict
from http.server import HTTPServer, SimpleHTTPRequestHandler
from tempfile import TemporaryDirectory
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from .generate import generate
class HovercraftEventHandler(FileSystemEventHandler):
    """Watchdog handler that sets self.quit when any watched source file changes."""

    def __init__(self, filelist):
        self.filelist = filelist  # paths that belong to the presentation
        self.quit = False         # flag polled by generate_and_observe
        super().__init__()

    def on_modified(self, event):
        self._update(event.src_path)

    def on_created(self, event):
        self._update(event.src_path)

    def on_moved(self, event):
        # a move counts as a change at the destination path
        self._update(event.dest_path)

    def _update(self, src_path):
        # Signal a rebuild (at most once) when a relevant file changed.
        if self.quit:
            return
        if src_path in self.filelist:
            print("File %s modified, update presentation" % src_path)
            self.quit = True
def generate_and_observe(args, event):
    """Regenerate the presentation and watch its source files for changes.

    Loops for as long as *event* is set: generates the presentation, then
    watches every file that went into it via watchdog; when one of them
    changes, the observer is torn down and the presentation is regenerated.

    :param args: parsed command-line arguments, forwarded to generate().
    :param event: threading.Event used as a run flag; the caller clears it
        to make this loop terminate.
    """
    # FIX: Event.isSet() is a deprecated camelCase alias that has been
    # removed in recent Python versions; use the canonical is_set().
    while event.is_set():
        # Generate the presentation
        monitor_list = generate(args)
        print("Presentation generated.")

        # Group the files to watch by their containing directory, since
        # watchdog schedules one watch per directory.
        directories = defaultdict(list)
        for file in monitor_list:
            directory, filename = os.path.split(file)
            directories[directory].append(filename)

        observer = Observer()
        handler = HovercraftEventHandler(monitor_list)
        for directory, files in directories.items():
            observer.schedule(handler, directory, recursive=False)
        observer.start()

        # Poll until the run flag is cleared or a watched file changed.
        while event.wait(1):
            time.sleep(0.05)
            if handler.quit:
                break
        observer.stop()
        observer.join()
def main():
    """Command-line entry point for Hovercraft!

    Parses the arguments, then either renders the presentation into the
    given target directory, or — when no target directory is supplied —
    serves it from a temporary directory over HTTP while regenerating it
    whenever a source file changes.
    """
    # That the argparse default strings are lowercase is ugly.

    def my_gettext(s):
        return s.capitalize()

    gettext.gettext = my_gettext

    parser = argparse.ArgumentParser(
        description='Create impress.js presentations with reStructuredText',
        add_help=False)
    parser.add_argument(
        'presentation',
        metavar='<presentation>',
        help='The path to the reStructuredText presentation file.')
    parser.add_argument(
        'targetdir',
        metavar='<targetdir>',
        nargs='?',
        help=('The directory where the presentation is saved. Will be created '
              'if it does not exist. If you do not specify a targetdir '
              'Hovercraft! will instead start a webserver and serve the '
              'presentation from that server.'))
    parser.add_argument(
        '-h', '--help',
        action='help',
        help='Show this help.')
    parser.add_argument(
        '-t',
        '--template',
        help=('Specify a template. Must be a .cfg file, or a directory with a '
              'template.cfg file. If not given it will use a default template.'))
    parser.add_argument(
        '-c',
        '--css',
        help=('An additional css file for the presentation to use. '
              'See also the ``:css:`` settings of the presentation.'))
    # NOTE(review): these two adjacent string literals concatenate without a
    # separating space ("script.See also") — confirm and fix upstream.
    parser.add_argument(
        '-j',
        '--js',
        help=('An additional javascript file for the presentation to use. Added as a js-body script.'
              'See also the ``:js-body:`` settings of the presentation.'))
    parser.add_argument(
        '-a',
        '--auto-console',
        action='store_true',
        help=('Open the presenter console automatically. This is useful when '
              'you are rehearsing and making sure the presenter notes are '
              'correct. You can also set this by having ``:auto-console: '
              'true`` first in the presentation.'))
    parser.add_argument(
        '-s',
        '--skip-help',
        action='store_true',
        help=('Do not show the initial help popup.'))
    parser.add_argument(
        '-n',
        '--skip-notes',
        action='store_true',
        help=('Do not include presenter notes in the output.'))
    parser.add_argument(
        '-p',
        '--port',
        default='0.0.0.0:8000',
        help=('The address and port that the server uses. '
              'Ex 8080 or 127.0.0.1:9000. Defaults to 0.0.0.0:8000.'))
    parser.add_argument(
        '--mathjax',
        default=os.environ.get('HOVERCRAFT_MATHJAX', 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-MML-AM_CHTML'),
        help=('The URL to the mathjax library.'
              ' (It will only be used if you have rST ``math::`` in your document)'))
    parser.add_argument(
        '-N',
        '--slide-numbers',
        action='store_true',
        help=('Show slide numbers during the presentation.'))

    args = parser.parse_args()

    # XXX Bit of a hack, clean this up, I check for this twice, also in the template.
    if args.template and args.template not in ('simple', 'default'):
        args.template = os.path.abspath(args.template)

    if args.targetdir:
        # Generate the presentation
        generate(args)
    else:
        # Server mode. Start a server that serves a temporary directory.
        with TemporaryDirectory() as targetdir:
            args.targetdir = targetdir
            # Resolve the source path before chdir'ing into the tempdir below.
            args.presentation = os.path.abspath(args.presentation)
            # Set up watchdog to regenerate presentation if saved.
            event = threading.Event()
            event.set()
            thread = threading.Thread(target=generate_and_observe, args=(args, event))

            try:
                # Serve presentation
                if ':' in args.port:
                    bind, port = args.port.split(':')
                else:
                    bind, port = '0.0.0.0', args.port
                port = int(port)

                # First create the server. This checks that we can connect to
                # the port we want to.
                os.chdir(targetdir)
                server = HTTPServer((bind, port), SimpleHTTPRequestHandler)
                print("Serving HTTP on", bind, "port", port, "...")
                try:
                    # Now generate the presentation
                    thread.start()
                    try:
                        # All is good, start the server
                        server.serve_forever()
                    except KeyboardInterrupt:
                        print("\nKeyboard interrupt received, exiting.")
                    finally:
                        # Server exited
                        server.server_close()
                finally:
                    # Stop the generation thread
                    event.clear()
                    # Wait for it to end
                    thread.join()
            except PermissionError:
                print("Can't bind to port %s:%s: No permission" % (bind, port))
            except OSError as e:
                # NOTE(review): 98 is EADDRINUSE on Linux only; errno.EADDRINUSE
                # would be portable — confirm before changing behavior.
                if e.errno == 98:
                    print("Can't bind to port %s:%s: port already in use" % (bind, port))
                else:
                    raise
|
display.py | from RPLCD.i2c import BaseCharLCD
import threading
import time
import uuid
class Panel:
    """One text layer of a Display, with an optional time-to-live (TTL).

    A panel holds a piece of text; when a TTL is set, a background watchdog
    thread counts it down once per second and clears the panel when it
    expires. Setting a new TTL supersedes any running watchdog via a fresh
    UUID token.
    """

    def __init__(self, display, changed_listener):
        self.display = display                    # owning Display, notified on every update
        self.changed_listener = changed_listener  # external callback fired on any change
        self.__text = ""
        self.__ttl = -1                           # -1 means "no expiry active"
        self.ttl_watchdog_id = None               # token identifying the current watchdog thread

    @property
    def ttl(self) -> int:
        """Remaining time-to-live in seconds (-1 when no TTL is active)."""
        return self.__ttl

    @property
    def text(self) -> str:
        """Current panel text ("" when empty)."""
        return self.__text

    def update_text(self, text: str):
        """Set the panel text and propagate the change to display and listener."""
        self.__text = text
        self.display.on_panel_updated()
        self.changed_listener()

    def update_ttl(self, ttl: int):
        """Start (or restart) a countdown of *ttl* seconds for this panel.

        A new UUID token is stored so any previously started watchdog thread
        recognizes it has been superseded and stops acting on the panel.
        NOTE(review): the watchdog thread is not a daemon, so a pending TTL
        keeps the process alive until it expires — confirm that is intended.
        """
        self.__ttl = ttl
        wdid = str(uuid.uuid4())
        self.ttl_watchdog_id = wdid
        threading.Thread(target = self.run_expire_watchdog, args = (ttl, wdid)).start()

    def run_expire_watchdog(self, ttl: int, wdid: str):
        """Count down once per second; clear the panel when the TTL reaches zero.

        Only acts while *wdid* still matches ttl_watchdog_id, so a superseded
        watchdog finishes its loop without touching the panel.
        """
        remaining_sec = ttl
        while (remaining_sec > 0):
            time.sleep(1)
            remaining_sec = remaining_sec - 1
            if self.ttl_watchdog_id == wdid:
                if remaining_sec > 0:
                    self.__ttl = remaining_sec
                    self.changed_listener()
                else:
                    self.clear()

    def clear(self):
        """Reset text and TTL and propagate the change."""
        self.__text = ""
        self.__ttl = -1
        self.display.on_panel_updated()
        self.changed_listener()

    def is_empty(self):
        """Return True when the panel holds no text."""
        return len(self.__text) == 0
class Display:
    """Layered LCD text display: the topmost non-empty panel is shown."""

    LAYER_UPPER = 0
    LAYER_MIDDLE = 1
    LAYER_LOWER = 2

    def __init__(self, lcd: BaseCharLCD, changed_listener):
        self.__lcd = lcd
        self.__text = ""
        self.changed_listener = changed_listener
        # One panel per layer, ordered upper to lower.
        self.panels = [Panel(self, changed_listener) for _ in range(3)]

    def panel(self, layer: int) -> Panel:
        """Return the Panel object for the given layer index."""
        return self.panels[layer]

    @property
    def text(self) -> str:
        """The text currently written to the LCD."""
        return self.__text

    def __update_text(self, text: str):
        # Push the new text to the hardware and notify the listener.
        self.__text = text
        self.__lcd.clear()
        self.__lcd.write_string(self.__text)
        self.changed_listener()

    def on_panel_updated(self):
        """Re-render the LCD with the first (highest-priority) non-empty panel."""
        visible = next((p.text for p in self.panels if not p.is_empty()), "")
        self.__update_text(visible)
sync_server.py | #!/usr/bin/env python
"""
Pymodbus Synchronous Server Example
--------------------------------------------------------------------------
The synchronous server is implemented in pure python without any third
party libraries (unless you need to use the serial protocols which require
pyserial). This is helpful in constrained or old environments where using
twisted is just not feasible. What follows is an example of its use:
"""
# --------------------------------------------------------------------------- #
# import the various server implementations
# --------------------------------------------------------------------------- #
from pymodbus.version import version
from pymodbus.server.sync import StartTcpServer
from pymodbus.server.sync import StartTlsServer
from pymodbus.server.sync import StartUdpServer
from pymodbus.server.sync import StartSerialServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock, ModbusSparseDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusBinaryFramer
from multiprocessing import Queue, Process
import time
# --------------------------------------------------------------------------- #
# configure the service logging
# --------------------------------------------------------------------------- #
import logging
FORMAT = ('%(asctime)-15s %(threadName)-15s'
' %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')
logging.basicConfig(format=FORMAT)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
def run_server():
    """Configure and run a synchronous Modbus TCP server.

    Builds a single "broadcast" slave context (every unit id is served the
    same tables), sets the device identity, forks a helper process that
    keeps mutating and printing register values so the served data visibly
    changes, then blocks in the TCP server loop.
    """
    # Data store: each table only answers for the address range it was
    # initialized with; requests outside that range get an invalid-address
    # exception. Tables: discrete inputs (di), coils (co), holding
    # registers (hr), input registers (ir).
    store = ModbusSlaveContext(
        di=ModbusSequentialDataBlock(1000, [17] * 100),
        co=ModbusSequentialDataBlock(2000, [17] * 100),
        hr=ModbusSequentialDataBlock(3000, [17] * 100),
        ir=ModbusSequentialDataBlock(4000, [17] * 100))
    # single=True: the same slave context is returned for every unit id.
    context = ModbusServerContext(slaves=store, single=True)

    # Device identification; unset fields default to empty strings.
    identity = ModbusDeviceIdentification()
    identity.VendorName = 'Pymodbus'
    identity.ProductCode = 'PM'
    identity.VendorUrl = 'http://github.com/riptideio/pymodbus/'
    identity.ProductName = 'Pymodbus Server'
    identity.ModelName = 'Pymodbus Server'
    identity.MajorMinorRevision = version.short()

    def test(context):
        # Background poker: after a 5 s grace period, bump holding register
        # 3001 twice a second and dump a slice of every table.
        time.sleep(5)
        x = 0
        while True:
            time.sleep(0.5)
            context[0].setValues(3, 3001, [x])
            x += 1
            regs = context[0].getValues(2, 1000, 20)
            print("reg:", regs)
            regs = context[0].getValues(1, 2000, 20)
            print("reg:", regs)
            regs = context[0].getValues(3, 3000, 20)
            print("reg:", regs)
            regs = context[0].getValues(4, 4000, 20)
            print("reg:", regs)

    # NOTE: on spawn-based platforms a nested function cannot be pickled as
    # a Process target; this works on fork-based (Linux) setups.
    test_th = Process(target=test, args=(context,))
    test_th.start()

    # Serve over plain TCP. Alternative transports (RTU/ASCII/binary serial,
    # UDP, TLS, or a different framer) are available through the matching
    # Start*Server helpers imported above.
    StartTcpServer(context, identity=identity, address=("25.101.201.240", 5020))
# Script entry point: run the blocking Modbus server when executed directly.
if __name__ == "__main__":
    run_server()
|
fg.py | from threading import Thread
exit_flag = False
def f():
    """Busy-spin incrementing a counter until the global exit_flag is True."""
    counter = 1
    while not exit_flag:
        counter = (counter + 1) % 100000000
        # Report once every 100000 steps so progress is visible without flooding stdout.
        if counter % 100000 == 0:
            print("f making progress: {0}".format(counter))
def g():
    """Busy-spin decrementing a counter until the global exit_flag is True."""
    counter = 1
    while not exit_flag:
        counter = (counter - 1) % 100000000
        # Report once every 100000 steps so progress is visible without flooding stdout.
        if counter % 100000 == 0:
            print("g making progress: {0}".format(counter))
# NOTE(review): duplicate import — Thread is already imported at the top of the file.
from threading import Thread

if __name__ == '__main__':
    # Start both worker threads, then busy-spin in the main thread as well.
    # exit_flag is never set anywhere, so all three loops run until the
    # process is killed externally (a GIL contention demo).
    ts = Thread(target=f,name="F_thread"), Thread(target=g,name="G_thread")
    for t in ts: t.start()
    k = 1
    while not exit_flag:
        k = (k * 2) % 100000000
|
template.py | from __future__ import print_function
import base64
import random
from builtins import object, str
# Empire imports
from typing import List
from empire.server.common import agents, encryption, helpers, messages, packets
from empire.server.utils import data_util
class Listener(object):
    """Template listener skeleton for Empire.

    Copy this module to implement a new listener type: fill in the metadata
    in ``self.info``, declare the runtime-settable options in
    ``self.options``, and implement the generate_* / start / shutdown hooks.
    """

    def __init__(self, mainMenu, params=None):
        """Initialize listener metadata and its settable options.

        :param mainMenu: the main Empire menu/controller object, kept so the
            listener can reach the active-listeners registry.
        :param params: optional sequence of runtime parameters (unused by the
            template). FIX: the original signature used a mutable default
            ``params=[]``, which is created once and shared across all calls;
            ``None`` with a local fallback avoids that pitfall.
        """
        if params is None:
            params = []

        self.info = {
            "Name": "Template",
            "Author": ["@harmj0y"],
            "Description": ("Listener template"),
            # categories - client_server, peer_to_peer, broadcast, third_party
            "Category": ("client_server"),
            "Comments": [],
        }

        # any options needed by the stager, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            "Name": {
                "Description": "Name for the listener.",
                "Required": True,
                "Value": "http",
            },
            "Host": {
                "Description": "Hostname/IP for staging.",
                "Required": True,
                "Value": "http://%s" % (helpers.lhost()),
            },
            "BindIP": {
                "Description": "The IP to bind to on the control server.",
                "Required": True,
                "Value": "0.0.0.0",
            },
            "Port": {
                "Description": "Port for the listener.",
                "Required": True,
                "Value": "",
            },
            "Launcher": {
                "Description": "Launcher string.",
                "Required": True,
                "Value": "powershell -noP -sta -w 1 -enc ",
            },
            "StagingKey": {
                "Description": "Staging key for initial agent negotiation.",
                "Required": True,
                "Value": "2c103f2c4ed1e59c0b4e2e01821770fa",
            },
            "DefaultDelay": {
                "Description": "Agent delay/reach back interval (in seconds).",
                "Required": True,
                "Value": 5,
            },
            "DefaultJitter": {
                "Description": "Jitter in agent reachback interval (0.0-1.0).",
                "Required": True,
                "Value": 0.0,
            },
            "DefaultLostLimit": {
                "Description": "Number of missed checkins before exiting",
                "Required": True,
                "Value": 60,
            },
            "DefaultProfile": {
                "Description": "Default communication profile for the agent.",
                "Required": True,
                "Value": "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
            },
            "CertPath": {
                "Description": "Certificate path for https listeners.",
                "Required": False,
                "Value": "",
            },
            "KillDate": {
                "Description": "Date for the listener to exit (MM/dd/yyyy).",
                "Required": False,
                "Value": "",
            },
            "WorkingHours": {
                "Description": "Hours for the agent to operate (09:00-17:00).",
                "Required": False,
                "Value": "",
            },
            "ServerVersion": {
                "Description": "Server header for the control server.",
                "Required": True,
                "Value": "Microsoft-IIS/7.5",
            },
            "StagerURI": {
                "Description": "URI for the stager. Example: stager.php",
                "Required": False,
                "Value": "",
            },
            "UserAgent": {
                "Description": "User-agent string to use for the staging request (default, none, or other).",
                "Required": False,
                "Value": "default",
            },
            "Proxy": {
                "Description": "Proxy to use for request (default, none, or other).",
                "Required": False,
                "Value": "default",
            },
            "ProxyCreds": {
                "Description": "Proxy credentials ([domain\]username:password) to use for request (default, none, or other).",
                "Required": False,
                "Value": "default",
            },
            "SlackURL": {
                "Description": "Your Slack Incoming Webhook URL to communicate with your Slack instance.",
                "Required": False,
                "Value": "",
            },
        }

        # required:
        self.mainMenu = mainMenu
        self.threads = {}  # used to keep track of any threaded instances of this server

        # optional/specific for this module

        # set the default staging key to the controller db default
        self.options["StagingKey"]["Value"] = str(
            data_util.get_config("staging_key")[0]
        )

    def default_response(self):
        """
        If there's a default response expected from the server that the client needs to ignore,
        (i.e. a default HTTP page), put the generation here.
        """
        print(
            helpers.color(
                "[!] default_response() not implemented for listeners/template"
            )
        )
        return ""

    def validate_options(self):
        """
        Validate all options for this listener.

        :return: True when every required option has a non-blank value,
            otherwise print the first offender and return False.
        """
        for key in self.options:
            if self.options[key]["Required"] and (
                str(self.options[key]["Value"]).strip() == ""
            ):
                print(helpers.color('[!] Option "%s" is required.' % (key)))
                return False
        return True

    def generate_launcher(
        self,
        encode=True,
        obfuscate=False,
        obfuscationCommand="",
        userAgent="default",
        proxy="default",
        proxyCreds="default",
        stagerRetries="0",
        language=None,
        safeChecks="",
        listenerName=None,
        bypasses: List[str] = None,
    ):
        """
        Generate a basic launcher for the specified listener.

        Returns the launcher string for the requested language, or None when
        the language/listener name is missing or invalid (an error is printed).
        """
        bypasses = [] if bypasses is None else bypasses
        if not language:
            print(
                helpers.color(
                    "[!] listeners/template generate_launcher(): no language specified!"
                )
            )
            return None

        if listenerName and (listenerName in self.mainMenu.listeners.activeListeners):
            # extract the set options for this instantiated listener
            listenerOptions = self.mainMenu.listeners.activeListeners[listenerName][
                "options"
            ]
            host = listenerOptions["Host"]["Value"]
            stagingKey = listenerOptions["StagingKey"]["Value"]
            profile = listenerOptions["DefaultProfile"]["Value"]
            uris = [a.strip("/") for a in profile.split("|")[0].split(",")]
            stage0 = random.choice(uris)
            launchURI = "%s/%s" % (host, stage0)

            if language.startswith("po"):
                # PowerShell
                return ""

            if language.startswith("py"):
                # Python
                return ""
            else:
                print(
                    helpers.color(
                        "[!] listeners/template generate_launcher(): invalid language specification: only 'powershell' and 'python' are current supported for this module."
                    )
                )
        else:
            print(
                helpers.color(
                    "[!] listeners/template generate_launcher(): invalid listener name specification!"
                )
            )

    def generate_stager(
        self,
        listenerOptions,
        encode=False,
        encrypt=True,
        obfuscate=False,
        obfuscationCommand="",
        language=None,
    ):
        """
        If you want to support staging for the listener module, generate_stager must be
        implemented to return the stage1 key-negotiation stager code.
        """
        print(
            helpers.color(
                "[!] generate_stager() not implemented for listeners/template"
            )
        )
        return ""

    def generate_agent(
        self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""
    ):
        """
        If you want to support staging for the listener module, generate_agent must be
        implemented to return the actual staged agent code.
        """
        print(
            helpers.color("[!] generate_agent() not implemented for listeners/template")
        )
        return ""

    def generate_comms(self, listenerOptions, language=None):
        """
        Generate just the agent communication code block needed for communications with this listener.
        This is so agents can easily be dynamically updated for the new listener.

        This should be implemented for the module.
        """
        if language:
            if language.lower() == "powershell":
                updateServers = """
                    $Script:ControlServers = @("%s");
                    $Script:ServerIndex = 0;
                """ % (
                    listenerOptions["Host"]["Value"]
                )

                getTask = """
                    $script:GetTask = {
                    }
                """

                sendMessage = """
                    $script:SendMessage = {
                        param($Packets)

                        if($Packets) {
                        }
                    }
                """

                return (
                    updateServers
                    + getTask
                    + sendMessage
                    + "\n'New agent comms registered!'"
                )

            elif language.lower() == "python":
                # send_message()
                pass
            else:
                print(
                    helpers.color(
                        "[!] listeners/template generate_comms(): invalid language specification, only 'powershell' and 'python' are current supported for this module."
                    )
                )
        else:
            print(
                helpers.color(
                    "[!] listeners/template generate_comms(): no language specified!"
                )
            )

    def start(self, name=""):
        """
        If a server component needs to be started, implement the kick off logic
        here and the actual server code in another function to facilitate threading
        (i.e. start_server() in the http listener).

        :return: True when the listener started successfully (the template
            has no server component, so it always reports success).
        """
        # listenerOptions = self.options
        # if name and name != '':
        #     self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
        #     self.threads[name].start()
        #     time.sleep(1)
        #     # returns True if the listener successfully started, false otherwise
        #     return self.threads[name].is_alive()
        # else:
        #     name = listenerOptions['Name']['Value']
        #     self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
        #     self.threads[name].start()
        #     time.sleep(1)
        #     # returns True if the listener successfully started, false otherwise
        #     return self.threads[name].is_alive()
        return True

    def shutdown(self, name=""):
        """
        If a server component was started, implement the logic that kills the particular
        named listener here.
        """
        # if name and name != '':
        #     print helpers.color("[!] Killing listener '%s'" % (name))
        #     self.threads[name].kill()
        # else:
        #     print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
        #     self.threads[self.options['Name']['Value']].kill()
        pass
|
optimization.py | import hashlib
import json
from copy import copy
from datetime import datetime
from itertools import product
from logging import getLogger
from threading import Thread, Event
from time import time
from typing import List, Set, Union, Any, Sequence, Optional, Mapping, Callable
from .job import TrainsJob
from .parameters import Parameter
from ..logger import Logger
from ..backend_api.services import workers as workers_service, tasks as tasks_services
from ..task import Task
# Module-level logger for the optimization machinery.
logger = getLogger('trains.automation.optimization')

# pandas is optional: it is only needed for the summary-table report, so a
# missing install degrades gracefully instead of failing the import.
try:
    import pandas as pd
    # Record pandas in the task requirements so remote execution has it too.
    Task.add_requirements('pandas')
except ImportError:
    pd = None
    logger.warning('Pandas is not installed, summary table reporting will be skipped.')
class Objective(object):
    """
    Optimization ``Objective`` class to maximize / minimize over all experiments. This class will sample a specific
    scalar from all experiments, and maximize / minimize over single scalar (i.e., title and series combination).

    ``SearchStrategy`` and ``HyperParameterOptimizer`` use ``Objective`` in the strategy search algorithm.
    """

    def __init__(self, title, series, order='max', extremum=False):
        # type: (str, str, str, bool) -> ()
        """
        Construct ``Objective`` object that will return the scalar value for a specific task ID.

        :param str title: The scalar graph title to sample from.
        :param str series: The scalar series title to sample from.
        :param str order: The setting for maximizing or minimizing the objective scalar value.

            The values are:

            - ``max``
            - ``min``

        :param bool extremum: Return the global minimum / maximum reported metric value

            The values are:

            - ``True`` - Return the global minimum / maximum reported metric value.
            - ``False`` - Return the last value reported for a specific Task. (Default)
        """
        self.title = title
        self.series = series
        assert order in ('min', 'max',)
        # normalize value so we always look for the highest objective value
        self.sign = -1 if (isinstance(order, str) and order.lower().strip() == 'min') else +1
        self._metric = None  # lazily built (title_hash, series_hash), see _get_last_metrics_encode_field()
        self.extremum = extremum

    def get_objective(self, task_id):
        # type: (Union[str, Task, TrainsJob]) -> Optional[float]
        """
        Return a specific task scalar value based on the objective settings (title/series).

        :param str task_id: The Task id to retrieve scalar from (or ``TrainsJob`` object).

        :return: The scalar value, or None when the task or the metric cannot be found.
        """
        # create self._metric
        self._get_last_metrics_encode_field()

        # accept Task / TrainsJob objects as well as plain id strings
        if isinstance(task_id, Task):
            task_id = task_id.id
        elif isinstance(task_id, TrainsJob):
            task_id = task_id.task_id()

        # noinspection PyBroadException
        try:
            # noinspection PyProtectedMember
            task = Task._query_tasks(
                task_ids=[task_id], only_fields=['last_metrics.{}.{}'.format(self._metric[0], self._metric[1])])[0]
        except Exception:
            return None

        metrics = task.last_metrics
        # noinspection PyBroadException
        try:
            values = metrics[self._metric[0]][self._metric[1]]
            if not self.extremum:
                return values['value']

            return values['min_value'] if self.sign < 0 else values['max_value']
        except Exception:
            return None

    def get_current_raw_objective(self, task):
        # type: (Union[TrainsJob, Task]) -> (int, float)
        """
        Return the current raw value (without sign normalization) of the objective.

        :param str task: The Task or Job to retrieve scalar from (or ``TrainsJob`` object).

        :return: Tuple(iteration, value) if, and only if, the metric exists. None if the metric does not exist.
        """
        # normalize the input to a Task object
        if not isinstance(task, Task):
            if hasattr(task, 'task'):
                task = task.task
            if not isinstance(task, Task):
                task = Task.get_task(task_id=str(task))

        if not task:
            raise ValueError("Task object could not be found")

        # todo: replace with more efficient code
        scalars = task.get_reported_scalars()

        # noinspection PyBroadException
        try:
            # last reported (x, y) sample of the objective's scalar series
            return scalars[self.title][self.series]['x'][-1], scalars[self.title][self.series]['y'][-1]
        except Exception:
            return None

    def get_objective_sign(self):
        # type: () -> float
        """
        Return the sign of the objective.

        - ``+1`` - If maximizing
        - ``-1`` - If minimizing

        :return: Objective function sign.
        """
        return self.sign

    def get_objective_metric(self):
        # type: () -> (str, str)
        """
        Return the metric title, series pair of the objective.

        :return: (title, series)
        """
        return self.title, self.series

    def get_normalized_objective(self, task_id):
        # type: (Union[str, Task, TrainsJob]) -> Optional[float]
        """
        Return a normalized task scalar value based on the objective settings (title/series).
        I.e. objective is always to maximize the returned value

        :param str task_id: The Task id to retrieve scalar from.

        :return: Normalized scalar value.
        """
        objective = self.get_objective(task_id=task_id)
        if objective is None:
            return None
        # normalize value so we always look for the highest objective value
        return self.sign * objective

    def _get_last_metrics_encode_field(self):
        # type: () -> str
        """
        Build (and cache) the hashed title/series pair, then return the encoded
        ``last_metrics`` field name used to sort/query the backend, prefixed
        with '-' when the objective is maximization.

        :return: The encoded last_metrics field string.
        """
        if not self._metric:
            title = hashlib.md5(str(self.title).encode('utf-8')).hexdigest()
            series = hashlib.md5(str(self.series).encode('utf-8')).hexdigest()
            self._metric = title, series
        return '{}last_metrics.{}.{}.{}'.format(
            '-' if self.sign > 0 else '', self._metric[0], self._metric[1],
            ('min_value' if self.sign < 0 else 'max_value') if self.extremum else 'value')
class Budget(object):
    """Tracks consumption of jobs / iterations / compute-time against optional limits."""

    class Field(object):
        """A single budget dimension: an optional limit plus per-uid usage samples."""

        def __init__(self, limit=None):
            # type: (Optional[float]) -> ()
            self.limit = limit
            self.current = {}

        def update(self, uid, value):
            # type: (Union[str, int], float) -> ()
            """Record the latest usage sample for *uid*; non-numeric values are ignored."""
            if value is None:
                return
            try:
                self.current[uid] = float(value)
            except (TypeError, ValueError):
                pass

        @property
        def used(self):
            # type: () -> (Optional[float])
            """Fraction of the limit consumed, or None when unlimited / no samples yet."""
            if self.limit is None or not self.current:
                return None
            return sum(self.current.values()) / float(self.limit)

    def __init__(self, jobs_limit, iterations_limit, compute_time_limit):
        # type: (Optional[int], Optional[int], Optional[float]) -> ()
        self.jobs = self.Field(jobs_limit)
        self.iterations = self.Field(iterations_limit)
        self.compute_time = self.Field(compute_time_limit)

    def to_dict(self):
        # type: () -> (Mapping[str, Mapping[str, float]])
        """Serialize as {'jobs'|'iterations'|'compute_time': {'limit': x, 'used': y}}."""
        current_budget = {}
        for name in ('jobs', 'iterations', 'compute_time'):
            field = getattr(self, name)
            used = field.used
            # report 0 instead of None when nothing was consumed yet
            current_budget[name] = {'limit': field.limit, 'used': used if used else 0}
        return current_budget
class SearchStrategy(object):
"""
The base search strategy class. Inherit this class to implement your custom strategy.
"""
_tag = 'optimization'
_job_class = TrainsJob # type: TrainsJob
def __init__(
        self,
        base_task_id,  # type: str
        hyper_parameters,  # type: Sequence[Parameter]
        objective_metric,  # type: Objective
        execution_queue,  # type: str
        num_concurrent_workers,  # type: int
        pool_period_min=2.,  # type: float
        time_limit_per_job=None,  # type: Optional[float]
        compute_time_limit=None,  # type: Optional[float]
        min_iteration_per_job=None,  # type: Optional[int]
        max_iteration_per_job=None,  # type: Optional[int]
        total_max_jobs=None,  # type: Optional[int]
        **_  # type: Any
):
    # type: (...) -> ()
    """
    Initialize a search strategy optimizer.

    :param str base_task_id: The Task ID (str)
    :param list hyper_parameters: The list of parameter objects to optimize over.
    :param Objective objective_metric: The Objective metric to maximize / minimize.
    :param str execution_queue: The execution queue to use for launching Tasks (experiments).
    :param int num_concurrent_workers: The maximum number of concurrent running machines.
    :param float pool_period_min: The time between two consecutive pools (minutes).
    :param float time_limit_per_job: The maximum execution time per single job in minutes. When time limit is
        exceeded, the job is aborted. (Optional)
    :param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
        all jobs aborted. (Optional)
    :param int min_iteration_per_job: The minimum iterations (of the Objective metric) per single job (Optional)
    :param int max_iteration_per_job: The maximum iterations (of the Objective metric) per single job.
        When maximum iterations is exceeded, the job is aborted. (Optional)
    :param int total_max_jobs: The total maximum jobs for the optimization process. The default value is ``None``,
        for unlimited.
    """
    super(SearchStrategy, self).__init__()
    self._base_task_id = base_task_id
    self._hyper_parameters = hyper_parameters
    self._objective_metric = objective_metric
    self._execution_queue = execution_queue
    self._num_concurrent_workers = num_concurrent_workers
    self.pool_period_minutes = pool_period_min
    self.time_limit_per_job = time_limit_per_job
    self.compute_time_limit = compute_time_limit
    self.max_iteration_per_job = max_iteration_per_job
    self.min_iteration_per_job = min_iteration_per_job
    self.total_max_jobs = total_max_jobs
    # internal run state: stop flag, running/pending job lists, launch counter
    self._stop_event = Event()
    self._current_jobs = []
    self._pending_jobs = []
    self._num_jobs = 0
    self._job_parent_id = None
    self._created_jobs_ids = {}
    self._naming_function = None
    self._job_project = {}
    # iterations budget is only bounded when both a per-job iteration cap and
    # a total job cap are set; otherwise it is unlimited (None)
    self.budget = Budget(
        jobs_limit=self.total_max_jobs,
        compute_time_limit=self.compute_time_limit if self.compute_time_limit else None,
        iterations_limit=self.total_max_jobs * self.max_iteration_per_job if
        self.max_iteration_per_job and self.total_max_jobs else None
    )
    self._validate_base_task()
    self._optimizer_task = None
def start(self):
    # type: () -> ()
    """
    Run the optimizer controller loop until completion or until :meth:`stop` is called.

    Repeatedly invokes process_step(), waiting pool_period_minutes between
    iterations on the stop event; exits when process_step() returns False or
    the stop event is set. Blocks the calling thread for the whole run.
    """
    iteration = 0
    keep_running = True
    while keep_running:
        logger.debug('optimization loop #{}'.format(iteration))
        keep_running = self.process_step()
        # wait() doubles as the inter-pool sleep and the stop signal check
        if keep_running and self._stop_event.wait(timeout=self.pool_period_minutes * 60.):
            keep_running = False
        iteration += 1
def stop(self):
    # type: () -> ()
    """
    Signal the optimization loop started by :meth:`start` to terminate.

    Safe to call from any thread; it only sets the internal stop event.
    """
    self._stop_event.set()
    def process_step(self):
        # type: () -> bool
        """
        Abstract helper function. Implementation is not required. Default use in start default implementation
        Main optimization loop, called from the daemon thread created by :meth:`start`.

        - Call monitor job on every ``TrainsJob`` in jobs:

          - Check the performance or elapsed time, and then decide whether to kill the jobs.

        - Call create_job:

          - Check if spare job slots exist, and if they do call create a new job based on previous tested experiments.

        :return: True, if continue the optimization. False, if immediately stop.
        """
        # keep only the jobs that are still relevant (monitor_job aborts over-budget jobs)
        updated_jobs = []
        for job in self._current_jobs:
            if self.monitor_job(job):
                updated_jobs.append(job)
        self._current_jobs = updated_jobs
        # once a job leaves the pending (queued) state, count it against the jobs budget
        pending_jobs = []
        for job in self._pending_jobs:
            if job.is_pending():
                pending_jobs.append(job)
            else:
                self.budget.jobs.update(job.task_id(), 1)
        self._pending_jobs = pending_jobs
        free_workers = self._num_concurrent_workers - len(self._current_jobs)
        # do not create more jobs if we hit the limit
        if self.total_max_jobs and self._num_jobs >= self.total_max_jobs:
            return bool(self._current_jobs)
        # see how many free slots we have and create job
        for i in range(max(0, free_workers)):
            new_job = self.create_job()
            if not new_job:
                break
            self._num_jobs += 1
            new_job.launch(self._execution_queue)
            self._current_jobs.append(new_job)
            self._pending_jobs.append(new_job)
        # keep looping as long as at least one job is alive
        return bool(self._current_jobs)
def create_job(self):
# type: () -> Optional[TrainsJob]
"""
Abstract helper function. Implementation is not required. Default use in process_step default implementation
Create a new job if needed. return the newly created job. If no job needs to be created, return ``None``.
:return: A Newly created TrainsJob object, or None if no TrainsJob created.
"""
return None
def monitor_job(self, job):
# type: (TrainsJob) -> bool
"""
Helper function, Implementation is not required. Default use in process_step default implementation.
Check if the job needs to be aborted or already completed.
If returns ``False``, the job was aborted / completed, and should be taken off the current job list
If there is a budget limitation, this call should update
``self.budget.compute_time.update`` / ``self.budget.iterations.update``
:param TrainsJob job: A ``TrainsJob`` object to monitor.
:return: False, if the job is no longer relevant.
"""
abort_job = self.update_budget_per_job(job)
if abort_job:
job.abort()
return False
return not job.is_stopped()
    def update_budget_per_job(self, job):
        # type: (TrainsJob) -> bool
        """
        Update the compute-time and iterations budget counters for a single job, and decide
        whether the job exceeded one of its per-job limits.

        :param TrainsJob job: The job whose elapsed time / iterations are sampled.
        :return: True if the job exceeded ``time_limit_per_job`` or ``max_iteration_per_job``
            and should be aborted by the caller, otherwise False.
        """
        abort_job = False
        if self.time_limit_per_job:
            # job.elapsed() is in seconds; budget and limits are in minutes
            elapsed = job.elapsed() / 60.
            if elapsed > 0:
                self.budget.compute_time.update(job.task_id(), elapsed)
                if elapsed > self.time_limit_per_job:
                    abort_job = True
        if self.compute_time_limit:
            # track compute time for the global budget even without a per-job time limit
            # (skipped above only when time_limit_per_job already updated it)
            if not self.time_limit_per_job:
                elapsed = job.elapsed() / 60.
                if elapsed > 0:
                    self.budget.compute_time.update(job.task_id(), elapsed)
        if self.max_iteration_per_job:
            # -1 means the objective metric reported no iterations yet
            iterations = self._get_job_iterations(job)
            if iterations > 0:
                self.budget.iterations.update(job.task_id(), iterations)
                if iterations > self.max_iteration_per_job:
                    abort_job = True
        return abort_job
def get_running_jobs(self):
# type: () -> Sequence[TrainsJob]
"""
Return the current running TrainsJobs.
:return: List of TrainsJob objects.
"""
return self._current_jobs
def get_created_jobs_ids(self):
# type: () -> Mapping[str, dict]
"""
Return a Task IDs dict created by this optimizer until now, including completed and running jobs.
The values of the returned dict are the parameters used in the specific job
:return: dict of task IDs (str) as keys, and their parameters dict as values.
"""
return {job_id: job_val[1] for job_id, job_val in self._created_jobs_ids.items()}
def get_created_jobs_tasks(self):
# type: () -> Mapping[str, dict]
"""
Return a Task IDs dict created by this optimizer until now.
The values of the returned dict are the TrainsJob.
:return: dict of task IDs (str) as keys, and their TrainsJob as values.
"""
return {job_id: job_val[0] for job_id, job_val in self._created_jobs_ids.items()}
def get_top_experiments(self, top_k):
# type: (int) -> Sequence[Task]
"""
Return a list of Tasks of the top performing experiments, based on the controller ``Objective`` object.
:param int top_k: The number of Tasks (experiments) to return.
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
# noinspection PyProtectedMember
top_tasks = self._get_child_tasks(
parent_task_id=self._job_parent_id or self._base_task_id,
order_by=self._objective_metric._get_last_metrics_encode_field(),
additional_filters={'page_size': int(top_k), 'page': 0})
return top_tasks
def get_objective_metric(self):
# type: () -> (str, str)
"""
Return the metric title, series pair of the objective.
:return: (title, series)
"""
return self._objective_metric.get_objective_metric()
    def helper_create_job(
            self,
            base_task_id,  # type: str
            parameter_override=None,  # type: Optional[Mapping[str, str]]
            task_overrides=None,  # type: Optional[Mapping[str, str]]
            tags=None,  # type: Optional[Sequence[str]]
            parent=None,  # type: Optional[str]
            **kwargs  # type: Any
    ):
        # type: (...) -> TrainsJob
        """
        Create a Job using the specified arguments, ``TrainsJob`` for details.

        :param str base_task_id: the Task to clone for this job.
        :param parameter_override: hyper-parameter values for this job; also used to build
            the job name and comment.
        :param task_overrides: raw Task field overrides, passed through to the job class.
        :param tags: extra user tags; the optimizer tag is always appended.
        :param parent: parent Task ID; defaults to the optimizer's configured job parent.
        :return: A newly created Job instance.
        """
        if parameter_override:
            param_str = ['{}={}'.format(k, parameter_override[k]) for k in sorted(parameter_override.keys())]
            if self._naming_function:
                name = self._naming_function(self._base_task_name, parameter_override)
            elif self._naming_function is False:
                # naming explicitly disabled - keep the cloned task's original name
                name = None
            else:
                name = '{}: {}'.format(self._base_task_name, ' '.join(param_str))
            comment = '\n'.join(param_str)
        else:
            name = None
            comment = None
        # always tag with the optimizer tag, plus an 'opt: <parent id>' marker when available
        tags = (tags or []) + [self._tag, 'opt' + (': {}'.format(self._job_parent_id) if self._job_parent_id else '')]
        new_job = self._job_class(
            base_task_id=base_task_id, parameter_override=parameter_override,
            task_overrides=task_overrides, tags=tags, parent=parent or self._job_parent_id,
            name=name, comment=comment, project=self._get_task_project(parent or self._job_parent_id), **kwargs)
        # remember the job and its parameters so reports can map task id -> configuration
        self._created_jobs_ids[new_job.task_id()] = (new_job, parameter_override)
        logger.info('Creating new Task: {}'.format(parameter_override))
        return new_job
def set_job_class(self, job_class):
# type: (TrainsJob) -> ()
"""
Set the class to use for the :meth:`helper_create_job` function.
:param TrainsJob job_class: The Job Class type.
"""
self._job_class = job_class
def set_job_default_parent(self, job_parent_task_id):
# type: (str) -> ()
"""
Set the default parent for all Jobs created by the :meth:`helper_create_job` method.
:param str job_parent_task_id: The parent Task ID.
"""
self._job_parent_id = job_parent_task_id
def set_job_naming_scheme(self, naming_function):
# type: (Optional[Callable[[str, dict], str]]) -> ()
"""
Set the function used to name a newly created job.
:param callable naming_function:
.. code-block:: py
naming_functor(base_task_name, argument_dict) -> str
"""
self._naming_function = naming_function
def set_optimizer_task(self, task):
# type: (Task) -> ()
"""
Set the optimizer task object to be used to store/generate reports on the optimization process.
Usually this is the current task of this process.
:param Task task: The optimizer's current Task.
"""
self._optimizer_task = task
    def _validate_base_task(self):
        # type: () -> ()
        """
        Check the base task exists and contains the requested Objective metric and hyper parameters.

        :raises ValueError: if the base task ID cannot be resolved.
        """
        # check if the task exists
        try:
            task = Task.get_task(task_id=self._base_task_id)
            # cache the name - used later when naming cloned jobs
            self._base_task_name = task.name
        except ValueError:
            raise ValueError("Could not find base task id {}".format(self._base_task_id))
        # check if the hyper-parameters exist:
        task_parameters = task.get_parameters_as_dict()
        missing_params = [h.name for h in self._hyper_parameters if h.name not in task_parameters]
        if missing_params:
            # missing parameters / metric are only warnings, not hard failures
            logger.warning('Could not find requested hyper-parameters {} on base task {}'.format(
                missing_params, self._base_task_id))
        # check if the objective metric exists (i.e. no typos etc)
        if self._objective_metric.get_objective(self._base_task_id) is None:
            logger.warning('Could not find requested metric {} report on base task {}'.format(
                self._objective_metric.get_objective_metric(), self._base_task_id))
def _get_task_project(self, parent_task_id):
# type: (str) -> (Optional[str])
if not parent_task_id:
return
if parent_task_id not in self._job_project:
task = Task.get_task(task_id=parent_task_id)
self._job_project[parent_task_id] = task.project
return self._job_project.get(parent_task_id)
def _get_job_iterations(self, job):
# type: (Union[TrainsJob, Task]) -> int
iteration_value = self._objective_metric.get_current_raw_objective(job)
return iteration_value[0] if iteration_value else -1
@classmethod
def _get_child_tasks(
cls,
parent_task_id, # type: str
status=None, # type: Optional[Task.TaskStatusEnum]
order_by=None, # type: Optional[str]
additional_filters=None # type: Optional[dict]
):
# type: (...) -> (Sequence[Task])
"""
Helper function. Return a list of tasks tagged automl, with specific ``status``, ordered by ``sort_field``.
:param str parent_task_id: The base Task ID (parent).
:param status: The current status of requested tasks (for example, ``in_progress`` and ``completed``).
:param str order_by: The field name to sort results.
Examples:
.. code-block:: py
"-last_metrics.title.series.min"
"last_metrics.title.series.max"
"last_metrics.title.series.last"
"execution.parameters.name"
"updated"
:param dict additional_filters: The additional task filters.
:return: A list of Task objects
"""
task_filter = {'parent': parent_task_id,
# 'tags': [cls._tag],
'system_tags': ['-archived']}
task_filter.update(additional_filters or {})
if status:
task_filter['status'] = status
if order_by and (order_by.startswith('last_metrics') or order_by.startswith('-last_metrics')):
parts = order_by.split('.')
if parts[-1] in ('min', 'max', 'last'):
title = hashlib.md5(str(parts[1]).encode('utf-8')).hexdigest()
series = hashlib.md5(str(parts[2]).encode('utf-8')).hexdigest()
minmax = 'min_value' if 'min' in parts[3] else ('max_value' if 'max' in parts[3] else 'value')
order_by = '{}last_metrics.'.join(
('-' if order_by and order_by[0] == '-' else '', title, series, minmax))
if order_by:
task_filter['order_by'] = [order_by]
return Task.get_tasks(task_filter=task_filter)
class GridSearch(SearchStrategy):
    """
    Grid search strategy controller. Full grid sampling of every hyper-parameter combination.
    """
    def __init__(
            self,
            base_task_id,  # type: str
            hyper_parameters,  # type: Sequence[Parameter]
            objective_metric,  # type: Objective
            execution_queue,  # type: str
            num_concurrent_workers,  # type: int
            pool_period_min=2.,  # type: float
            time_limit_per_job=None,  # type: Optional[float]
            compute_time_limit=None,  # type: Optional[float]
            max_iteration_per_job=None,  # type: Optional[int]
            total_max_jobs=None,  # type: Optional[int]
            **_  # type: Any
    ):
        # type: (...) -> ()
        """
        Initialize a grid search optimizer

        :param str base_task_id: The Task ID.
        :param list hyper_parameters: The list of parameter objects to optimize over.
        :param Objective objective_metric: The Objective metric to maximize / minimize.
        :param str execution_queue: The execution queue to use for launching Tasks (experiments).
        :param int num_concurrent_workers: The maximum number of concurrent running machines.
        :param float pool_period_min: The time between two consecutive pools (minutes).
        :param float time_limit_per_job: The maximum execution time per single job in minutes. When the time limit is
            exceeded job is aborted. (Optional)
        :param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
            all jobs aborted. (Optional)
        :param int max_iteration_per_job: The maximum iterations (of the Objective metric)
            per single job, When exceeded, the job is aborted.
        :param int total_max_jobs: The total maximum jobs for the optimization process. The default is ``None``, for
            unlimited.
        """
        super(GridSearch, self).__init__(
            base_task_id=base_task_id, hyper_parameters=hyper_parameters, objective_metric=objective_metric,
            execution_queue=execution_queue, num_concurrent_workers=num_concurrent_workers,
            pool_period_min=pool_period_min, time_limit_per_job=time_limit_per_job,
            compute_time_limit=compute_time_limit, max_iteration_per_job=max_iteration_per_job,
            total_max_jobs=total_max_jobs, **_)
        # lazily-created generator over the full cartesian product of hyper-parameter values
        self._param_iterator = None

    def create_job(self):
        # type: () -> Optional[TrainsJob]
        """
        Create a new job if needed. Return the newly created job. If no job needs to be created, return ``None``.

        :return: A newly created TrainsJob object, or None if no TrainsJob is created.
        """
        try:
            parameters = self._next_configuration()
        except StopIteration:
            # grid fully enumerated - nothing left to launch
            return None
        return self.helper_create_job(base_task_id=self._base_task_id, parameter_override=parameters)

    def _next_configuration(self):
        # type: () -> Mapping[str, str]
        # generator yielding one merged parameter dict per grid point
        def param_iterator_fn():
            hyper_params_values = [p.to_list() for p in self._hyper_parameters]
            for state in product(*hyper_params_values):
                yield dict(kv for d in state for kv in d.items())
        if not self._param_iterator:
            self._param_iterator = param_iterator_fn()
        return next(self._param_iterator)
class RandomSearch(SearchStrategy):
    """
    Random search strategy controller. Random uniform sampling of hyper-parameters.
    """
    # Number of already chosen random samples before assuming we covered the entire hyper-parameter space
    _hp_space_cover_samples = 42

    def __init__(
            self,
            base_task_id,  # type: str
            hyper_parameters,  # type: Sequence[Parameter]
            objective_metric,  # type: Objective
            execution_queue,  # type: str
            num_concurrent_workers,  # type: int
            pool_period_min=2.,  # type: float
            time_limit_per_job=None,  # type: Optional[float]
            compute_time_limit=None,  # type: Optional[float]
            max_iteration_per_job=None,  # type: Optional[int]
            total_max_jobs=None,  # type: Optional[int]
            **_  # type: Any
    ):
        # type: (...) -> ()
        """
        Initialize a random search optimizer.

        :param str base_task_id: The Task ID.
        :param list hyper_parameters: The list of Parameter objects to optimize over.
        :param Objective objective_metric: The Objective metric to maximize / minimize.
        :param str execution_queue: The execution queue to use for launching Tasks (experiments).
        :param int num_concurrent_workers: The maximum number of concurrent running machines.
        :param float pool_period_min: The time between two consecutive pools (minutes).
        :param float time_limit_per_job: The maximum execution time per single job in minutes,
            when time limit is exceeded job is aborted. (Optional)
        :param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
            all jobs aborted. (Optional)
        :param int max_iteration_per_job: The maximum iterations (of the Objective metric)
            per single job. When exceeded, the job is aborted.
        :param int total_max_jobs: The total maximum jobs for the optimization process. The default is ``None``, for
            unlimited.
        """
        super(RandomSearch, self).__init__(
            base_task_id=base_task_id, hyper_parameters=hyper_parameters, objective_metric=objective_metric,
            execution_queue=execution_queue, num_concurrent_workers=num_concurrent_workers,
            pool_period_min=pool_period_min, time_limit_per_job=time_limit_per_job,
            compute_time_limit=compute_time_limit, max_iteration_per_job=max_iteration_per_job,
            total_max_jobs=total_max_jobs, **_)
        # hashes of parameter dicts already launched, used to avoid duplicate samples
        self._hyper_parameters_collection = set()

    def create_job(self):
        # type: () -> Optional[TrainsJob]
        """
        Create a new job if needed. Return the newly created job. If no job needs to be created, return ``None``.

        :return: A newly created TrainsJob object, or None if no TrainsJob created
        """
        parameters = None
        # maximum tries to get a random set that is not already in the collection
        for i in range(self._hp_space_cover_samples):
            parameters = {}
            for p in self._hyper_parameters:
                parameters.update(p.get_value())
            # hash the parameters dictionary
            param_hash = hash(json.dumps(parameters, sort_keys=True))
            # if this is a new set of parameters, use it.
            if param_hash not in self._hyper_parameters_collection:
                self._hyper_parameters_collection.add(param_hash)
                break
            # try again
            parameters = None
        # if we failed to find a random set of parameters, assume we selected all of them
        if not parameters:
            return None
        return self.helper_create_job(base_task_id=self._base_task_id, parameter_override=parameters)
class HyperParameterOptimizer(object):
"""
Hyper-parameter search controller. Clones the base experiment, changes arguments and tries to maximize/minimize
the defined objective.
"""
_tag = 'optimization'
    def __init__(
            self,
            base_task_id,  # type: str
            hyper_parameters,  # type: Sequence[Parameter]
            objective_metric_title,  # type: str
            objective_metric_series,  # type: str
            objective_metric_sign='min',  # type: str
            optimizer_class=RandomSearch,  # type: type(SearchStrategy)
            max_number_of_concurrent_tasks=10,  # type: int
            execution_queue='default',  # type: str
            optimization_time_limit=None,  # type: Optional[float]
            compute_time_limit=None,  # type: Optional[float]
            auto_connect_task=True,  # type: bool
            always_create_task=False,  # type: bool
            **optimizer_kwargs  # type: Any
    ):
        # type: (...) -> ()
        """
        Create a new hyper-parameter controller. The newly created object will launch and monitor the new experiments.

        :param str base_task_id: The Task ID to be used as template experiment to optimize.
        :param list hyper_parameters: The list of Parameter objects to optimize over.
        :param str objective_metric_title: The Objective metric title to maximize / minimize (for example,
            ``validation``).
        :param str objective_metric_series: The Objective metric series to maximize / minimize (for example, ``loss``).
        :param str objective_metric_sign: The objective to maximize / minimize.
            The values are:
            - ``min`` - Minimize the last reported value for the specified title/series scalar.
            - ``max`` - Maximize the last reported value for the specified title/series scalar.
            - ``min_global`` - Minimize the min value of *all* reported values for the specific title/series scalar.
            - ``max_global`` - Maximize the max value of *all* reported values for the specific title/series scalar.
        :param class.SearchStrategy optimizer_class: The SearchStrategy optimizer to use for the hyper-parameter search
        :param int max_number_of_concurrent_tasks: The maximum number of concurrent Tasks (experiments) running at the
            same time.
        :param str execution_queue: The execution queue to use for launching Tasks (experiments).
        :param float optimization_time_limit: The maximum time (minutes) for the entire optimization process. The
            default is ``None``, indicating no time limit.
        :param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
            all jobs aborted. (Optional)
        :param bool auto_connect_task: Store optimization arguments and configuration in the Task
            The values are:
            - ``True`` - The optimization argument and configuration will be stored in the Task. All arguments will
              be under the hyper-parameter section as ``opt/<arg>``, and the hyper_parameters will stored in the
              Task ``connect_configuration`` (see artifacts/hyper-parameter).
            - ``False`` - Do not store with Task.
        :param bool always_create_task: Always create a new Task
            The values are:
            - ``True`` - No current Task initialized. Create a new task named ``optimization`` in the ``base_task_id``
              project.
            - ``False`` - Use the :py:meth:`task.Task.current_task` (if exists) to report statistics.
        :param ** optimizer_kwargs: Arguments passed directly to the optimizer constructor.
            Example:

            .. code-block:: py
               :linenos:
               :caption: Example

               from trains import Task
               from trains.automation import UniformParameterRange, DiscreteParameterRange
               from trains.automation import GridSearch, RandomSearch, HyperParameterOptimizer

               task = Task.init('examples', 'HyperParameterOptimizer example')
               an_optimizer = HyperParameterOptimizer(
                   base_task_id='fa30fa45d95d4927b87c323b5b04dc44',
                   hyper_parameters=[
                       UniformParameterRange('lr', min_value=0.01, max_value=0.3, step_size=0.05),
                       DiscreteParameterRange('network', values=['ResNet18', 'ResNet50', 'ResNet101']),
                   ],
                   objective_metric_title='title',
                   objective_metric_series='series',
                   objective_metric_sign='min',
                   max_number_of_concurrent_tasks=5,
                   optimizer_class=RandomSearch,
                   execution_queue='workers', time_limit_per_job=120, pool_period_min=0.2)

               # This will automatically create and print the optimizer new task id
               # for later use. if a Task was already created, it will use it.
               an_optimizer.set_time_limit(in_minutes=10.)
               an_optimizer.start()

               # we can create a pooling loop if we like
               while not an_optimizer.reached_time_limit():
                   top_exp = an_optimizer.get_top_experiments(top_k=3)
                   print(top_exp)

               # wait until optimization completed or timed-out
               an_optimizer.wait()
               # make sure we stop all jobs
               an_optimizer.stop()
        """
        # create a new Task, if we do not have one already
        self._task = Task.current_task()
        if not self._task and always_create_task:
            base_task = Task.get_task(task_id=base_task_id)
            self._task = Task.init(
                project_name=base_task.get_project_name(),
                task_name='Optimizing: {}'.format(base_task.name),
                task_type=Task.TaskTypes.optimizer,
            )
        # the argument set that may be overridden by values stored on the Task (see _connect_args)
        opts = dict(
            base_task_id=base_task_id,
            objective_metric_title=objective_metric_title,
            objective_metric_series=objective_metric_series,
            objective_metric_sign=objective_metric_sign,
            max_number_of_concurrent_tasks=max_number_of_concurrent_tasks,
            execution_queue=execution_queue,
            optimization_time_limit=optimization_time_limit,
            compute_time_limit=compute_time_limit,
            optimizer_kwargs=optimizer_kwargs)
        # make sure all the created tasks are our children, as we are creating them
        if self._task:
            self._task.add_tags([self._tag])
        if auto_connect_task:
            # from here on, use the (possibly task-overridden) values from opts, not the raw arguments
            optimizer_class, hyper_parameters, opts = self._connect_args(
                optimizer_class=optimizer_class, hyper_param_configuration=hyper_parameters, **opts)
        self.base_task_id = opts['base_task_id']
        self.hyper_parameters = hyper_parameters
        self.max_number_of_concurrent_tasks = opts['max_number_of_concurrent_tasks']
        self.execution_queue = opts['execution_queue']
        self.objective_metric = Objective(
            title=opts['objective_metric_title'], series=opts['objective_metric_series'],
            order='min' if opts['objective_metric_sign'] in ('min', 'min_global') else 'max',
            extremum=opts['objective_metric_sign'].endswith('_global'))
        # if optimizer_class is an instance, use it as is.
        # NOTE(review): `type(x) != type` misclassifies classes with a custom metaclass as
        # instances - confirm none of the supported strategies use one
        if type(optimizer_class) != type:
            self.optimizer = optimizer_class
        else:
            self.optimizer = optimizer_class(
                base_task_id=opts['base_task_id'], hyper_parameters=hyper_parameters,
                objective_metric=self.objective_metric, execution_queue=opts['execution_queue'],
                num_concurrent_workers=opts['max_number_of_concurrent_tasks'],
                compute_time_limit=opts['compute_time_limit'], **opts.get('optimizer_kwargs', {}))
        self.optimizer.set_optimizer_task(self._task)
        # runtime state, populated by start()
        self.optimization_timeout = None
        self.optimization_start_time = None
        self._thread = None
        self._stop_event = None
        self._report_period_min = 5.
        self._thread_reporter = None
        self._experiment_completed_cb = None
        if self._task:
            self.optimizer.set_job_default_parent(self._task.id)
        self.set_time_limit(in_minutes=opts['optimization_time_limit'])
def get_num_active_experiments(self):
# type: () -> int
"""
Return the number of current active experiments.
:return: The number of active experiments.
"""
if not self.optimizer:
return 0
return len(self.optimizer.get_running_jobs())
def get_active_experiments(self):
# type: () -> Sequence[Task]
"""
Return a list of Tasks of the current active experiments.
:return: A list of Task objects, representing the current active experiments.
"""
if not self.optimizer:
return []
return [j.task for j in self.optimizer.get_running_jobs()]
    def start(self, job_complete_callback=None):
        # type: (Optional[Callable[[str, float, int, dict, str], None]]) -> bool
        """
        Start the HyperParameterOptimizer controller. If the calling process is stopped, then the controller stops
        as well.

        :param Callable job_complete_callback: Callback function, called when a job is completed.

            .. code-block:: py

               def job_complete_callback(
                   job_id,                 # type: str
                   objective_value,        # type: float
                   objective_iteration,    # type: int
                   job_parameters,         # type: dict
                   top_performance_job_id  # type: str
               ):
                   pass

        :return: True, if the controller started. False, if the controller did not start.
        """
        if not self.optimizer:
            return False
        # idempotent: a second call while already running is a no-op
        if self._thread:
            return True
        self.optimization_start_time = time()
        self._experiment_completed_cb = job_complete_callback
        self._stop_event = Event()
        # daemon threads: the optimizer loop and the periodic reporter die with the process
        self._thread = Thread(target=self._daemon)
        self._thread.daemon = True
        self._thread.start()
        self._thread_reporter = Thread(target=self._report_daemon)
        self._thread_reporter.daemon = True
        self._thread_reporter.start()
        return True
    def stop(self, timeout=None, wait_for_reporter=True):
        # type: (Optional[float], Optional[bool]) -> ()
        """
        Stop the HyperParameterOptimizer controller and the optimization thread.

        :param float timeout: Wait timeout for the optimization thread to exit (minutes).
            The default is ``None``, indicating do not wait terminate immediately.
        :param wait_for_reporter: Wait for reporter to flush data.
        """
        if not self._thread or not self._stop_event or not self.optimizer:
            # nothing is running; optionally let the reporter finish its last report
            if self._thread_reporter and wait_for_reporter:
                self._thread_reporter.join()
            return
        _thread = self._thread
        self._stop_event.set()
        self.optimizer.stop()
        # wait for optimizer thread
        if timeout is not None:
            _thread.join(timeout=timeout * 60.)
        # stop all running tasks:
        for j in self.optimizer.get_running_jobs():
            j.abort()
        # clear thread
        self._thread = None
        if wait_for_reporter:
            # wait for reporter to flush
            self._thread_reporter.join()
def is_active(self):
# type: () -> bool
"""
Is the optimization procedure active (still running)
The values are:
- ``True`` - The optimization procedure is active (still running).
- ``False`` - The optimization procedure is not active (not still running).
.. note::
If the daemon thread has not yet started, ``is_active`` returns ``True``.
:return: A boolean indicating whether the optimization procedure is active (still running) or stopped.
"""
return self._stop_event is None or self._thread is not None
def is_running(self):
# type: () -> bool
"""
Is the optimization controller is running
The values are:
- ``True`` - The optimization procedure is running.
- ``False`` - The optimization procedure is running.
:return: A boolean indicating whether the optimization procedure is active (still running) or stopped.
"""
return self._thread is not None
    def wait(self, timeout=None):
        # type: (Optional[float]) -> bool
        """
        Wait for the optimizer to finish.

        .. note::
            This method does not stop the optimizer. Call :meth:`stop` to terminate the optimizer.

        :param float timeout: The timeout to wait for the optimization to complete (minutes).
            If ``None``, then wait until we reached the timeout, or optimization completed.
        :return: True, if the optimization finished. False, if the optimization timed out.
        """
        if not self.is_running():
            return True
        if timeout is not None:
            timeout *= 60.
        else:
            # no explicit timeout: wait up to the configured optimization time budget.
            # NOTE(review): this is timeout-minus-start-time (the total budget), not the
            # *remaining* wall-clock time - confirm this is the intended semantics
            timeout = max(0, self.optimization_timeout - self.optimization_start_time) \
                if self.optimization_timeout else None
        # keep a local reference: _daemon() clears self._thread when it finishes
        _thread = self._thread
        _thread.join(timeout=timeout)
        if _thread.is_alive():
            return False
        return True
def set_time_limit(self, in_minutes=None, specific_time=None):
# type: (Optional[float], Optional[datetime]) -> ()
"""
Set a time limit for the HyperParameterOptimizer controller. If we reached the time limit, stop the optimization
process. If ``specific_time`` is provided, use it; otherwise, use the ``in_minutes``.
:param float in_minutes: The maximum processing time from current time (minutes).
:param datetime specific_time: The specific date/time limit.
"""
if specific_time:
self.optimization_timeout = specific_time.timestamp()
else:
self.optimization_timeout = (float(in_minutes) * 60.) + time() if in_minutes else None
def get_time_limit(self):
# type: () -> datetime
"""
Return the controller optimization time limit.
:return: The absolute datetime limit of the controller optimization process.
"""
return datetime.fromtimestamp(self.optimization_timeout)
def elapsed(self):
# type: () -> float
"""
Return minutes elapsed from controller stating time stamp.
:return: The minutes from controller start time. A negative value means the process has not started yet.
"""
if self.optimization_start_time is None:
return -1.0
return (time() - self.optimization_start_time) / 60.
def reached_time_limit(self):
# type: () -> bool
"""
Did the optimizer reach the time limit
The values are:
- ``True`` - The time limit passed.
- ``False`` - The time limit did not pass.
This method returns immediately, it does not wait for the optimizer.
:return: True, if optimizer is running and we passed the time limit, otherwise returns False.
"""
if self.optimization_start_time is None:
return False
if not self.is_running():
return False
return time() > self.optimization_timeout
def get_top_experiments(self, top_k):
# type: (int) -> Sequence[Task]
"""
Return a list of Tasks of the top performing experiments, based on the controller ``Objective`` object.
:param int top_k: The number of Tasks (experiments) to return.
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
if not self.optimizer:
return []
return self.optimizer.get_top_experiments(top_k=top_k)
def get_optimizer(self):
# type: () -> SearchStrategy
"""
Return the currently used optimizer object.
:return: The SearchStrategy object used.
"""
return self.optimizer
def set_default_job_class(self, job_class):
# type: (TrainsJob) -> ()
"""
Set the Job class to use when the optimizer spawns new Jobs.
:param TrainsJob job_class: The Job Class type.
"""
self.optimizer.set_job_class(job_class)
def set_report_period(self, report_period_minutes):
# type: (float) -> ()
"""
Set reporting period for the accumulated objective report (minutes). This report is sent on the Optimizer Task,
and collects the Objective metric from all running jobs.
:param float report_period_minutes: The reporting period (minutes). The default is once every 10 minutes.
"""
self._report_period_min = float(report_period_minutes)
    def _connect_args(self, optimizer_class=None, hyper_param_configuration=None, **kwargs):
        # type: (SearchStrategy, dict, Any) -> (SearchStrategy, list, dict)
        """
        Store the optimizer arguments and hyper-parameter space on the Task, then read them
        back, so values edited on a cloned Task override the coded defaults.

        :param optimizer_class: SearchStrategy class (or already-built instance).
        :param hyper_param_configuration: list of Parameter objects describing the search space.
        :param kwargs: the remaining controller arguments, logged under the ``opt`` section.
        :return: (resolved optimizer class, resolved Parameter list, resolved argument dict)
        """
        if not self._task:
            logger.warning('Auto Connect turned on but no Task was found, '
                           'hyper-parameter optimization argument logging disabled')
            return optimizer_class, hyper_param_configuration, kwargs
        # connect_configuration may replace the dict contents with Task-stored values
        configuration_dict = {'parameter_optimization_space': [c.to_dict() for c in hyper_param_configuration]}
        self._task.connect_configuration(configuration_dict)
        # this is the conversion back magic:
        configuration_dict = {'parameter_optimization_space': [
            Parameter.from_dict(c) for c in configuration_dict['parameter_optimization_space']]}
        arguments = {'opt': kwargs}
        if type(optimizer_class) != type:
            # an already-instantiated strategy was passed; only the plain arguments can be logged
            logger.warning('Auto Connect optimizer_class disabled, {} is already instantiated'.format(optimizer_class))
            self._task.connect(arguments)
        else:
            # store the class by name, e.g. "<class 'x.RandomSearch'>" -> "RandomSearch"
            arguments['opt']['optimizer_class'] = str(optimizer_class).split('.')[-1][:-2] \
                if not isinstance(optimizer_class, str) else optimizer_class
            self._task.connect(arguments)
            # this is the conversion back magic:
            original_class = optimizer_class
            optimizer_class = arguments['opt'].pop('optimizer_class', None)
            if optimizer_class == 'RandomSearch':
                optimizer_class = RandomSearch
            elif optimizer_class == 'GridSearch':
                optimizer_class = GridSearch
            elif optimizer_class == 'OptimizerBOHB':
                from .hpbandster import OptimizerBOHB
                optimizer_class = OptimizerBOHB
            elif optimizer_class == 'OptimizerOptuna':
                from .optuna import OptimizerOptuna
                optimizer_class = OptimizerOptuna
            else:
                logger.warning("Could not resolve optimizer_class {} reverting to original class {}".format(
                    optimizer_class, original_class))
                optimizer_class = original_class
        return optimizer_class, configuration_dict['parameter_optimization_space'], arguments['opt']
def _daemon(self):
# type: () -> ()
"""
Implement the main pooling thread, calling loop every ``self.pool_period_minutes`` minutes.
"""
self.optimizer.start()
self._thread = None
    def _report_daemon(self):
        # type: () -> ()
        """
        Background reporting loop: periodically logs remaining budget, resource
        usage and the per-job results summary to the controlling Task, until
        optimization ends (``self._thread`` cleared, stop event set, or compute
        budget exhausted). Emits one final report before exiting.
        """
        title, series = self.objective_metric.get_objective_metric()
        title = '{}/{}'.format(title, series)
        counter = 0
        completed_jobs = dict()
        task_logger = None
        cur_completed_jobs = set()

        while self._thread is not None:
            # seconds left until the global optimization timeout (0. means "no timeout");
            # a negative value is the "leave after one last report" sentinel
            timeout = self.optimization_timeout - time() if self.optimization_timeout else 0.

            if timeout >= 0:
                timeout = min(self._report_period_min * 60., timeout if timeout else self._report_period_min * 60.)
                # make sure that we have the first report fired before we actually go to sleep, wait for 15 sec.
                if counter <= 0:
                    timeout = 15
                print('Progress report #{} completed, sleeping for {} minutes'.format(counter, timeout / 60.))
                if self._stop_event.wait(timeout=timeout):
                    # wait for one last report
                    timeout = -1

            counter += 1

            # get task to report on.
            if self._task or Task.current_task():
                task_logger = (self._task or Task.current_task()).get_logger()

                # do some reporting
                self._report_remaining_budget(task_logger, counter)

                if self.optimizer.budget.compute_time.used and self.optimizer.budget.compute_time.used >= 1.0:
                    # Reached compute time limit
                    timeout = -1

                self._report_resources(task_logger, counter)

                # collect a summary of all the jobs and their final objective values
                cur_completed_jobs = set(self.optimizer.get_created_jobs_ids().keys()) - \
                    {j.task_id() for j in self.optimizer.get_running_jobs()}
                self._report_completed_status(completed_jobs, cur_completed_jobs, task_logger, title)

                self._report_completed_tasks_best_results(set(completed_jobs.keys()), task_logger, title, counter)

            # if we should leave, stop everything now.
            if timeout < 0:
                # we should leave
                self.stop(wait_for_reporter=False)
                return

        # optimization finished normally - emit one final report
        if task_logger and counter:
            counter += 1
            self._report_remaining_budget(task_logger, counter)
            self._report_resources(task_logger, counter)
            self._report_completed_status(completed_jobs, cur_completed_jobs, task_logger, title, force=True)
            self._report_completed_tasks_best_results(set(completed_jobs.keys()), task_logger, title, counter)
    def _report_completed_status(self, completed_jobs, cur_completed_jobs, task_logger, title, force=False):
        """
        Update the scatter plot and the summary table of all created jobs.

        ``completed_jobs`` is mutated in place: it maps job id ->
        (objective value, last iteration, parameters dict incl. "status").
        Re-reports only when the set of completed jobs changed, unless *force*.
        Fires ``self._experiment_completed_cb`` for every newly completed job.
        """
        best_experiment = float('-inf'), None
        if force or cur_completed_jobs != set(completed_jobs.keys()):
            pairs = []
            labels = []
            created_jobs = copy(self.optimizer.get_created_jobs_ids())
            id_status = {j_id: j_run.status() for j_id, j_run in self.optimizer.get_created_jobs_tasks().items()}
            for i, (job_id, params) in enumerate(created_jobs.items()):
                value = self.objective_metric.get_objective(job_id)
                if job_id in completed_jobs:
                    # already-known job: refresh its value/status if it changed
                    if value != completed_jobs[job_id][0]:
                        iteration_value = self.objective_metric.get_current_raw_objective(job_id)
                        completed_jobs[job_id] = (
                            value,
                            iteration_value[0] if iteration_value else -1,
                            copy(dict(**params, **{"status": id_status.get(job_id)})))
                    elif completed_jobs.get(job_id):
                        completed_jobs[job_id] = (completed_jobs[job_id][0],
                                                  completed_jobs[job_id][1],
                                                  copy(dict(**params, **{"status": id_status.get(job_id)})))
                    pairs.append((i, completed_jobs[job_id][0]))
                    labels.append(str(completed_jobs[job_id][2])[1:-1])
                else:
                    # newly completed job (has an objective value for the first time)
                    if value is not None:
                        pairs.append((i, value))
                        labels.append(str(params)[1:-1])
                        iteration_value = self.objective_metric.get_current_raw_objective(job_id)
                        completed_jobs[job_id] = (
                            value,
                            iteration_value[0] if iteration_value else -1,
                            copy(dict(**params, **{"status": id_status.get(job_id)})))
                        # callback new experiment completed
                        if self._experiment_completed_cb:
                            normalized_value = self.objective_metric.get_normalized_objective(job_id)
                            if normalized_value is not None and normalized_value > best_experiment[0]:
                                best_experiment = normalized_value, job_id
                            c = completed_jobs[job_id]
                            self._experiment_completed_cb(job_id, c[0], c[1], c[2], best_experiment[1])

            if pairs:
                print('Updating job performance summary plot/table')

                # update scatter plot
                task_logger.report_scatter2d(
                    title='optimization', series=title,
                    scatter=pairs, iteration=0, labels=labels,
                    mode='markers', xaxis='job #', yaxis='objective')

                # update summary table
                if pd:
                    index = list(completed_jobs.keys())
                    table = {'objective': [completed_jobs[i][0] for i in index],
                             'iteration': [completed_jobs[i][1] for i in index]}
                    columns = set([c for k, v in completed_jobs.items() for c in v[2].keys()])
                    for c in sorted(columns):
                        table.update({c: [completed_jobs[i][2].get(c, '') for i in index]})
                    df = pd.DataFrame(table, index=index)
                    # sort so the best objective (per metric sign) ends up on top
                    df.sort_values(by='objective', ascending=bool(self.objective_metric.sign < 0), inplace=True)
                    df.index.name = 'task id'
                    task_logger.report_table(
                        "summary", "job", 0, table_plot=df,
                        extra_layout={"title": "objective: {}".format(title)})
    def _report_remaining_budget(self, task_logger, counter):
        """Report the remaining budget (per budget part, e.g. jobs/compute time)
        plus the remaining wall-clock time, as percentage scalars on the Task."""
        # noinspection PyBroadException
        try:
            budget = self.optimizer.budget.to_dict()
        except Exception:
            budget = {}

        # report remaining budget
        for budget_part, value in budget.items():
            task_logger.report_scalar(
                title='remaining budget', series='{} %'.format(budget_part),
                iteration=counter, value=round(100 - value['used'] * 100., ndigits=1))
        if self.optimization_timeout and self.optimization_start_time:
            task_logger.report_scalar(
                title='remaining budget', series='time %',
                iteration=counter,
                value=round(100 - (100. * (time() - self.optimization_start_time) /
                                   (self.optimization_timeout - self.optimization_start_time)), ndigits=1)
            )
    def _report_completed_tasks_best_results(self, completed_jobs, task_logger, title, counter):
        # type: (Set[str], Logger, str, int) -> ()
        """Report the best objective across completed jobs (max or min, depending on
        the objective sign) and the most recently completed job's objective value."""
        if completed_jobs:
            value_func, series_name = (max, "max") if self.objective_metric.get_objective_sign() > 0 else \
                (min, "min")
            latest_completed, obj_values = self._get_latest_completed_task_value(completed_jobs, series_name)
            if latest_completed:
                val = value_func(obj_values)
                task_logger.report_scalar(
                    title=title,
                    series=series_name,
                    iteration=counter,
                    value=val)
                task_logger.report_scalar(
                    title=title,
                    series="last reported",
                    iteration=counter,
                    value=latest_completed)
    def _report_resources(self, task_logger, iteration):
        # type: (Logger, int) -> ()
        """Report worker availability and task status scalars for this iteration."""
        self._report_active_workers(task_logger, iteration)
        self._report_tasks_status(task_logger, iteration)
    def _report_active_workers(self, task_logger, iteration):
        # type: (Logger, int) -> ()
        """Query the server for all workers and report how many are listening on
        the optimizer's execution queue."""
        cur_task = self._task or Task.current_task()
        res = cur_task.send(workers_service.GetAllRequest())
        response = res.wait()
        if response.ok():
            all_workers = response
            # count workers that have our execution queue among their queues
            queue_workers = len(
                [
                    worker.get("id")
                    for worker in all_workers.response_data.get("workers")
                    for q in worker.get("queues")
                    if q.get("name") == self.execution_queue
                ]
            )
            task_logger.report_scalar(title="resources",
                                      series="queue workers",
                                      iteration=iteration,
                                      value=queue_workers)
def _report_tasks_status(self, task_logger, iteration):
# type: (Logger, int) -> ()
tasks_status = {"running tasks": 0, "pending tasks": 0}
for job in self.optimizer.get_running_jobs():
if job.is_running():
tasks_status["running tasks"] += 1
else:
tasks_status["pending tasks"] += 1
for series, val in tasks_status.items():
task_logger.report_scalar(
title="resources", series=series,
iteration=iteration, value=val)
    def _get_latest_completed_task_value(self, cur_completed_jobs, series_name):
        # type: (Set[str], str) -> (float, List[float])
        """
        Fetch the stored objective metric from the server for every completed Task.

        :param cur_completed_jobs: set of task ids to query.
        :param series_name: "max" or "min" - selects which stored extreme is collected.
        :return: (objective value of the most recently completed Task,
                  list of min/max objective values of all completed Tasks).
        """
        completed_value = None
        latest_completed = None
        obj_values = []
        cur_task = self._task or Task.current_task()
        for j in cur_completed_jobs:
            res = cur_task.send(tasks_services.GetByIdRequest(task=j))
            response = res.wait()
            # skip tasks that are not (yet) in the "completed" state
            if not response.ok() or response.response_data["task"].get("status") != Task.TaskStatusEnum.completed:
                continue
            # server timestamp is ISO format with a timezone suffix; strip the "+..." part
            completed_time = datetime.strptime(response.response_data["task"]["completed"].partition("+")[0],
                                               "%Y-%m-%dT%H:%M:%S.%f")
            completed_time = completed_time.timestamp()
            completed_values = self._get_last_value(response)
            obj_values.append(completed_values['max_value'] if series_name == "max" else completed_values['min_value'])
            if not latest_completed or completed_time > latest_completed:
                latest_completed = completed_time
                completed_value = completed_values['value']
        return completed_value, obj_values
    def _get_last_value(self, response):
        """Extract the stored last-metric values dict (value/min_value/max_value)
        for the objective metric from a tasks.get_by_id response."""
        metrics, title, series, values = TrainsJob.get_metric_req_params(self.objective_metric.title,
                                                                         self.objective_metric.series)
        last_values = response.response_data["task"]['last_metrics'][title][series]
        return last_values
|
Navigator_pf.py | #encoding=utf-8
'''
project overview:
Subscribe:
1.slam pose(global/local pose) *
2.octomap_server/global map
3.local pointcloud/local octomap
4.target input(semantic target/visual pose target/gps target)
Publish:
1.Mavros(amo) Command
2.Navigator status
Algorithms:
1.D*
2.state transfer
3.position->position PID controller
4.global/semantic/visual target to local pose
'''
import threading
import time
from path_optimization.path_pruning import PathPruning
# for ros
import rospy
from geometry_msgs.msg import PoseStamped, Twist
from std_msgs.msg import Float32, String
from sensor_msgs.msg import Imu, NavSatFix, PointCloud, PointCloud2
import sensor_msgs.point_cloud2 as pc2
from visualization_msgs.msg import Marker,MarkerArray
# for mavros
from mavros_msgs.msg import GlobalPositionTarget, State, PositionTarget#, Command
from mavros_msgs.srv import CommandBool, SetMode
# for octomap
from octomap_msgs.msg import Octomap, OctomapWithPose, octomap_msgs
from helper import save_points3D, load_points3D
# other useful utilities
#from pyquaternion import Quaternion
import pyquaternion
import astar.astar
import astar.driver
import time
import math
from enum import Enum
import thread
#from queue import Queue
#from Pos2PosController import Pos2PosController as Controller # TODO:re-implement this.
from SimController import Controller as Controller
import DiscreteGridUtils
import numpy as np
from RandomSampling.randomsampling import randomsampling
from PotentialField.PotentialField import PotentialField3D
# define system status
class status(Enum):
    """Navigator state-machine states (the state name is published as a String
    on /gi/navigator_status via set_status/mavros_state_callback)."""
    INITIALIZED = 1
    LOOKING_FOR_PATH = 2
    LOOKING_FOR_PATH_SUCCEED = 3
    LOOKING_FOR_PATH_FAILED = 4
    GOING_TO_TARGET = 5
    GOING_TO_VISION_TARGET = 6
class Navigator:
    """
    High-level navigation state machine for a MAVROS-controlled drone.

    Subscribes to SLAM/MAVROS pose and octomap obstacle topics, plans a path on
    a discrete grid with a 3D potential field, and streams position setpoints
    through the controller until the target is reached.
    """

    def __init__(self, save_pts=False, config_file_path=None):
        """
        :param save_pts: if True, dump every received obstacle point set to disk.
        :param config_file_path: reserved, currently unused.
        """
        if config_file_path:
            pass
        rospy.init_node("gi_navigator_node")
        self.dg = DiscreteGridUtils.DiscreteGridUtils(grid_size=0.1)  # modify grid size according to different purposes
        self.rate = rospy.Rate(50)
        self.driver = astar.driver.Driver()
        self.controller = Controller()
        self.mavros_state = "OFFBOARD"
        self.set_status(status.INITIALIZED)
        self.save_pts = save_pts
        self.cur_command_id = 0
        self.prev_command_id = 0
        self.cur_target_position = None
        self.task_id = -1
        self.obstacle_set_mutex = threading.Lock()  # mutex.acquire(timeout);mutex.release()
        self.nav_command_mutex = threading.Lock()  # for nav command in dstar and ros high level command.
        self.local_pose_d = None  # latest pose in discrete grid coordinates
        self.local_pose_c = None  # latest pose in continuous (metric) coordinates
        self.navigator_status_pub = rospy.Publisher('/gi/navigator_status', String, queue_size=10)
        self.path_plan_pub = rospy.Publisher('/gi/navi_path_plan', MarkerArray, queue_size=10)
        self.path = []
        self.path_prune = PathPruning(obstacle_distance=12)
        # self.rs = randomsampling()
        self.pf = PotentialField3D()
        t1 = threading.Thread(target=self.ros_thread)
        t1.start()

    '''
    Navigating thread
    '''
    def keep_navigating(self):
        """Main plan/execute loop; runs while the FCU stays in OFFBOARD mode."""
        while self.mavros_state == "OFFBOARD" and not (rospy.is_shutdown()):
            # get target position and local position in discrete
            current_pos = self.get_local_pose_d()
            end_pos = self.get_latest_target()
            if current_pos is None:
                print('current pose not valid!')
                time.sleep(0.5)
                continue
            if end_pos is None:
                # ROBUSTNESS FIX: no target set yet - wait instead of crashing
                # inside reachTargetPositionDiscrete(None, ...)
                time.sleep(0.5)
                continue
            while not self.reachTargetPositionDiscrete(end_pos, 4) \
                    and (not self.navi_task_terminated()) \
                    and (not rospy.is_shutdown()):  # Till task is finished:
                print('From ', self.get_local_pose_d())
                # This is the place where you modify path planner
                t1 = time.time()
                self.set_status(status.LOOKING_FOR_PATH)
                print("start and end are: ", self.get_local_pose_d(), end_pos)
                temp_obs = self.driver.get_obstacles_around()
                if temp_obs is not None:
                    self.pf.set_obstacle(temp_obs)
                self.path = self.pf.find_path(np.array(self.get_local_pose_d()), np.array(end_pos))
                t2 = time.time()
                print('Path planning time cost:', (t2 - t1))
                if not self.path:
                    # BUGFIX: was LOOKING_FOR_PATH_SUCCEED on the failure branch
                    self.set_status(status.LOOKING_FOR_PATH_FAILED)
                    print('No path found!, self.path is None')
                    time.sleep(0.05)
                else:
                    # Path found. keep state machine and do task step by step.
                    print("Path found!")
                    self.publish_path(self.path, (1, 0, 0))
                    # BUGFIX: was LOOKING_FOR_PATH_FAILED on the success branch
                    self.set_status(status.LOOKING_FOR_PATH_SUCCEED)
                    # going through each waypoint
                    for next_move in self.path:
                        if self.navi_task_terminated():
                            break
                        print('next_move : ', next_move)
                        # if not self.driver.algo.is_valid(next_move, self.driver.get_obstacles_around()):
                        #     print('Next waypoint is in collision with obstacle, path not valid!')
                        #     break
                        next_position_continuous = self.dg.discrete_to_continuous_target(next_move)
                        print("local pose: ", self.local_pose_c)
                        print("target pose: ", next_position_continuous)
                        while not self.reachTargetPositionContinuous(next_position_continuous, 1):
                            self.controller.mav_move(next_position_continuous[0],
                                                     next_position_continuous[1],
                                                     next_position_continuous[2],
                                                     abs_mode=True)
                            time.sleep(0.05)
                        print("Target Reached!")
            time.sleep(0.05)  # wait for new nav task.
        print("Mavros not in OFFBOARD mode, Disconnected!")

    '''
    move quad in body frame
    '''
    def terminate_navigating(self):
        # TODO: stop the current navigation task
        pass

    def resume_navigating(self):
        # TODO: resume a paused navigation task
        pass

    def set_target_position(self, target_position):
        """Set a new navigation target given in continuous (metric) coordinates."""
        # BUGFIX: original used "len(target_position) is 3" - identity comparison
        # on small ints is implementation-dependent; use equality.
        if target_position and len(target_position) == 3:
            self.cur_target_position = self.dg.continuous_to_discrete(target_position)

    def get_latest_target(self):
        """Return the current target in discrete grid coordinates (or None)."""
        return self.cur_target_position

    def set_vision_target(self, vision_target):
        self.set_status(status.GOING_TO_VISION_TARGET)
        self.set_target_position(vision_target)

    def navi_task_terminated(self):
        """Return True once the vehicle is close enough to the current target."""
        # BUGFIX: dist() returns a (valid, squared_distance) tuple; the original
        # compared the whole tuple against 2, which never worked as intended.
        # The "< 2" threshold on the squared grid distance is kept from the
        # original code. TODO(review): confirm the intended units/threshold.
        valid, sq_dist = self.dist(self.local_pose_d, self.cur_target_position)
        # TODO: or stop flag is set.
        return valid and sq_dist < 2

    '''
    Dstar Thread
    def Dstar_thread(self):
        while not rospy.is_shutdown():
            while status!= xxx:# TODO
                next_move = xxx
        return next_move'''
    '''##For test:
    target = [0.5, 0.5, 0.5]
    self.set_target_postion(target)
    pass'''

    '''
    ROS thread
    responsible for subscribers and publishers
    '''
    def ros_thread(self):
        """Register all subscribers/publishers and spin (blocking)."""
        print('ros_thread spawn!!!!')
        self.octomap_msg = None
        # subscribers
        self.slam_sub = rospy.Subscriber("/gi/slam_output/pose", PoseStamped, self.slam_pose_callback)
        self.vision_target_sub = rospy.Subscriber("/gi/visual_target/pose", PoseStamped, self.vision_target_callback)
        self.point_cloud_sub = rospy.Subscriber("/camera/left/point_cloud", PointCloud, self.point_cloud_callback)
        self.octomap_cells_vis = rospy.Subscriber("/octomap_point_cloud_centers", PointCloud2, self.octomap_update_callback)
        self.local_pose_sub = rospy.Subscriber("/mavros/local_position/pose", PoseStamped, self.local_pose_callback)
        self.mavros_sub = rospy.Subscriber("/mavros/state", State, self.mavros_state_callback)
        # publishers
        # self.mavros_control_pub = rospy.Publisher('mavros/Command', Command, queue_size=10)
        self.set_status(status.INITIALIZED)
        rospy.spin()

    '''
    ROS callbacks
    '''
    def slam_pose_callback(self, msg):
        self.slam_pose = msg

    def vision_target_callback(self, msg):
        self.vision_target = msg
        # print("Received New Vision Target!")

    def mavros_state_callback(self, msg):
        # track FCU mode and piggyback a status heartbeat on every state message
        self.mavros_state = msg.mode
        self.navigator_status_pub.publish(self.STATUS)

    def point_cloud_callback(self, msg):
        self.current_point_cloud = msg

    def octomap_update_callback(self, msg):  # as pointcloud2.
        """Convert the octomap center cloud into a set of discrete obstacle cells."""
        obs_set = set()
        for p in pc2.read_points(msg, field_names=("x", "y", "z"), skip_nans=True):
            point = self.dg.continuous_to_discrete((p[0], p[1], p[2]))
            obs_set.add(point)
        # save points set
        if self.save_pts:
            save_points3D(obs_set)
        acquired = self.obstacle_set_mutex.acquire(True)  # blocking.
        if acquired:
            # print('octomap updated!')
            self.driver.set_obstacle_set(obs_set)
            self.obstacle_set_mutex.release()
            return
        else:
            print('Lock not acquired!')

    def local_pose_callback(self, msg):
        pose_ = msg.pose.position  # TODO:do fusion with visual slam.
        self.local_pose_c = (pose_.x, pose_.y, pose_.z)
        self.local_pose_d = self.dg.continuous_to_discrete((pose_.x, pose_.y, pose_.z))

    # return pose in discrete
    def get_local_pose_d(self):  # in mavros axis.for command.
        return self.local_pose_d

    # return pose in continuous
    def get_local_pose_c(self):
        return self.local_pose_c

    '''
    helper functions
    '''
    def set_status(self, status):
        self.STATUS = String(status.name)

    def dist(self, pos1, pos2):
        """Squared distance between two 3D points.

        :return: (valid, squared_distance); valid is False when either point is missing.
        """
        # BUGFIX: parameter was misspelled "sefl"; the reduce/map pipeline was
        # replaced with an equivalent generator sum (same result).
        if not pos1 or not pos2:
            return False, 0
        return True, sum((pos1[i] - pos2[i]) ** 2 for i in (0, 1, 2))

    # target should be Continuous
    def reachTargetPositionContinuous(self, target, threshold=0.7):
        """True when the Manhattan distance (metres) to *target* is below *threshold*."""
        delta_x = math.fabs(self.local_pose_c[0] - target[0])
        delta_y = math.fabs(self.local_pose_c[1] - target[1])
        delta_z = math.fabs(self.local_pose_c[2] - target[2])
        distance = (delta_x + delta_y + delta_z)
        print("distance: ", distance, "threshold: ", threshold)
        return distance < threshold

    # target should be discrete
    def reachTargetPositionDiscrete(self, target, threshold=3):
        """True when the Manhattan distance (grid cells) to *target* is below *threshold*."""
        delta_x = math.fabs(self.local_pose_d[0] - target[0])
        delta_y = math.fabs(self.local_pose_d[1] - target[1])
        delta_z = math.fabs(self.local_pose_d[2] - target[2])
        distance = (delta_x + delta_y + delta_z)
        print("distance: ", distance, "threshold: ", threshold)
        return distance < threshold

    def setMavMode(self, msg):
        pass

    def do_hover(self):
        pass

    def publish_path(self, path, RGB=(1, 0, 0)):
        """Publish *path* (discrete waypoints) as a MarkerArray of cubes for RViz."""
        m_arr = MarkerArray()
        marr_index = 0
        for next_move in path:
            point = self.dg.discrete_to_continuous_target((next_move[0], next_move[1], next_move[2]))
            mk = Marker()
            mk.header.frame_id = "map"
            mk.action = mk.ADD
            mk.id = marr_index
            marr_index += 1
            mk.color.r = RGB[0]
            mk.color.g = RGB[1]
            mk.color.b = RGB[2]
            mk.color.a = 1.0
            mk.type = mk.CUBE
            mk.scale.x = 0.3
            mk.scale.y = 0.3
            mk.scale.z = 0.3
            mk.pose.position.x = point[0]
            mk.pose.position.y = point[1]
            mk.pose.position.z = point[2]
            m_arr.markers.append(mk)
        self.path_plan_pub.publish(m_arr)
if __name__ == '__main__':
    # start the navigator and send it to a fixed demo target
    nav = Navigator(False)
    # target position should be meter
    # nav.set_target_position((0, -3, 1))
    nav.set_target_position((5, 0, 1))
    nav.keep_navigating()
|
test_threading_local.py | import sys
import unittest
from doctest import DocTestSuite
from test import support
import weakref
import gc
# Modules under test
import _thread
import threading
import _threading_local
class Weak(object):
    """Trivial instance holder; exists solely so tests can take weak references."""
    pass


def target(local, weaklist):
    """Attach a fresh Weak instance to *local* and record a weakref to it."""
    instance = Weak()
    weaklist.append(weakref.ref(instance))
    local.weak = instance
class BaseLocalTest:
    """Shared thread-local-storage tests; subclasses set ``_local`` to the
    implementation under test (C ``_thread._local`` or pure-Python
    ``_threading_local.local``)."""

    def test_local_refs(self):
        self._local_refs(20)
        self._local_refs(50)
        self._local_refs(100)

    def _local_refs(self, n):
        # Run n threads that each store a Weak instance on the shared local and
        # record a weakref; check the per-thread values became garbage once the
        # threads exited.
        local = self._local()
        weaklist = []
        for i in range(n):
            t = threading.Thread(target=target, args=(local, weaklist))
            t.start()
            t.join()
        del t

        gc.collect()
        self.assertEqual(len(weaklist), n)

        # XXX _threading_local keeps the local of the last stopped thread alive.
        deadlist = [weak for weak in weaklist if weak() is None]
        self.assertIn(len(deadlist), (n-1, n))

        # Assignment to the same thread local frees it sometimes (!)
        local.someothervar = None
        gc.collect()
        deadlist = [weak for weak in weaklist if weak() is None]
        self.assertIn(len(deadlist), (n-1, n), (n, len(deadlist)))

    def test_derived(self):
        # Issue 3088: if there is a threads switch inside the __init__
        # of a threading.local derived class, the per-thread dictionary
        # is created but not correctly set on the object.
        # The first member set may be bogus.
        import time
        class Local(self._local):
            def __init__(self):
                time.sleep(0.01)
        local = Local()

        def f(i):
            local.x = i
            # Simply check that the variable is correctly set
            self.assertEqual(local.x, i)

        with support.start_threads(threading.Thread(target=f, args=(i,))
                                   for i in range(10)):
            pass

    def test_derived_cycle_dealloc(self):
        # http://bugs.python.org/issue6990
        class Local(self._local):
            pass
        locals = None
        passed = False
        e1 = threading.Event()
        e2 = threading.Event()

        def f():
            nonlocal passed
            # 1) Involve Local in a cycle
            cycle = [Local()]
            cycle.append(cycle)
            cycle[0].foo = 'bar'

            # 2) GC the cycle (triggers threadmodule.c::local_clear
            # before local_dealloc)
            del cycle
            gc.collect()
            e1.set()
            e2.wait()

            # 4) New Locals should be empty
            passed = all(not hasattr(local, 'foo') for local in locals)

        t = threading.Thread(target=f)
        t.start()
        e1.wait()

        # 3) New Locals should recycle the original's address. Creating
        # them in the thread overwrites the thread state and avoids the
        # bug
        locals = [Local() for i in range(10)]
        e2.set()
        t.join()

        self.assertTrue(passed)

    def test_arguments(self):
        # Issue 1522237: base local() must reject constructor arguments unless
        # a subclass overrides __init__.
        class MyLocal(self._local):
            def __init__(self, *args, **kwargs):
                pass

        MyLocal(a=1)
        MyLocal(1)
        self.assertRaises(TypeError, self._local, a=1)
        self.assertRaises(TypeError, self._local, 1)

    def _test_one_class(self, c):
        # Verify attributes set in one thread are invisible in another thread.
        self._failed = "No error message set or cleared."
        obj = c()
        e1 = threading.Event()
        e2 = threading.Event()

        def f1():
            obj.x = 'foo'
            obj.y = 'bar'
            del obj.y
            e1.set()
            e2.wait()

        def f2():
            try:
                foo = obj.x
            except AttributeError:
                # This is expected -- we haven't set obj.x in this thread yet!
                self._failed = ""  # passed
            else:
                self._failed = ('Incorrectly got value %r from class %r\n' %
                                (foo, c))
                sys.stderr.write(self._failed)

        t1 = threading.Thread(target=f1)
        t1.start()
        e1.wait()
        t2 = threading.Thread(target=f2)
        t2.start()
        t2.join()
        # The test is done; just let t1 know it can exit, and wait for it.
        e2.set()
        t1.join()

        self.assertFalse(self._failed, self._failed)

    def test_threading_local(self):
        self._test_one_class(self._local)

    def test_threading_local_subclass(self):
        class LocalSubclass(self._local):
            """To test that subclasses behave properly."""
        self._test_one_class(LocalSubclass)

    def _test_dict_attribute(self, cls):
        # __dict__ is readable but must not be replaceable or deletable.
        obj = cls()
        obj.x = 5
        self.assertEqual(obj.__dict__, {'x': 5})
        with self.assertRaises(AttributeError):
            obj.__dict__ = {}
        with self.assertRaises(AttributeError):
            del obj.__dict__

    def test_dict_attribute(self):
        self._test_dict_attribute(self._local)

    def test_dict_attribute_subclass(self):
        class LocalSubclass(self._local):
            """To test that subclasses behave properly."""
        self._test_dict_attribute(LocalSubclass)

    def test_cycle_collection(self):
        # A reference cycle going through a local must still be collectable.
        class X:
            pass

        x = X()
        x.local = self._local()
        x.local.x = x
        wr = weakref.ref(x)
        del x
        gc.collect()
        self.assertIsNone(wr())
class ThreadLocalTest(unittest.TestCase, BaseLocalTest):
    # run the shared tests against the C implementation
    _local = _thread._local
class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest):
    # run the shared tests against the pure-Python fallback implementation
    _local = _threading_local.local
def test_main():
    """Build and run the full suite: module doctests plus both implementations'
    unit tests, then the doctests again with the C local patched in."""
    suite = unittest.TestSuite()
    suite.addTest(DocTestSuite('_threading_local'))
    suite.addTest(unittest.makeSuite(ThreadLocalTest))
    suite.addTest(unittest.makeSuite(PyThreadingLocalTest))

    local_orig = _threading_local.local
    def setUp(test):
        # run the module doctests against the C implementation too
        _threading_local.local = _thread._local
    def tearDown(test):
        _threading_local.local = local_orig
    suite.addTest(DocTestSuite('_threading_local',
                               setUp=setUp, tearDown=tearDown)
                  )

    support.run_unittest(suite)

if __name__ == '__main__':
    test_main()
|
a_HARP_LiveDemo_ICIP2014.py | #!/usr/bin/env python
# coding: utf8
# (c) 2014 Dominic Springer, Wolfgang Schnurrer
# File licensed under GNU GPL (see HARP_License.txt)
import sys, os
__builtins__.ProjectDir = os.path.abspath("../")
assert( "HARP" in os.path.basename(ProjectDir) )
__builtins__.LibDir = ProjectDir + "/PythonLib"
__builtins__.TmpDir = ProjectDir + "/tmp"
sys.path.append(LibDir)
# GLOBAL SETTINGS, CHANGE HERE --------------------------------
X265_BinDir = ProjectDir + "/bin/Demo/x265_64Bit"
#X265_BinDir = "/home/lnt335/HARP/HARP"
VideoSrc = 0 # V4L2 video source
isVirtualCam = False #for debugging
# -------------------------------------------------------------
os.environ["LD_LIBRARY_PATH"] = X265_BinDir
from Imports_Basic import *
from OpenCV import *
from System import *
from Sequence import *
from Warp import *
from GUI.ShowPOC import *
from GUI.AnalyzePOC import *
from Encoder.X265_Encoder import *
from Encoder.HM_Encoder import *
# OPENCV VIDEO
import sys, os
sys.path.append(ProjectDir + "/Various/ThirdParty")
import opencv.video #third party
# PYQTGRAPH
from PyQt4 import QtGui #force PyQt
for gs in ['raster', 'native', 'opengl']: # force specific graphics system
if gs in sys.argv:
QtGui.QApplication.setGraphicsSystem(gs)
break
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph as pg
import pyqtgraph.ptime as ptime
from pyqtgraph.dockarea import *
import pyqtgraph.parametertree.parameterTypes as pTypes
from pyqtgraph.parametertree import Parameter, ParameterTree, ParameterItem, registerParameterType
from PyQt4.QtCore import * #reimport since pyqtgraph runs: "from PyQt4 import QtCore, QtGui"
from PyQt4.QtGui import *
# END PYQTGRAPH
cam = None
Ref = None
Cur = None
DimX = None
DimY = None
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
class VirtualCam(object):
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
    """Webcam stand-in for debugging: read() alternates between two static images,
    mimicking the (ret, frame) interface of cv2.VideoCapture."""
    #==========================================
    def __init__(self):
    #==========================================
        super(VirtualCam, self).__init__()
        # CHANGE HERE -------------------------------------------------
        # -------------------------------------------------------------
        self.Images = []
        Img0 = readImg(ProjectDir + "/Various/Resources/Tohajiilee.jpg")
        Img0 = cv2.resize(Img0, (640, 480), interpolation=cv2.INTER_LINEAR)
        Img1 = readImg(ProjectDir + "/Various/Resources/Tohajiilee_rotated.jpg")
        Img1 = cv2.resize(Img1, (640, 480), interpolation=cv2.INTER_LINEAR)
        self.Images.append(Img0)
        self.Images.append(Img1)
        self.toggle = 0  # index of the image to return next

    #==========================================
    def read(self):
    #==========================================
        """Return (None, image), alternating between the two stored images."""
        if self.toggle == 0:
            Image = self.Images[0]
        else : Image = self.Images[1]
        self.toggle = not self.toggle
        time.sleep(0.015)  # throttle to roughly webcam pace
        return None, Image

    #==========================================
    def release(self):
    #==========================================
        # nothing to free for the virtual camera
        pass
#==========================================
def startGrabbing():
#==========================================
    """Grabber thread body: capture (Ref, Cur) frame pairs from the (possibly
    virtual) camera into the global YUV images until StopGrabbingThread is set."""
    global VideoSrc
    global cam
    global DimX, DimY
    global Ref, Cur
    global ThreadLock
    global StopGrabbingThread
    global isVirtualCam

    if isVirtualCam:
        cam = VirtualCam()
    else:
        cam = opencv.video.create_capture(VideoSrc)

    #-------------------------------------
    # RETRIEVE CAM INFOS
    #-------------------------------------
    ret, frame = cam.read()
    DimY, DimX = frame.shape[:2]
    print "\nWebcam resolution: %dx%d\n" % (DimX, DimY)

    cnt = 0
    while(not StopGrabbingThread):
        ret, TmpRefRGB = cam.read()
        ThreadLock.acquire()
        # skip a few frames so Ref and Cur differ noticeably
        for x in range(0, 4):
            ret, test = cam.read()
        ret, TmpCurRGB = cam.read()
        TmpCur = cv2.cvtColor(TmpCurRGB, cv2.COLOR_RGB2YUV)
        TmpRef = cv2.cvtColor(TmpRefRGB, cv2.COLOR_RGB2YUV)
        # publish the pair atomically (lock held while both are swapped in)
        Ref = TmpRef
        Cur = TmpCur
        #MSEC = cam.get(cv2.cv.CV_CAP_PROP_FPS)
        if cnt % 100 == 0:
            print "Webcam captured: %d pairs" % cnt
        cnt += 1
        ThreadLock.release()
#==========================================
def stopGrabbing():
#==========================================
    """Signal the grabber thread to stop, then release the camera.

    Acquiring ThreadLock waits for the grabber's current iteration to finish
    before the camera handle is released."""
    global cam
    global StopGrabbingThread
    StopGrabbingThread = True
    ThreadLock.acquire()
    cam.release()
    ThreadLock.release()
# Initialize the synchronization globals BEFORE starting the grabber thread.
# BUGFIX: the original started the thread first and only then defined
# ThreadLock / StopGrabbingThread, so startGrabbing() could race on the
# not-yet-defined globals and die with a NameError in the worker thread.
ThreadLock = threading.Lock()
StopGrabbingThread = False
GrabbingThread = threading.Thread(target=startGrabbing)
GrabbingThread.daemon = False
GrabbingThread.start()
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
class Demo(object):
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
    """Drives the live demo: encodes grabbed frame pairs with x265 or HM,
    decodes them, and visualizes the HEVC coding structures extracted by HARP."""
    #==========================================
    def __init__(self, GUI):
    #==========================================
        super(Demo, self).__init__()
        # CHANGE HERE -------------------------------------------------
        # -------------------------------------------------------------
        self.GUI = GUI
        self.p0 = None
        self.use_ransac = True
        os.chdir(TmpDir)

    #==========================================
    def processFramePair(self, Ref, Cur):
    #==========================================
        """Write (Ref, Cur) as a 2-frame YUV, encode it (x265 or HM per GUI
        setting), decode with the HM decoder, and visualize the resulting
        per-POC CU/PU/mode data."""
        QP = self.GUI.p.param('Basic settings', 'QP').value()
        # clamp QP to the valid range and write the value back to the GUI
        if QP>50:
            QP=50
        if QP<1:
            QP=1
        QP = round(QP)
        self.GUI.p.param('Basic settings', 'QP').setValue(QP)
        isX265 = self.GUI.p.param('Basic settings', 'x265').value()

        DimY, DimX = Ref.shape[:2]
        assert(DimX == 640 and DimY == 480)
        self.starttime = time.time()

        FN_YUV = TmpDir + "/webcam_%dx%d.yuv" % ( DimX, DimY)
        outfile = open(FN_YUV, "w")
        #--------------------------------------
        # CREATE YUV (2 FRAMES)
        #--------------------------------------
        write_YUV(outfile, Ref)
        write_YUV(outfile, Cur)
        outfile.close()
        Sequ = Sequence(FN_YUV, Fr=2, FPS=30)

        #--------------------------------------
        # RUN X265 ENCODER
        #--------------------------------------
        if isX265:
            global X265_BinDir
            EncBin = X265_BinDir + "/x265"
            INI_FN = ProjectDir + "/PythonLib/Encoder/X265_Settings.ini"
            assert os.path.isfile(INI_FN), "INI file not found: " + INI_FN
            Encoder = X265_Encoder( OutputDir=TmpDir, Passport="007",
                                    Name="X265", InfoStr="_Test",
                                    EncBin=EncBin, DecBin=None, INI_FN=INI_FN, OverrideStr="",
                                    Sequ=Sequ, QP=QP,
                                    PSNR_Tool=None)
            EncoderCmd = Encoder.get_CommandLineCall()
            print "EncoderCmd: " + EncoderCmd
            assert os.system(EncoderCmd)==0, "encoder cmd failed"
        else:
            #--------------------------------------
            # RUN HM ENCODER
            #--------------------------------------
            EncBin = ProjectDir + "/bin/TAppEncoder"
            INI_FN = ProjectDir + "/PythonLib/Encoder/HM_Encoder.ini"
            assert os.path.isfile(INI_FN), "INI file not found: " + INI_FN
            Encoder = HM_Encoder( OutputDir=TmpDir, Passport="007",
                                  Name="HMEnc", InfoStr="_Test",
                                  EncBin=EncBin, DecBin=None, INI_FN=INI_FN, OverrideStr="",
                                  Sequ=Sequ, QP=QP,
                                  PSNR_Tool=None)
            EncoderCmd = Encoder.get_CommandLineCall()
            EncoderCmd += " --HARP_TmpDir=."
            print "EncoderCmd: " + EncoderCmd
            #DecoderCmd += " --HARP_PUs" #debug
            assert os.system(EncoderCmd)==0, "encoder cmd failed"

        #--------------------------------------
        # RUN HM DECODER
        #--------------------------------------
        DecoderCmd = ProjectDir + "/bin/TAppDecoder -b " + Encoder.bitstream + " -o decoded.yuv --HARP_TmpDir=. "
        #DecoderCmd += " --HARP_PUs" #debug
        assert os.system(DecoderCmd)==0, "decoder cmd failed"

        #-------------------------------------
        # LOADING DECODER PKL
        #-------------------------------------
        # POC 0 is the intra frame, POC 1 the inter frame of the 2-frame sequence
        print "LOADING DECODER PKL"
        POCIdx = 0
        FN = TmpDir + "/" + "PyPOC_%05d.pkl" % POCIdx
        assert os.path.exists(FN), "PWD: %s, missing FN: %s" % (os.getcwd(), FN)
        POC_Intra = pickle.load(open(FN, "rb" ) )
        POCIdx = 1
        FN = TmpDir + "/" + "PyPOC_%05d.pkl" % POCIdx
        assert os.path.exists(FN), "PWD: %s, missing FN: %s" % (os.getcwd(), FN)
        POC_Inter = pickle.load(open(FN, "rb" ) )

        #self.GUI.p.param('Basic settings', 'Show CUs').setOpts(readonly=False, enabled=True)
        Show_CUs = self.GUI.p.param('Basic settings', 'Show CUs').value()
        Show_PUs = self.GUI.p.param('Basic settings', 'Show PUs').value()
        Show_Modes = self.GUI.p.param('Basic settings', 'Show Modes').value()
        #self.GUI.p.param('Basic settings', 'Show CUs').setValue(not self.GUI.p.param('Basic settings', 'Show CUs').value())

        self.ShowPOC_Intra = ShowPOC(POC_Intra, Show_CUs, Show_PUs, Show_Modes)
        self.ShowPOC_Intra.visualize()
        self.ShowPOC_Inter = ShowPOC(POC_Inter, Show_CUs, Show_PUs, Show_Modes)
        self.ShowPOC_Inter.visualize()
        self.AnalyzePOC_Inter = AnalyzePOC(POC_Inter)
        self.AnalyzePOC_Inter.analyze()
        #cv2.imwrite(TmpDir + "/VizPUs.jpg", VizPUs)

        #--------------------------------------
        # TIMING
        #--------------------------------------
        NumSecs = (time.time() - self.starttime)
        FPS = 1 / NumSecs
        print "\nFPS = %f \n ------------------ \n" % FPS

    #==========================================
    def run(self):
    #==========================================
        """Main demo loop: process grabbed frame pairs until done or interrupted;
        always stops the grabber on the way out to release the webcam."""
        try :
            global Ref
            global Cur

            #-------------------------------------
            # FOR ALL RUNS
            #-------------------------------------
            NumRuns = 1000
            for myrun in np.arange(NumRuns):
                # NOTE(review): processFramePair() takes only (Ref, Cur) but is
                # called with a third argument here - this raises TypeError.
                self.processFramePair(Ref, Cur, 15)
                # NOTE(review): self.VizPUs is never assigned in this class -
                # this line would raise AttributeError if reached.
                cv2.imshow('frame', self.VizPUs)
                cv2.waitKey(1)
            stopGrabbing()
            cv2.destroyAllWindows()

        except Exception, Str: #prevent V4L2 to eat the webcam
            import traceback
            print "Exception!"
            stopGrabbing()
            cv2.destroyAllWindows()
            print "EXCEPTION------------------------"
            print "Unexpected ERROR:", sys.exc_info()[0]
            traceback.print_tb(sys.exc_info()[2])
            print Str
            print "---------------------------------"
            raise
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
class GUIMainWindow(QtGui.QMainWindow):
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
    """Main demo window.

    Builds the dock layout (description, control panel, intra/inter PU
    views, frame-difference/current-frame views, three histograms), the
    parameter tree, and starts the background WorkThread that feeds the
    widgets through slot_Update.
    """
    #==========================================
    def __init__(self):
    #==========================================
        super(GUIMainWindow, self).__init__()
        # CHANGE HERE -------------------------------------------------
        # -------------------------------------------------------------
        area = DockArea()
        self.setCentralWidget(area)
        self.resize(1280, 800)
        self.setWindowTitle('Demo: How to use HARP with Python')
        ## Create docks, place them into the window one at a time.
        ## Note that size arguments are only a suggestion; docks will still have to
        ## fill the entire dock area and obey the limits of their internal widgets.
        d1 = Dock("Control", size=(300,200)) ## give this dock the minimum possible size
        d2 = Dock("Description", size=(300,800))
        d31 = Dock("INTRA frame - Prediction Units", size=(500,300))
        d32 = Dock("INTER frame - Prediction Units", size=(500,300))
        #d33 = Dock("Dock3 - Transform Units", size=(500,300))
        d41 = Dock("Frame Difference ", size=(100,100))
        d42 = Dock("Current Frame ", size=(100,100))
        d51 = Dock("CU Depths", size=(200,100))
        d52 = Dock("MVs X Component", size=(200,100))
        d53 = Dock("MVs Y Component", size=(200,100))
        area.addDock(d2, 'left') ## place d1 at left edge of dock area (it will fill the whole space since there are no other docks yet)
        area.addDock(d1, 'bottom', d2) ## place d2 at right edge of dock area
        area.addDock(d31, 'right')
        area.addDock(d32, 'bottom', d31)
        #area.addDock(d33, 'bottom', d32)
        area.addDock(d41, 'right')
        area.addDock(d51, 'bottom', d41)
        area.addDock(d42, 'right', d41)
        area.addDock(d52, 'right', d51)
        area.addDock(d53, 'right', d52)
        #==========================================
        def dock_ImageItem(self, Dock):
        #==========================================
            # Local helper: embed an ImageItem (locked aspect, inverted Y)
            # into the given dock; returns the item for later setImage().
            pgGLWidget = pg.GraphicsLayoutWidget()
            ViewBox = pgGLWidget.addViewBox(invertY = True)
            #ViewBox.setBackgroundColor((255,255,255))
            ViewBox.setAspectLocked(True)
            pgImageItem = pg.ImageItem(border='w')
            ViewBox.addItem(pgImageItem)
            Dock.addWidget(pgGLWidget)
            return pgImageItem
        #==========================================
        def dock_CurveItem(self, Dock, Title, LabelX, LabelY):
        #==========================================
            # Local helper: embed a step-mode histogram curve into the given
            # dock; returns the curve for later setData().
            pgGWindow= pg.GraphicsLayoutWidget()
            pgPlot = pgGWindow.addPlot(title=Title)
            x =[0,0,0]
            y = [0,0]
            pgCurveItem = pg.PlotCurveItem(x, y, stepMode=True, fillLevel=0, brush=(0, 255, 0, 80))
            pgPlot.addItem(pgCurveItem)
            pgPlot.setLabel('bottom', LabelX)
            pgPlot.setLabel('left', LabelY)
            Dock.addWidget(pgGWindow)
            return pgCurveItem
        # NOTE(review): the two helpers above are called with self passed
        # explicitly, so they appear to be local functions of __init__ --
        # confirm against the original, unflattened source.
        self.ImageItem_d2 = dock_ImageItem(self, d2)
        self.ImageItem_d31 = dock_ImageItem(self, d31)
        self.ImageItem_d32 = dock_ImageItem(self, d32)
        self.ImageItem_d41 = dock_ImageItem(self, d41)
        self.ImageItem_d42 = dock_ImageItem(self, d42)
        self.CurveItem_d51 = dock_CurveItem(self, d51, "CU Depths", "CU Depth", "Number of Occurences")
        self.CurveItem_d52 = dock_CurveItem(self, d52, "MVs X Component", "Magnitude", "Number of Occurences")
        self.CurveItem_d53 = dock_CurveItem(self, d53, "MVs Y Component", "Magnitude", "Number of Occurences")
        # Encoder settings shown in the Control dock
        params = [
            {'name': 'Basic settings', 'type': 'group', 'children':
            [
                {'name': 'QP', 'type': 'int', 'value': 30},
                {'name': 'x265', 'type': 'bool', 'value': True},
                {'name': 'Show CUs', 'type': 'bool', 'value': True},
                {'name': 'Show PUs', 'type': 'bool', 'value': True},
                {'name': 'Show Modes', 'type': 'bool', 'value': True},
            ]},
        ]
        ## Create tree of Parameter objects
        p = Parameter.create(name='params', type='group', children=params, readonly=False, enabled=True)
        t = ParameterTree()
        t.setParameters(p, showTop=False)
        t.setWindowTitle('pyqtgraph example: Parameter Tree')
        self.p = p
        d1.addWidget(t)
        # Background encoder thread; it emits results back to slot_Update
        MyWorkThread = WorkThread(self)
        MyWorkThread.start()
        Description = readImg(ProjectDir + "/Various/Resources/Special/LMS_Demo.png")
        Description = cv2.transpose(cv2.cvtColor(Description, cv2.COLOR_BGR2RGB))
        self.ImageItem_d2.setImage(Description, autoDownsample=True, border=(255,255,255) )
        # pic = QtGui.QLabel()
        # #pic.setGeometry(0, 0, 400, 400)
        # #use full ABSOLUTE path to the image, not relative
        # pic.setPixmap(QtGui.QPixmap(ProjectDir + "/Various/Resources/DemoChain.png"))
        # pic.setMaximumWidth(300)
        # pic.setMaximumHeight(300)
        # d2.addWidget(pic)
    # #==========================================
    # def __del__(self):
    # #==========================================
    #     stopGrabbing()
    #     super(GUIMainWindow, self).__del__()
    #==========================================
    # @Slot()
    def closeEvent(self,event):
    #==========================================
        """Ask for confirmation; only stop the grabber and close on Yes."""
        result = QtGui.QMessageBox.question(self,
                                            "Confirm Exit...",
                                            "Are you sure you want to exit ?",
                                            QtGui.QMessageBox.Yes| QtGui.QMessageBox.No)
        event.ignore()
        if result == QtGui.QMessageBox.Yes:
            stopGrabbing()
            event.accept()
            #sys.exit(0)
    #==========================================
    # @Slot()
    def slot_Update(self, update_tuple):
    #==========================================
        """Receive (Intra, Inter, LumDiff, CurCopy, CU_Depths, MV_x, MV_y)
        from the WorkThread and refresh the image and histogram widgets."""
        #(Intra, Inter, LumDiff, CurCopy, CU_Depths))
        self.ImageItem_d31.setImage(update_tuple[0])
        self.ImageItem_d32.setImage(update_tuple[1])
        self.ImageItem_d41.setImage(update_tuple[2])
        self.ImageItem_d42.setImage(update_tuple[3])
        # CU depth histogram (depths 0..3)
        y,x = np.histogram(update_tuple[4], bins=[0, 1, 2, 3, 4])
        #self.MV_x_Histo.setData(x,y)
        #self.curve = self.GUI.PlotHisto.plot(pen='y')
        self.CurveItem_d51.setData(x,y)
        y,x = np.histogram(update_tuple[5], bins=np.linspace(-30*6, 30*6, 80))
        self.CurveItem_d52.setData(x,y)
        y,x = np.histogram(update_tuple[6], bins=np.linspace(-30*6, 30*6, 80))
        self.CurveItem_d53.setData(x,y)
        print "Result ready"
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
class WorkThread(QtCore.QThread):
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
    """Background Qt thread: runs the Demo encode pipeline on copies of the
    shared Ref/Cur frames and emits visualization data to the GUI."""
    # payload: (Intra, Inter, LumDiff, CurCopy, CU_Depths, MV_x, MV_y)
    signal_Update = Signal(object)
    #==========================================
    def __init__(self, rGUIMainWindow):
    #==========================================
        QtCore.QThread.__init__(self)
        self.Demo = Demo(rGUIMainWindow)
        self.GUI = rGUIMainWindow
    # #==========================================
    # def __del__(self):
    # #==========================================
    #     self.wait()
    #==========================================
    def run(self):
    #==========================================
        try :
            global ThreadLock
            global Ref
            global Cur
            self.signal_Update.connect(self.GUI.slot_Update)
            while(1):
                # snapshot the shared frames under the lock, then work on copies
                ThreadLock.acquire()
                RefCopy = np.copy(Ref)
                CurCopy = np.copy(Cur)
                ThreadLock.release()
                self.Demo.processFramePair(RefCopy, CurCopy)
                CU_Depths = np.copy(self.Demo.AnalyzePOC_Inter.CU_Depths)
                MV_x = np.copy(self.Demo.AnalyzePOC_Inter.MV_x)
                MV_y = np.copy(self.Demo.AnalyzePOC_Inter.MV_y)
                # transpose for pyqtgraph's column-major image orientation
                Intra = cv2.transpose(cv2.cvtColor(self.Demo.ShowPOC_Intra.Final, cv2.COLOR_BGR2RGB))
                Inter = cv2.transpose(cv2.cvtColor(self.Demo.ShowPOC_Inter.Final, cv2.COLOR_BGR2RGB))
                LumDiff = getLumDiff(RefCopy, CurCopy)
                LumDiff = cv2.transpose(LumDiff)
                CurCopy = cv2.transpose(cv2.cvtColor(CurCopy, cv2.COLOR_YUV2BGR))
                self.signal_Update.emit((Intra, Inter, LumDiff, CurCopy, CU_Depths, MV_x, MV_y))
                # NOTE(review): terminate() inside the loop ends the thread
                # after a single iteration -- confirm this is intended.
                self.terminate()
        except Exception, Str: #prevent V4L2 to eat the webcam
            import traceback
            print "---- EXCEPTION! ----"
            print "ERROR:", sys.exc_info()[0]
            traceback.print_tb(sys.exc_info()[2])
            print Str
            print "---------------------------------"
            stopGrabbing()
            #cv2.destroyAllWindows()
            sys.exit(0)
#==========================================
if __name__ == '__main__':
#==========================================
    # Entry point: install a Ctrl+C handler, start the Qt app and main window.
    try :
        import signal
        # SIGINT handler: release the webcam before exiting so V4L2 is not left holding it
        def signal_handler(signal, frame):
            print 'You pressed Ctrl+C!'
            stopGrabbing()
            cv2.destroyAllWindows()
            time.sleep(1)
            sys.exit(0)
        signal.signal(signal.SIGINT, signal_handler)
        time.sleep(2) #give webcam some time
        app = QtGui.QApplication([])
        app.setQuitOnLastWindowClosed(True)
        MainWindow = GUIMainWindow()
        MainWindow.show()
        QtGui.QApplication.instance().exec_()
    except Exception, Str: #prevent V4L2 to eat the webcam
        import traceback
        print "---- EXCEPTION! ----"
        print "ERROR:", sys.exc_info()[0]
        traceback.print_tb(sys.exc_info()[2])
        print Str
        print "---------------------------------"
        stopGrabbing()
        #cv2.destroyAllWindows()
        sys.exit(0)
|
tcp-streaming-multicast-client-audio.video.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import socket
import base64
import pyaudio
import numpy as np
from threading import Thread
"""
File name: tcp-streaming-multicast-client-audio.video.py
Author: Jäger Cox // jagercox@gmail.com
Date created: 08/08/2016
License: MIT
Python Version: 2.7
Code guide line: PEP8
"""
__author__ = "Jäger Cox // jagercox@gmail.com"
__created__ = "08/08/2016"
__license__ = "MIT"
__version__ = "0.1"
__python_version__ = "2.7"
__email__ = "jagercox@gmail.com"
# Sockets channels configuration
# NOTE(review): 0.0.0.0 is a bind-any address; for a *client* the server's
# real IP presumably belongs here -- confirm before deploying.
IP_SERVER = "0.0.0.0"
VIDEO_SERVER_PORT = 11111
AUDIO_SERVER_PORT = 11112
TIMEOUT_SOCKET = 10  # seconds
# Webcam configuration
IMAGE_HEIGHT = 480
IMAGE_WIDTH = 640
COLOR_PIXEL = 3 # RGB
# PyAudio configuration
SIZE = 1024  # bytes per socket read / audio chunk
CHANNELS = 1
RATE = 10240  # sample rate in Hz
INPUT = True
FORMAT = pyaudio.paInt16
def audio_thread(socket_connection):
    """Receive raw paInt16 PCM audio from the server and play it back.

    Runs until the server closes the connection, then releases the audio
    device and socket.

    :param socket_connection: connected TCP socket delivering raw PCM chunks.
    """
    p = pyaudio.PyAudio()
    # BUG FIX: the original passed INPUT=INPUT, which is not a valid
    # pyaudio.PyAudio.open() keyword (it raises TypeError). Playback via
    # stream.write() requires an *output* stream.
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    output=True)
    while True:
        data = socket_connection.recv(SIZE)
        if not data:
            # BUG FIX: an empty read means the peer closed the connection;
            # the original looped forever here.
            break
        stream.write(data)
    socket_connection.close()
    stream.stop_stream()
    stream.close()
    p.terminate()
if __name__ == '__main__':
    # Socket video initialization
    connection_video = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    connection_video.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    connection_video.settimeout(TIMEOUT_SOCKET)
    # Socket audio initialization
    connection_audio = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    connection_audio.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    connection_audio.settimeout(TIMEOUT_SOCKET)
    # Connect channels
    connection_video.connect((IP_SERVER, VIDEO_SERVER_PORT))
    connection_audio.connect((IP_SERVER, AUDIO_SERVER_PORT))
    # Create a thread for audio channel
    t_audio = Thread(target=audio_thread, args=(connection_audio,))
    t_audio.start()
    # Main thread video: the server sends one base64-encoded raw frame per line
    while True:
        try:
            # Recept video data
            fileDescriptor = connection_video.makefile(mode='rb')
            result = fileDescriptor.readline()
            fileDescriptor.close()
            # Create image
            result = base64.b64decode(result)
            # NOTE(review): np.fromstring is deprecated; np.frombuffer is the
            # modern equivalent for this byte-to-array conversion.
            frame = np.fromstring(result, dtype=np.uint8)
            frame_matrix = np.array(frame)
            frame_matrix = np.reshape(frame_matrix, (IMAGE_HEIGHT, IMAGE_WIDTH,
                                                     COLOR_PIXEL))
            # Show viewer
            cv2.imshow('Client', frame_matrix)
            # Terminate
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        except Exception as e:
            print "[Error] " + str(e)
    cv2.destroyAllWindows()
    connection_video.close()
|
guestsnapshotter.py | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
try:
import urlparse as urlparser
except ImportError:
import urllib.parse as urlparser
import traceback
import datetime
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
import multiprocessing as mp
import datetime
from common import CommonVariables
from HttpUtil import HttpUtil
from Utils import Status
from Utils import HandlerUtil
from fsfreezer import FsFreezer
from Utils import HostSnapshotObjects
class SnapshotInfoIndexerObj():
    """Per-blob snapshot outcome, keyed by the blob's index in the request.

    statusCode starts at 500 and is overwritten with the real HTTP status
    once a response has been parsed.
    """
    def __init__(self, index, isSuccessful, snapshotTs, errorMessage):
        self.index = index
        self.isSuccessful = isSuccessful
        self.snapshotTs = snapshotTs
        self.errorMessage = errorMessage
        self.statusCode = 500
    def __str__(self):
        return ('index: %s isSuccessful: %s snapshotTs: %s'
                ' errorMessage: %s statusCode: %s'
                % (self.index, self.isSuccessful, self.snapshotTs,
                   self.errorMessage, self.statusCode))
class SnapshotError(object):
    """Result of one blob snapshot attempt.

    Starts out assuming success; callers overwrite errorcode/sasuri when
    the snapshot fails.
    """
    def __init__(self):
        self.errorcode = CommonVariables.success
        self.sasuri = None
    def __str__(self):
        return 'errorcode: %s' % (self.errorcode,)
class SnapshotResult(object):
    """Aggregate of SnapshotError objects collected across all blobs."""
    def __init__(self):
        self.errors = []
    def __str__(self):
        # one error per line (trailing newline preserved for log formatting)
        return "".join(str(err) + "\n" for err in self.errors)
class GuestSnapshotter(object):
    """Takes blob snapshots of the VM's disks through the Azure storage REST API."""
    def __init__(self, logger, hutil):
        """
        :param logger: extension logger exposing log(msg, ...)
        :param hutil: handler utility used to persist config values
        """
        self.logger = logger
        self.configfile = '/etc/azure/vmbackup.conf'
        self.hutil = hutil
def snapshot(self, sasuri, sasuri_index, meta_data, snapshot_result_error, snapshot_info_indexer_queue, global_logger, global_error_logger):
    """Worker-process entry point: snapshot one blob via the REST API.

    Runs inside a multiprocessing.Process, so logs and results are shipped
    back to the parent through the supplied queues rather than returned.

    :param sasuri: SAS URI of the blob (None is tolerated and reported as error)
    :param sasuri_index: index of this blob in the caller's blob array
    :param meta_data: optional list of {'Key','Value'} dicts sent as x-ms-meta-* headers
    :param snapshot_result_error: queue receiving a SnapshotError
    :param snapshot_info_indexer_queue: queue receiving a SnapshotInfoIndexerObj
    :param global_logger: queue receiving the accumulated info-log string
    :param global_error_logger: queue receiving the accumulated error-log string
    """
    temp_logger=''
    error_logger=''
    snapshot_error = SnapshotError()
    snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)
    if(sasuri is None):
        error_logger = error_logger + str(datetime.datetime.now()) + " Failed to do the snapshot because sasuri is none "
        snapshot_error.errorcode = CommonVariables.error
        snapshot_error.sasuri = sasuri
    try:
        sasuri_obj = urlparser.urlparse(sasuri)
        if(sasuri_obj is None or sasuri_obj.hostname is None):
            error_logger = error_logger + str(datetime.datetime.now()) + " Failed to parse the sasuri "
            snapshot_error.errorcode = CommonVariables.error
            snapshot_error.sasuri = sasuri
        else:
            start_time = datetime.datetime.utcnow()
            body_content = ''
            headers = {}
            headers["Content-Length"] = '0'
            # forward backup metadata as x-ms-meta-* blob headers
            if(meta_data is not None):
                for meta in meta_data:
                    key = meta['Key']
                    value = meta['Value']
                    headers["x-ms-meta-" + key] = value
            temp_logger = temp_logger + str(headers)
            http_util = HttpUtil(self.logger)
            # comp=snapshot turns the PUT into a Snapshot Blob operation
            sasuri_obj = urlparser.urlparse(sasuri + '&comp=snapshot')
            temp_logger = temp_logger + str(datetime.datetime.now()) + ' start calling the snapshot rest api. '
            # initiate http call for blob-snapshot and get http response
            result, httpResp, errMsg, responseBody = http_util.HttpCallGetResponse('PUT', sasuri_obj, body_content, headers = headers, responseBodyRequired = True)
            temp_logger = temp_logger + str("responseBody: " + responseBody)
            if(result == CommonVariables.success and httpResp != None):
                # retrieve snapshot information from http response
                snapshot_info_indexer, snapshot_error, message = self.httpresponse_get_snapshot_info(httpResp, sasuri_index, sasuri, responseBody)
                temp_logger = temp_logger + str(datetime.datetime.now()) + ' httpresponse_get_snapshot_info message: ' + str(message)
            else:
                # HttpCall failed
                error_logger = error_logger + str(datetime.datetime.now()) + " snapshot HttpCallGetResponse failed "
                error_logger = error_logger + str(datetime.datetime.now()) + str(errMsg)
                snapshot_error.errorcode = CommonVariables.error
                snapshot_error.sasuri = sasuri
            end_time = datetime.datetime.utcnow()
            time_taken=end_time-start_time
            temp_logger = temp_logger + str(datetime.datetime.now()) + ' time taken for snapshot ' + str(time_taken)
    except Exception as e:
        errorMsg = " Failed to do the snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
        error_logger = error_logger + str(datetime.datetime.now()) + errorMsg
        snapshot_error.errorcode = CommonVariables.error
        snapshot_error.sasuri = sasuri
    temp_logger=temp_logger + str(datetime.datetime.now()) + ' snapshot ends..'
    # ship everything back to the parent process
    global_logger.put(temp_logger)
    global_error_logger.put(error_logger)
    snapshot_result_error.put(snapshot_error)
    snapshot_info_indexer_queue.put(snapshot_info_indexer)
def snapshot_seq(self, sasuri, sasuri_index, meta_data):
    """Snapshot one blob synchronously (in-process variant of snapshot()).

    :param sasuri: SAS URI of the blob (None tolerated, reported as error)
    :param sasuri_index: index of this blob in the caller's blob array
    :param meta_data: optional list of {'Key','Value'} dicts sent as x-ms-meta-* headers
    :return: (SnapshotError, SnapshotInfoIndexerObj)
    """
    result = None
    snapshot_error = SnapshotError()
    snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)
    if(sasuri is None):
        self.logger.log("Failed to do the snapshot because sasuri is none",False,'Error')
        snapshot_error.errorcode = CommonVariables.error
        snapshot_error.sasuri = sasuri
    try:
        sasuri_obj = urlparser.urlparse(sasuri)
        if(sasuri_obj is None or sasuri_obj.hostname is None):
            self.logger.log("Failed to parse the sasuri",False,'Error')
            snapshot_error.errorcode = CommonVariables.error
            snapshot_error.sasuri = sasuri
        else:
            body_content = ''
            headers = {}
            headers["Content-Length"] = '0'
            # forward backup metadata as x-ms-meta-* blob headers
            if(meta_data is not None):
                for meta in meta_data:
                    key = meta['Key']
                    value = meta['Value']
                    headers["x-ms-meta-" + key] = value
            self.logger.log(str(headers))
            http_util = HttpUtil(self.logger)
            # comp=snapshot turns the PUT into a Snapshot Blob operation
            sasuri_obj = urlparser.urlparse(sasuri + '&comp=snapshot')
            self.logger.log("start calling the snapshot rest api")
            # initiate http call for blob-snapshot and get http response
            result, httpResp, errMsg, responseBody = http_util.HttpCallGetResponse('PUT', sasuri_obj, body_content, headers = headers, responseBodyRequired = True)
            self.logger.log("responseBody: " + responseBody)
            if(result == CommonVariables.success and httpResp != None):
                # retrieve snapshot information from http response
                snapshot_info_indexer, snapshot_error, message = self.httpresponse_get_snapshot_info(httpResp, sasuri_index, sasuri, responseBody)
                self.logger.log(' httpresponse_get_snapshot_info message: ' + str(message))
            else:
                # HttpCall failed
                self.logger.log(" snapshot HttpCallGetResponse failed ")
                self.logger.log(str(errMsg))
                snapshot_error.errorcode = CommonVariables.error
                snapshot_error.sasuri = sasuri
    except Exception as e:
        errorMsg = "Failed to do the snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
        self.logger.log(errorMsg, False, 'Error')
        snapshot_error.errorcode = CommonVariables.error
        snapshot_error.sasuri = sasuri
    return snapshot_error, snapshot_info_indexer
def snapshotall_parallel(self, paras, freezer, thaw_done, g_fsfreeze_on):
    """Snapshot every blob in paras.blobs concurrently, one process per blob.

    :param paras: backup parameters; uses paras.blobs and paras.backup_metadata
    :param freezer: FsFreezer used to thaw filesystems once snapshots are started
    :param thaw_done: True if filesystems were already thawed by the caller
    :param g_fsfreeze_on: whether fsfreeze is enabled for this backup
    :return: (snapshot_result, blob_snapshot_info_array, all_failed,
              exceptOccurred, is_inconsistent, thaw_done_local,
              unable_to_sleep, all_snapshots_failed)
    """
    self.logger.log("doing snapshotall now in parallel...")
    snapshot_result = SnapshotResult()
    blob_snapshot_info_array = []
    all_failed = True
    exceptOccurred = False
    is_inconsistent = False
    thaw_done_local = thaw_done
    unable_to_sleep = False
    all_snapshots_failed = False
    set_next_backup_to_seq = False
    try:
        self.logger.log("before start of multiprocessing queues..")
        mp_jobs = []
        queue_creation_starttime = datetime.datetime.now()
        global_logger = mp.Queue()
        global_error_logger = mp.Queue()
        snapshot_result_error = mp.Queue()
        snapshot_info_indexer_queue = mp.Queue()
        time_before_snapshot_start = datetime.datetime.now()
        blobs = paras.blobs
        if blobs is not None:
            # one BlobSnapshotInfo placeholder and one worker process per blob
            mp_jobs = []
            blob_index = 0
            for blob in blobs:
                blobUri = blob.split("?")[0]
                self.logger.log("index: " + str(blob_index) + " blobUri: " + str(blobUri))
                blob_snapshot_info_array.append(HostSnapshotObjects.BlobSnapshotInfo(False, blobUri, None, 500))
                try:
                    mp_jobs.append(mp.Process(target=self.snapshot, args=(blob, blob_index, paras.backup_metadata, snapshot_result_error, snapshot_info_indexer_queue, global_logger, global_error_logger)))
                except Exception as e:
                    self.logger.log("multiprocess queue creation failed")
                    all_snapshots_failed = True
                    raise Exception("Exception while creating multiprocess queue")
                blob_index = blob_index + 1
            counter = 0
            for job in mp_jobs:
                job.start()
                if(counter == 0):
                    # slow process creation signals this host should fall back
                    # to sequential snapshots on the next backup
                    queue_creation_endtime = datetime.datetime.now()
                    timediff = queue_creation_endtime - queue_creation_starttime
                    if(timediff.seconds >= 10):
                        self.logger.log("mp queue creation took more than 10 secs. Setting next backup to sequential")
                        set_next_backup_to_seq = True
                counter = counter + 1
            time_after_snapshot_start = datetime.datetime.now()
            timeout = self.get_value_from_configfile('timeout')
            if timeout == None:
                timeout = 60
            # NOTE(review): timeout is currently unused -- join() below waits indefinitely
            for job in mp_jobs:
                job.join()
            thaw_result = None
            if g_fsfreeze_on and thaw_done_local == False:
                time_before_thaw = datetime.datetime.now()
                thaw_result, unable_to_sleep = freezer.thaw_safe()
                time_after_thaw = datetime.datetime.now()
                HandlerUtil.HandlerUtility.add_to_telemetery_data("ThawTime", str(time_after_thaw-time_before_thaw))
                thaw_done_local = True
            if(set_next_backup_to_seq == True):
                self.logger.log("Setting to sequential snapshot")
                self.hutil.set_value_to_configfile('seqsnapshot', '1')
            self.logger.log('T:S thaw result ' + str(thaw_result))
            if(thaw_result is not None and len(thaw_result.errors) > 0 and (snapshot_result is None or len(snapshot_result.errors) == 0)):
                # thaw failed after an otherwise clean snapshot: mark inconsistent
                is_inconsistent = True
                snapshot_result.errors.append(thaw_result.errors)
                return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
            self.logger.log('end of snapshot process')
            # drain the per-worker log queues
            logging = [global_logger.get() for job in mp_jobs]
            self.logger.log(str(logging))
            error_logging = [global_error_logger.get() for job in mp_jobs]
            self.logger.log(str(error_logging),False,'Error')
            if not snapshot_result_error.empty():
                results = [snapshot_result_error.get() for job in mp_jobs]
                for result in results:
                    if(result.errorcode != CommonVariables.success):
                        snapshot_result.errors.append(result)
            if not snapshot_info_indexer_queue.empty():
                snapshot_info_indexers = [snapshot_info_indexer_queue.get() for job in mp_jobs]
                for snapshot_info_indexer in snapshot_info_indexers:
                    # update blob_snapshot_info_array element properties from snapshot_info_indexer object
                    self.get_snapshot_info(snapshot_info_indexer, blob_snapshot_info_array[snapshot_info_indexer.index])
                    if (blob_snapshot_info_array[snapshot_info_indexer.index].isSuccessful == True):
                        all_failed = False
                    self.logger.log("index: " + str(snapshot_info_indexer.index) + " blobSnapshotUri: " + str(blob_snapshot_info_array[snapshot_info_indexer.index].snapshotUri))
            all_snapshots_failed = all_failed
            self.logger.log("Setting all_snapshots_failed to " + str(all_snapshots_failed))
            return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
        else:
            self.logger.log("the blobs are None")
            # BUG FIX: this branch used to return a 7-tuple (all_snapshots_failed
            # was missing); callers unpack 8 values, which raised ValueError.
            return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
    except Exception as e:
        errorMsg = " Unable to perform parallel snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
        self.logger.log(errorMsg)
        exceptOccurred = True
        return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
def snapshotall_seq(self, paras, freezer, thaw_done, g_fsfreeze_on):
    """Snapshot every blob in paras.blobs one after another, in-process.

    :param paras: backup parameters; uses paras.blobs and paras.backup_metadata
    :param freezer: FsFreezer used to thaw filesystems once snapshots are taken
    :param thaw_done: True if filesystems were already thawed by the caller
    :param g_fsfreeze_on: whether fsfreeze is enabled for this backup
    :return: (snapshot_result, blob_snapshot_info_array, all_failed,
              exceptOccurred, is_inconsistent, thaw_done_local,
              unable_to_sleep, all_snapshots_failed)
    """
    exceptOccurred = False
    self.logger.log("doing snapshotall now in sequence...")
    snapshot_result = SnapshotResult()
    blob_snapshot_info_array = []
    all_failed = True
    is_inconsistent = False
    thaw_done_local = thaw_done
    unable_to_sleep = False
    all_snapshots_failed = False
    try:
        blobs = paras.blobs
        if blobs is not None:
            blob_index = 0
            for blob in blobs:
                blobUri = blob.split("?")[0]
                self.logger.log("index: " + str(blob_index) + " blobUri: " + str(blobUri))
                blob_snapshot_info_array.append(HostSnapshotObjects.BlobSnapshotInfo(False, blobUri, None, 500))
                snapshotError, snapshot_info_indexer = self.snapshot_seq(blob, blob_index, paras.backup_metadata)
                if(snapshotError.errorcode != CommonVariables.success):
                    snapshot_result.errors.append(snapshotError)
                # update blob_snapshot_info_array element properties from snapshot_info_indexer object
                self.get_snapshot_info(snapshot_info_indexer, blob_snapshot_info_array[blob_index])
                if (blob_snapshot_info_array[blob_index].isSuccessful == True):
                    all_failed = False
                blob_index = blob_index + 1
            all_snapshots_failed = all_failed
            self.logger.log("Setting all_snapshots_failed to " + str(all_snapshots_failed))
            thaw_result= None
            if g_fsfreeze_on and thaw_done_local== False:
                time_before_thaw = datetime.datetime.now()
                thaw_result, unable_to_sleep = freezer.thaw_safe()
                time_after_thaw = datetime.datetime.now()
                HandlerUtil.HandlerUtility.add_to_telemetery_data("ThawTime", str(time_after_thaw-time_before_thaw))
                thaw_done_local = True
            self.logger.log('T:S thaw result ' + str(thaw_result))
            if(thaw_result is not None and len(thaw_result.errors) > 0 and (snapshot_result is None or len(snapshot_result.errors) == 0)):
                # thaw failed after an otherwise clean snapshot: mark inconsistent
                snapshot_result.errors.append(thaw_result.errors)
                is_inconsistent= True
            return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
        else:
            self.logger.log("the blobs are None")
            # BUG FIX: this branch used to return a 7-tuple (all_snapshots_failed
            # was missing); callers unpack 8 values, which raised ValueError.
            return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
    except Exception as e:
        errorMsg = " Unable to perform sequential snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
        self.logger.log(errorMsg)
        exceptOccurred = True
        return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
def get_value_from_configfile(self, key):
value = None
configfile = '/etc/azure/vmbackup.conf'
try :
if os.path.exists(configfile):
config = ConfigParsers.ConfigParser()
config.read(configfile)
if config.has_option('SnapshotThread',key):
value = config.get('SnapshotThread',key)
else:
self.logger.log("Config File doesn't have the key :" + key)
except Exception as e:
errorMsg = " Unable to ed config file.key is "+ key +"with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.logger.log(errorMsg)
return value
def snapshotall(self, paras, freezer, g_fsfreeze_on):
    """Snapshot all blobs, choosing sequential or parallel mode.

    Sequential mode is used when the config requests it ('seqsnapshot' of
    '1' or '2') or when there are few blobs; otherwise snapshots run in
    parallel, falling back to sequential if the parallel pass blew up
    before any snapshot succeeded or a thaw happened.
    """
    thaw_done = False
    # same short-circuit order as before: each config read may log on miss
    use_sequential = (self.get_value_from_configfile('seqsnapshot') == '1'
                      or self.get_value_from_configfile('seqsnapshot') == '2'
                      or len(paras.blobs) <= 4)
    if use_sequential:
        snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done, unable_to_sleep, all_snapshots_failed = self.snapshotall_seq(paras, freezer, thaw_done, g_fsfreeze_on)
    else:
        snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done, unable_to_sleep, all_snapshots_failed = self.snapshotall_parallel(paras, freezer, thaw_done, g_fsfreeze_on)
    self.logger.log("exceptOccurred : " + str(exceptOccurred) + " thaw_done : " + str(thaw_done) + " all_snapshots_failed : " + str(all_snapshots_failed))
    if exceptOccurred and thaw_done == False and all_snapshots_failed:
        self.logger.log("Trying sequential snapshotting as parallel snapshotting failed")
        snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done, unable_to_sleep, all_snapshots_failed = self.snapshotall_seq(paras, freezer, thaw_done, g_fsfreeze_on)
    return snapshot_result, blob_snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep, all_snapshots_failed
def httpresponse_get_snapshot_info(self, resp, sasuri_index, sasuri, responseBody):
    """Translate a blob-snapshot HTTP response into result objects.

    :param resp: HTTP response object (or None when the call produced none)
    :param sasuri_index: index of the blob in the caller's blob array
    :param sasuri: blob SAS URI, recorded on failure
    :param responseBody: response body text, kept as the error message on failure
    :return: (SnapshotInfoIndexerObj, SnapshotError, log message string)
    """
    snapshot_error = SnapshotError()
    snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)
    result = CommonVariables.error_http_failure
    message = ""
    if(resp != None):
        message = message + str(datetime.datetime.now()) + " snapshot resp status: " + str(resp.status) + " "
        resp_headers = resp.getheaders()
        message = message + str(datetime.datetime.now()) + " snapshot resp-header: " + str(resp_headers) + " "
        if(resp.status == 200 or resp.status == 201):
            # success: the service returns the snapshot timestamp in x-ms-snapshot
            result = CommonVariables.success
            snapshot_info_indexer.isSuccessful = True
            snapshot_info_indexer.snapshotTs = resp.getheader('x-ms-snapshot')
        else:
            # non-2xx: keep the body and status for diagnostics
            result = resp.status
            snapshot_info_indexer.errorMessage = responseBody
            snapshot_info_indexer.statusCode = resp.status
    else:
        message = message + str(datetime.datetime.now()) + " snapshot Http connection response is None" + " "
    message = message + str(datetime.datetime.now()) + ' snapshot api returned: {0} '.format(result) + " "
    if(result != CommonVariables.success):
        snapshot_error.errorcode = result
        snapshot_error.sasuri = sasuri
    return snapshot_info_indexer, snapshot_error, message
def get_snapshot_info(self, snapshot_info_indexer, snapshot_info):
if (snapshot_info_indexer != None):
self.logger.log("snapshot_info_indexer: " + str(snapshot_info_indexer))
snapshot_info.isSuccessful = snapshot_info_indexer.isSuccessful
if (snapshot_info.isSuccessful == True):
snapshot_info.snapshotUri = snapshot_info.snapshotUri + "?snapshot=" + str(snapshot_info_indexer.snapshotTs)
else:
snapshot_info.snapshotUri = None
snapshot_info.errorMessage = snapshot_info_indexer.errorMessage
snapshot_info.statusCode = snapshot_info_indexer.statusCode
else:
snapshot_info.isSuccessful = False
snapshot_info.snapshotUri = None
|
terminal_process.py | from .typecheck import *
from .import core
from .terminal import Terminal
import threading
import re
# Pick a pty backend: winpty on Windows, bundled ptyprocess elsewhere
# (wrapped so read() returns decoded str instead of bytes).
try:
    if core.platform.windows:
        from winpty import PtyProcess #type: ignore
    else:
        from ..libs.ptyprocess import PtyProcess as _PtyProcess #type: ignore
        class PtyProcess(_PtyProcess): #type: ignore
            def read(self):
                # ptyprocess yields bytes; normalize to utf-8 text
                return super().read().decode('utf-8')
    SUPPORTED = True
except ImportError:
    # this stuff is broken on > 4000 until the packages are 3.8 compatible
    SUPPORTED = False
# from https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
ansi_escape = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
class TtyProcess:
    """Spawns a command on a pty and streams its ANSI-stripped output.

    A reader thread pumps decoded output chunks to on_output; callbacks are
    dispatched onto the core event loop via call_soon_threadsafe.
    """
    def __init__(self, command: List[str], on_output: Optional[Callable[[str], None]], on_close: Optional[Callable[[], None]] = None, cwd=None) -> None:
        """
        :param command: argv list to spawn
        :param on_output: called with each chunk of decoded output (optional)
        :param on_close: called once when the process is closed (optional)
        :param cwd: working directory for the child process
        """
        print('Starting process: {}'.format(command))
        self.process = PtyProcess.spawn(command, cwd=cwd)
        self.pid = self.process.pid
        self.on_close = on_close
        self.closed = False
        if on_output:
            # background reader; ends on EOF, error, or close()
            thread = threading.Thread(target=self._read, args=(on_output,))
            thread.start()
    def _read(self, callback: Callable[[str], None]) -> None:
        # Runs on the reader thread until the pty stops producing data.
        while not self.closed:
            try:
                characters = self.process.read()
                if not characters:
                    core.log_info("Nothing to read from process, closing")
                    break
                #this isn't perfect we can easily miss some escapes since characters could span part of a single escape sequence...
                characters = ansi_escape.sub('', characters)
                core.call_soon_threadsafe(callback, characters)
            except EOFError as err:
                break
            except Exception as err:
                core.log_exception()
                break
        self.close()
    def write(self, text: str):
        # Send text to the child's stdin (pty layer expects bytes).
        self.process.write(bytes(text, 'utf-8'))
    def close(self) -> None:
        # Idempotent: fire on_close once, then force-kill the pty.
        if self.closed:
            return
        if self.on_close:
            core.call_soon_threadsafe(self.on_close)
        self.closed = True
        self.process.close(force=True,)
    def dispose(self) -> None:
        # Best-effort close; never raises.
        try:
            self.close()
        except Exception as e:
            core.log_exception(e)
class TerminalProcess (Terminal):
    """A Terminal whose content is backed by a local pty process."""
    def __init__(self, cwd: Optional[str], args: List[str]) -> None:
        """
        :param cwd: working directory ("" is treated as no cwd)
        :param args: argv list of the command to spawn
        """
        cwd = cwd or None # turn "" into None
        super().__init__("Terminal", cwd=cwd)
        self.process = TtyProcess(args, on_output=self.on_process_output, cwd=cwd)
    def pid(self) -> int:
        """Process id of the spawned child."""
        return self.process.pid
    def on_process_output(self, output: str) -> None:
        # forward pty output into the terminal's stdout stream
        self.add('stdout', output)
    def writeable(self) -> bool:
        return True
    def writeable_prompt(self) -> str:
        return ("click to write escaped input to stdin"
                if self.escape_input
                else "click to write a line to stdin")
    def write(self, text: str):
        payload = text
        if self.escape_input:
            # interpret backslash escapes typed by the user
            payload = payload.encode('utf-8').decode("unicode_escape")
        self.process.write(payload + '\n')
    def can_escape_input(self) -> bool:
        return True
    def dispose(self):
        self.process.dispose()
|
train_pg_f18.py | from __future__ import print_function, division
from multiprocessing import Process
import inspect
import logz
import time
import os
import tensorflow as tf
import numpy as np
import pybulletgym.envs
import pybullet
import gym
"""
# Notes on notation =======================================================================
Symbolic variables have the prefix sym_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
obs - observation
act - action
_no - this tensor should have shape (batch self.size /n/, observation dim)
_na - this tensor should have shape (batch self.size /n/, action dim)
_n - this tensor should have shape (batch self.size /n/)
Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sym_logprobs_n and self.sym_adv_n that we will differentiate
to get the policy gradient.
"""
# Utilities =================================================================================
def build_mlp(
    input_placeholder,
    output_size,
    scope,
    n_layers,
    size,
    activation=tf.tanh,
    output_activation=None,
):
    """Build a feed-forward network and return its output tensor.

    arguments:
        input_placeholder: placeholder variable for the state (batch_size, input_size)
        output_size: size of the output layer
        scope: variable scope of the network
        n_layers: number of hidden layers
        size: dimension of the hidden layers
        activation: activation of the hidden layers
        output_activation: activation of the output layer
    returns:
        output placeholder of the network (the result of a forward pass)
    """
    # He-style initialization (variance scaling with scale 2.0).
    kernel_init = tf.initializers.variance_scaling(scale=2.0)
    with tf.name_scope(scope):
        net = input_placeholder
        for _ in range(n_layers):
            net = tf.layers.dense(
                inputs=net, units=size, activation=activation, kernel_initializer=kernel_init
            )
        output_placeholder = tf.layers.dense(
            inputs=net,
            units=output_size,
            activation=output_activation,
            kernel_initializer=kernel_init,
        )
    # Sanity-check the static output shape against the expected one.
    batch_size = input_placeholder.shape.as_list()[0]
    actual_shape = output_placeholder.shape.as_list()
    expected_shape = [batch_size, output_size]
    assert actual_shape == expected_shape, "\n\noutput_placeholder.shape.as_list() is: {} \n [batch_size, output_size] is: {}\n\n".format(
        actual_shape, expected_shape
    )
    return output_placeholder
def pathlength(path):
    """Return the number of timesteps in a rollout dict (length of its reward array)."""
    rewards = path["reward"]
    return len(rewards)
def setup_logger(logdir, locals_):
    """Configure logz output and persist the train_PG call parameters.

    arguments:
        logdir: directory where logz writes its output
        locals_: the caller's locals() mapping; any train_PG parameter name
            not present in it is recorded as None
    """
    # Configure output directory for logging
    logz.configure_output_dir(logdir)
    # Log experimental parameters. inspect.getargspec was deprecated and
    # removed in Python 3.11; inspect.signature is the supported replacement
    # and yields the same parameter names here.
    arg_names = list(inspect.signature(train_PG).parameters)
    params = {name: locals_.get(name) for name in arg_names}
    logz.save_params(params)
# Policy Gradient ===========================================================================
class Agent(object):
    """Vanilla policy-gradient (REINFORCE) agent for TF1 graph mode.

    Builds a stochastic policy network (categorical logits for discrete
    action spaces, diagonal Gaussian for continuous ones), samples rollouts
    from a gym-style environment, turns rewards into Monte Carlo Q-value
    estimates and advantages, and applies policy-gradient updates --
    optionally with a learned neural-network baseline for variance reduction.

    Notation: sym_ prefixes symbolic (graph) tensors; _no / _na / _n suffixes
    denote shapes (batch, obs_dim), (batch, act_dim), and (batch,).
    """

    def __init__(
        self, computation_graph_args, sample_trajectory_args, estimate_return_args
    ):
        """Store hyperparameters from the three configuration dicts.

        arguments:
            computation_graph_args: obs_dim, act_dim, discrete, size,
                n_layers, learning_rate
            sample_trajectory_args: animate, max_path_length,
                min_timesteps_per_batch
            estimate_return_args: gamma, reward_to_go, nn_baseline,
                normalize_advantages
        """
        super(Agent, self).__init__()
        # Network / optimization hyperparameters.
        self.obs_dim = computation_graph_args["obs_dim"]
        self.act_dim = computation_graph_args["act_dim"]
        self.discrete = computation_graph_args["discrete"]
        self.size = computation_graph_args["size"]
        self.n_layers = computation_graph_args["n_layers"]
        self.learning_rate = computation_graph_args["learning_rate"]
        # Rollout-collection parameters.
        self.animate = sample_trajectory_args["animate"]
        self.max_path_length = sample_trajectory_args["max_path_length"]
        self.min_timesteps_per_batch = sample_trajectory_args["min_timesteps_per_batch"]
        # Return-estimation parameters.
        self.gamma = estimate_return_args["gamma"]
        # Precomputed discount factors [gamma^0, ..., gamma^(T-1)] with
        # T = max_path_length; sample_trajectory guarantees no path exceeds T.
        self.gammas = np.power(
            (self.gamma * np.ones(self.max_path_length)),
            np.arange(self.max_path_length),
        )
        self.reward_to_go = estimate_return_args["reward_to_go"]
        self.nn_baseline = estimate_return_args["nn_baseline"]
        self.normalize_advantages = estimate_return_args["normalize_advantages"]

    def init_tf_sess(self):
        """Create a single-threaded TF session and initialize all variables."""
        tf_config = tf.ConfigProto(
            inter_op_parallelism_threads=1, intra_op_parallelism_threads=1
        )
        self.sess = tf.Session(config=tf_config)
        self.sess.__enter__()  # equivalent to `with self.sess:`
        tf.global_variables_initializer().run()  # pylint: disable=E1101

    def define_placeholders(self):
        """Create placeholders for observations, actions, and advantages.

        returns:
            sym_obs_no: (batch, obs_dim) float32 observations
            sym_act_na: (batch,) int32 actions if discrete,
                        (batch, act_dim) float32 otherwise
            sym_adv_n:  (batch,) float32 advantages
        """
        sym_obs_no = tf.placeholder(
            shape=[None, self.obs_dim], name="observations", dtype=tf.float32
        )
        if self.discrete:
            sym_act_na = tf.placeholder(shape=[None], name="actions", dtype=tf.int32)
        else:
            sym_act_na = tf.placeholder(
                shape=[None, self.act_dim], name="actions", dtype=tf.float32
            )
        sym_adv_n = tf.placeholder(shape=[None], name="advantages", dtype=tf.float32)
        return sym_obs_no, sym_act_na, sym_adv_n

    def policy_forward_pass(self, sym_obs_no):
        """Build the policy network and return its distribution parameters.

        arguments:
            sym_obs_no: (batch_size, self.obs_dim)
        returns:
            if discrete: sym_logits_na (batch_size, self.act_dim) -- logits of
                a categorical distribution over actions
            if continuous: (sym_mean, sym_logstd) of a diagonal Gaussian;
                sym_logstd is a free trainable variable shared across states,
                shape (self.act_dim,)
        """
        # Both cases use the same MLP; its output is the logits (discrete)
        # or the Gaussian mean (continuous).
        net_out = build_mlp(
            input_placeholder=sym_obs_no,
            output_size=self.act_dim,
            scope="policy_forward_pass",
            n_layers=self.n_layers,
            size=self.size,
            activation=tf.tanh,
            output_activation=None,
        )
        if self.discrete:
            return net_out
        sym_logstd = tf.get_variable(
            name="log_std",
            initializer=-0.5 * np.ones(self.act_dim, dtype=np.float32),
        )
        return (net_out, sym_logstd)

    def sample_action(self, policy_parameters):
        """Symbolic op sampling one action per batch row from the policy.

        returns:
            if discrete: (batch_size, 1) int64 samples from the logits
            if continuous: (batch_size, self.act_dim) Gaussian samples
        """
        if self.discrete:
            sym_logits_na = policy_parameters
            sym_sampled_act = tf.random.multinomial(sym_logits_na, 1)
        else:
            sym_mean, sym_logstd = policy_parameters
            # Reparameterization trick: a = mu + sigma * z, z ~ N(0, I).
            # FIX: sigma is exp(log_std); the original multiplied the noise by
            # the raw log-std (negative at init), which is inconsistent with
            # the exp(log_std) used in get_log_prob.
            sym_sampled_act = (
                sym_mean + tf.random.normal(tf.shape(sym_mean)) * tf.exp(sym_logstd)
            )
        return sym_sampled_act

    def get_log_prob(self, policy_parameters, sym_act_na):
        """Symbolic log pi(a|s) for the actions actually taken.

        arguments:
            policy_parameters: as returned by policy_forward_pass
            sym_act_na: (batch,) int actions if discrete,
                        (batch, act_dim) floats otherwise
        returns:
            sym_logprobs_n: (batch_size,)
        """
        if self.discrete:
            sym_logits_na = policy_parameters
            # log_softmax == logits - logsumexp(logits), computed stably.
            sym_logprobs_all_actions_na = tf.nn.log_softmax(sym_logits_na)
            # One-hot mask keeps only the log-prob of the action taken.
            sym_logprobs_only_action_taken_na = (
                tf.one_hot(sym_act_na, depth=self.act_dim) * sym_logprobs_all_actions_na
            )
            sym_logprobs_n = tf.reduce_sum(sym_logprobs_only_action_taken_na, axis=1)
        else:
            sym_mean, sym_logstd = policy_parameters
            # Diagonal-Gaussian log density, summed over action dimensions;
            # eps guards against division by a vanishing std.
            eps = 0.00001
            pre_sum = -0.5 * (
                ((sym_act_na - sym_mean) / (tf.exp(sym_logstd) + eps)) ** 2
                + 2 * sym_logstd
                + np.log(2 * np.pi)
            )
            sym_logprobs_n = tf.reduce_sum(pre_sum, axis=1)
        return sym_logprobs_n

    def build_computation_graph(self):
        """Assemble placeholders, policy, sampling op, loss, and update ops.

        loss is the negative advantage-weighted sum of log-probabilities; its
        gradient is the (negated, for minimization) policy gradient. If
        nn_baseline is set, also builds the baseline network, its target
        placeholder, loss, and update op.
        """
        self.sym_obs_no, self.sym_act_na, self.sym_adv_n = self.define_placeholders()
        # The policy maps an observation to a distribution over actions.
        self.policy_parameters = self.policy_forward_pass(self.sym_obs_no)
        # Sampling op, used by sample_trajectory during rollouts.
        self.sym_sampled_act = self.sample_action(self.policy_parameters)
        # Log-prob of the actions actually taken; used in the loss.
        self.sym_logprobs_n = self.get_log_prob(self.policy_parameters, self.sym_act_na)
        # Loss = -sum_i log pi(a_i|s_i) * adv_i  (negated for minimization).
        adv_weighted_loss_n = self.sym_logprobs_n * self.sym_adv_n
        self.loss = -tf.reduce_sum(adv_weighted_loss_n)
        self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
        if self.nn_baseline:
            # State-value baseline network; squeeze (batch, 1) -> (batch,).
            self.baseline_prediction = tf.squeeze(
                build_mlp(
                    self.sym_obs_no,
                    1,
                    "nn_baseline",
                    n_layers=self.n_layers,
                    size=self.size,
                )
            )
            # Targets are normalized Q-values (see update_parameters).
            self.sym_target_n = tf.placeholder(
                shape=[None], name="value_targets", dtype=tf.float32
            )
            self.baseline_loss = tf.reduce_sum(
                tf.square(self.sym_target_n - self.baseline_prediction)
            )
            self.baseline_update_op = tf.train.AdamOptimizer(
                self.learning_rate
            ).minimize(self.baseline_loss)

    def sample_trajectories(self, itr, env):
        """Collect whole episodes until at least min_timesteps_per_batch steps.

        returns:
            (paths, timesteps_this_batch)
        """
        timesteps_this_batch = 0
        paths = []
        while True:
            # Render at most the first episode of every 10th iteration.
            animate_this_episode = len(paths) == 0 and (itr % 10 == 0) and self.animate
            path = self.sample_trajectory(env, animate_this_episode)
            paths.append(path)
            timesteps_this_batch += pathlength(path)
            if timesteps_this_batch > self.min_timesteps_per_batch:
                break
        return paths, timesteps_this_batch

    def sample_trajectory(self, env, animate_this_episode):
        """Roll out one episode with the current policy.

        returns:
            dict with float32 arrays 'observation', 'reward', 'action'
        """
        ob = env.reset()
        obs, acs, rewards = [], [], []
        steps = 0
        while True:
            if animate_this_episode:
                env.render(mode="human")
            obs.append(ob)
            # The graph expects a (1, obs_dim) batch.
            ob = np.reshape(ob, (1, -1))
            ac = self.sess.run(self.sym_sampled_act, feed_dict={self.sym_obs_no: ob})
            ac = ac[0]
            if self.discrete:
                # tf.random.multinomial returns shape (1, 1); unwrap to a scalar.
                ac = ac[0]
            acs.append(ac)
            ob, rew, done, _ = env.step(ac)
            rewards.append(rew)
            steps += 1
            # FIX: use >= so no path exceeds max_path_length; the original `>`
            # allowed max_path_length + 1 steps, overrunning the precomputed
            # self.gammas in sum_of_rewards (shape mismatch).
            if done or steps >= self.max_path_length:
                break
        path = {
            "observation": np.array(obs, dtype=np.float32),
            "reward": np.array(rewards, dtype=np.float32),
            "action": np.array(acs, dtype=np.float32),
        }
        return path

    def sum_of_rewards(self, re_n):
        """Monte Carlo Q-value estimates for a batch of paths.

        arguments:
            re_n: list (length num_paths) of per-path reward arrays
        returns:
            q_n: (sum_of_path_lengths,) concatenated Q estimates.
                reward_to_go=True:  Q_t = sum_{t'>=t} gamma^(t'-t) r_{t'}
                reward_to_go=False: every timestep gets the full discounted
                                    trajectory return Ret(tau)
        """
        q_n = []
        for path in re_n:
            pathlen = len(path)
            if self.reward_to_go:
                # Single O(T) reverse pass (running = r_t + gamma * running)
                # instead of the original O(T^2) per-timestep sums.
                q_path = np.empty(pathlen)
                running = 0.0
                for t in reversed(range(pathlen)):
                    running = path[t] + self.gamma * running
                    q_path[t] = running
                q_n.append(q_path)
            else:
                # The full return is identical for every timestep; compute it
                # once instead of once per timestep (was accidentally O(T^2)).
                ret = np.sum(path * self.gammas[:pathlen])
                q_n.append(np.full(pathlen, ret))
        if not q_n:
            return np.array([])
        return np.concatenate(q_n)

    def compute_advantage(self, obs_no, q_n):
        """Subtract the (optional) baseline from the Q estimates.

        arguments:
            obs_no: (sum_of_path_lengths, obs_dim)
            q_n:    (sum_of_path_lengths,)
        returns:
            adv_n:  (sum_of_path_lengths,)
        """
        if self.nn_baseline:
            b_n = self.sess.run(
                self.baseline_prediction, feed_dict={self.sym_obs_no: obs_no}
            )
            b_n = np.asarray(b_n).flatten()
            # FIX: the baseline is trained on normalized targets (mean 0,
            # std 1; see update_parameters), so its predictions must be
            # rescaled *to* the Q statistics: b * std(q) + mean(q). The
            # original applied the normalization backwards and used the
            # variance instead of the standard deviation.
            b_n = b_n * np.std(q_n) + np.mean(q_n)
            adv_n = q_n - b_n
        else:
            adv_n = q_n.copy()
        return adv_n

    def estimate_return(self, obs_no, re_n):
        """Estimate Q-values and advantages over a batch of trajectories.

        returns:
            (q_n, adv_n), both of shape (sum_of_path_lengths,)
        """
        q_n = self.sum_of_rewards(re_n)
        adv_n = self.compute_advantage(obs_no, q_n)
        if self.normalize_advantages:
            # Standardize to mean 0 / std 1 -- a well-known variance-reduction
            # trick. FIX: divide by the standard deviation (the original
            # divided by np.var, which does not yield std = 1); the epsilon
            # guards against a zero-variance batch.
            adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-8)
        return q_n, adv_n

    def update_parameters(self, obs_no, act_na, q_n, adv_n):
        """Run one gradient step on the policy (and baseline, if enabled).

        arguments:
            obs_no: (sum_of_path_lengths, obs_dim)
            act_na: actions taken
            q_n:    (sum_of_path_lengths,) Q estimates (baseline targets)
            adv_n:  (sum_of_path_lengths,) advantages
        returns:
            the policy loss value for this batch
        """
        # FIX: initialize so the diagnostic print below cannot raise a
        # NameError when nn_baseline is disabled (the original referenced
        # baseline_loss unconditionally); also drops the redundant nested
        # `if self.nn_baseline` the original had.
        baseline_loss = None
        if self.nn_baseline:
            # Fit the baseline to normalized Q-values (mean 0, std 1); the
            # predictions are rescaled back in compute_advantage.
            target_n = (q_n - np.mean(q_n)) / (np.std(q_n) + 1e-8)
            _, baseline_loss = self.sess.run(
                [self.baseline_update_op, self.baseline_loss],
                feed_dict={self.sym_obs_no: obs_no, self.sym_target_n: target_n},
            )
        # Policy-gradient step.
        _, loss = self.sess.run(
            [self.update_op, self.loss],
            feed_dict={
                self.sym_obs_no: obs_no,
                self.sym_act_na: act_na,
                self.sym_adv_n: adv_n,
            },
        )
        print("loss: {}".format(loss))
        if baseline_loss is not None:
            print("bl-loss: {}".format(baseline_loss))
        return loss
def train_PG(
    exp_name,
    env_name,
    n_iter,
    gamma,
    min_timesteps_per_batch,
    max_path_length,
    learning_rate,
    reward_to_go,
    animate,
    logdir,
    normalize_advantages,
    nn_baseline,
    seed,
    n_layers,
    size,
):
    """Run one complete policy-gradient training session on a gym environment.

    Sets up logging, builds the environment and Agent, constructs the TF
    graph, then alternates between collecting rollout batches and performing
    policy-gradient updates for n_iter iterations, logging diagnostics
    through logz each iteration.

    arguments:
        exp_name: experiment name (recorded by the logger)
        env_name: gym environment id passed to gym.make
        n_iter: number of training iterations
        gamma: discount factor
        min_timesteps_per_batch: minimum environment steps per batch
        max_path_length: episode length cap (falsy -> use the env's spec)
        learning_rate: Adam step size for policy (and baseline)
        reward_to_go: use reward-to-go Q estimates instead of full returns
        animate: render the environment during selected rollouts
        logdir: logz output directory
        normalize_advantages: standardize advantages per batch
        nn_baseline: train a neural-network state-value baseline
        seed: random seed applied to tf, numpy, and the env
        n_layers, size: policy/baseline MLP depth and hidden width
    """
    start = time.time()
    # ========================================================================================#
    # Set Up Logger
    setup_logger(logdir, locals())
    # ========================================================================================#
    # Set Up Env
    # Make the gym environment
    env = gym.make(env_name)
    if animate:
        env.render(mode="human")
    # Set random seeds
    # NOTE: seeding happens before any graph construction or env interaction
    # so runs are reproducible; keep this ordering.
    tf.set_random_seed(seed)
    np.random.seed(seed)
    env.seed(seed)
    # Maximum length for episodes
    max_path_length = int(max_path_length or env.spec.max_episode_steps)
    # Is this env continuous, or self.discrete?
    discrete = isinstance(env.action_space, gym.spaces.Discrete)
    # Observation and action sizes
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.n if discrete else env.action_space.shape[0]
    # ========================================================================================#
    # Initialize Agent
    computation_graph_args = {
        "n_layers": n_layers,
        "obs_dim": obs_dim,
        "act_dim": act_dim,
        "discrete": discrete,
        "size": size,
        "learning_rate": learning_rate,
    }
    sample_trajectory_args = {
        "animate": animate,
        "max_path_length": max_path_length,
        "min_timesteps_per_batch": min_timesteps_per_batch,
    }
    estimate_return_args = {
        "gamma": gamma,
        "reward_to_go": reward_to_go,
        "nn_baseline": nn_baseline,
        "normalize_advantages": normalize_advantages,
    }
    agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
    # build computation graph
    agent.build_computation_graph()
    # tensorflow: config, session, variable initialization
    agent.init_tf_sess()
    # ========================================================================================#
    # Training Loop
    total_timesteps = 0
    loss_hist = []
    for itr in range(n_iter):
        print("********** Iteration %i ************" % itr)
        # Collect a fresh batch of on-policy rollouts.
        paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
        total_timesteps += timesteps_this_batch
        # Build arrays for observation, action for the policy gradient update by concatenating
        # across paths
        obs_no = np.concatenate([path["observation"] for path in paths])
        act_na = np.concatenate([path["action"] for path in paths])
        re_n = [path["reward"] for path in paths]
        # Q-value / advantage estimation, then one gradient update.
        q_n, adv_n = agent.estimate_return(obs_no, re_n)
        loss = agent.update_parameters(obs_no, act_na, q_n, adv_n)
        loss_hist.append(loss)
        # Log diagnostics
        returns = [path["reward"].sum() for path in paths]
        ep_lengths = [pathlength(path) for path in paths]
        logz.log_tabular("Time", time.time() - start)
        logz.log_tabular("Iteration", itr)
        logz.log_tabular("AverageReturn", np.mean(returns))
        logz.log_tabular("StdReturn", np.std(returns))
        logz.log_tabular("MaxReturn", np.max(returns))
        logz.log_tabular("MinReturn", np.min(returns))
        logz.log_tabular("EpLenMean", np.mean(ep_lengths))
        logz.log_tabular("EpLenStd", np.std(ep_lengths))
        logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
        logz.log_tabular("TimestepsSoFar", total_timesteps)
        logz.dump_tabular()
        logz.pickle_tf_vars()
def main():
    """Parse command-line arguments and launch one training run per seed.

    Each experiment runs train_PG in its own subprocess because TensorFlow
    does not tolerate repeated train_PG calls in a single process.
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("env_name", type=str)
    parser.add_argument("--exp_name", "-e", type=str, default="vpg")
    parser.add_argument("--render", "-r", action="store_true")
    parser.add_argument("--render_at_end", "-re", action="store_true")
    parser.add_argument("--discount", type=float, default=1.0)
    parser.add_argument("--n_iter", "-n", type=int, default=100)
    parser.add_argument("--batch_size", "-b", type=int, default=1000)
    parser.add_argument("--ep_len", "-ep", type=float, default=-1.0)
    parser.add_argument("--learning_rate", "-lr", type=float, default=5e-3)
    parser.add_argument("--reward_to_go", "-rtg", action="store_true")
    parser.add_argument("--dont_normalize_advantages", "-dna", action="store_true")
    parser.add_argument("--nn_baseline", "-bl", action="store_true")
    parser.add_argument("--seed", type=int, default=1)
    parser.add_argument("--n_experiments", "-p", type=int, default=1)
    parser.add_argument("--n_layers", "-l", type=int, default=2)
    parser.add_argument("--size", "-s", type=int, default=64)
    args = parser.parse_args()

    # Timestamped log directory under ./data/<exp>_<env>_<timestamp>/<seed>.
    if not (os.path.exists("data")):
        os.makedirs("data")
    logdir = (
        args.exp_name + "_" + args.env_name + "_" + time.strftime("%d-%m-%Y_%H-%M-%S")
    )
    logdir = os.path.join("data", logdir)
    if not (os.path.exists(logdir)):
        os.makedirs(logdir)

    # ep_len <= 0 means "use the environment's default episode limit".
    max_path_length = args.ep_len if args.ep_len > 0 else None

    processes = []
    for e in range(args.n_experiments):
        seed = args.seed + 10 * e
        print("Running experiment with seed %d" % seed)

        # FIX: bind the per-iteration values as default arguments. Python
        # closures are late-binding, so referencing `seed`/`e` directly from
        # the function body would make a child that starts after the loop
        # advances see the values from a later iteration.
        def train_func(seed=seed, animate=(args.render if e == 0 else False)):
            train_PG(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                min_timesteps_per_batch=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                reward_to_go=args.reward_to_go,
                animate=animate,
                logdir=os.path.join(logdir, "%d" % seed),
                normalize_advantages=not (args.dont_normalize_advantages),
                nn_baseline=args.nn_baseline,
                seed=seed,
                n_layers=args.n_layers,
                size=int(args.size),
            )

        # Awkward hacky process runs, because Tensorflow does not like
        # repeatedly calling train_PG in the same thread.
        p = Process(target=train_func, args=tuple())
        p.start()
        processes.append(p)
        # Joining here instead would serialize the experiments.

    for p in processes:
        p.join()
|
bittorrent.py | #!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Matt Chisholm and Greg Hazel
from __future__ import division
app_name = "BitTorrent"
import os
import sys
try:
from BitTorrent.translation import _
except ImportError, e:
if os.name == 'posix':
# Ugly Idiot-proofing -- this should stop ALL bug reports from
# people unable to run BitTorrent after installation on Debian
# and RedHat based systems.
pythonversion = sys.version[:3]
py24 = os.path.exists('/usr/lib/python2.4/site-packages/BitTorrent/')
py23 = os.path.exists('/usr/lib/python2.3/site-packages/BitTorrent/')
if not py24 and not py23:
print "There is no BitTorrent package installed on this system."
elif py24 and py23:
print """
There is more than one BitTorrent package installed on this system,
at least one under Python 2.3 and at least one under Python 2.4."""
else:
print """
A BitTorrent package for the wrong version of Python is installed on this
system. The default version of Python on this system is %s. However, the
BitTorrent package is installed under Python %s.""" % (pythonversion, (py24 and '2.4' or '2.3'))
print """
To install BitTorrent correctly you must first:
* Remove *all* versions of BitTorrent currently installed.
Then, you have two options:
* Download and install the .deb or .rpm package for
BitTorrent & Python %s
* Download the source .tar.gz and follow the directions for
installing under Python %s
Visit http://www.bittorrent.com/ to download BitTorrent.
""" % (pythonversion, pythonversion)
sys.exit(1)
else:
raise
import time
import BTL.stackthreading as threading
import logging
debug=False
#debug=True
from BTL import atexit_threads
assert sys.version_info >= (2, 3), _("Install Python %s or greater") % '2.3'
from BitTorrent import BTFailure, inject_main_logfile
from BitTorrent import configfile
from BTL.defer import DeferredEvent, wrap_task
from BitTorrent.defaultargs import get_defaults
from BitTorrent.IPC import ipc_interface
from BitTorrent.prefs import Preferences
from BitTorrent.RawServer_twisted import RawServer
if os.name == 'nt':
from BitTorrent.platform import win_version_num
from BitTorrent import zurllib
# Build the default configuration schema for the "bittorrent" app and extend
# it with two persisted UI bookkeeping values.
defaults = get_defaults('bittorrent')
defaults.extend((('donated' , '', ''), # the version that the user last donated for
                 ('notified', '', ''), # the version that the user was last notified of
                 ))
# Flatten (name, value, doc) triples into a name -> default-value mapping.
defconfig = dict([(name, value) for (name, value, doc) in defaults])
# NOTE: relies on Python 2 list-comprehension variable leakage to clean up
# the loop variables from the comprehension above.
del name, value, doc

inject_main_logfile()
global_logger = logging.getLogger('')
rawserver = None

if __name__ == '__main__':
    psyco = None
    try:
        # 95, 98, and ME seem to have problems with psyco
        # so only import it on NT and up
        # and only if we're not using python 2.5, becuase it's broken
        if (os.name == 'nt' and win_version_num >= (2, 4, 0) and
            sys.version_info < (2, 5)):
            # NOTE: the module name 'psyco_BROKEN' makes this import always
            # fail, i.e. psyco acceleration is deliberately disabled here.
            import psyco_BROKEN
            import traceback
            # Keep traceback helpers interpreted so stack introspection works.
            psyco.cannotcompile(traceback.print_stack)
            psyco.cannotcompile(traceback.format_stack)
            psyco.cannotcompile(traceback.extract_stack)
            #psyco.full(memory=10)
            psyco.bind(RawServer.listen_forever)
            from BTL import sparse_set
            psyco.bind(sparse_set.SparseSet)
            from BitTorrent import PiecePicker
            psyco.bind(PiecePicker.PieceBuckets)
            psyco.bind(PiecePicker.PiecePicker)
            from BitTorrent import PieceSetBuckets
            psyco.bind(PieceSetBuckets.PieceSetBuckets)
            psyco.bind(PieceSetBuckets.SortedPieceBuckets)
            psyco.profile(memorymax=30000) # that's 30MB for the whole process
            #psyco.log()
            # see below for more
    except ImportError:
        pass

    zurllib.add_unsafe_thread()

    # Parse config files plus command-line arguments.
    try:
        config, args = configfile.parse_configuration_and_args(defaults,
                                        'bittorrent', sys.argv[1:], 0, None)
        if debug:
            config['upnp'] = False
            config['one_connection_per_ip'] = False
    except BTFailure, e:
        print unicode(e.args[0])
        sys.exit(1)

    config = Preferences().initWithDict(config)
    # bug set in DownloadInfoFrame

    rawserver = RawServer(config)
    zurllib.set_zurllib_rawserver(rawserver)
    rawserver.install_sigint_handler()

    ipc = ipc_interface(rawserver, config, "controlsocket")
    # make sure we clean up the ipc when everything is done
    atexit_threads.register_verbose(ipc.stop)

    # this could be on the ipc object
    ipc_master = True
    try:
        if not config['use_factory_defaults']:
            ipc.create()
    except BTFailure, e:
        # Another instance already owns the control socket: forward any
        # requested work to it and exit instead of starting a second UI.
        ipc_master = False
        try:
            ipc.send_command('no-op')
            if config['publish']:
                assert len(args) == 1
                ipc.send_command('publish_torrent', args[0], config['publish'])
                sys.exit(0)
            elif args:
                for arg in args:
                    ipc.send_command('start_torrent', arg)
                sys.exit(0)
            ipc.send_command('show_error', _("%s already running")%app_name)
        except BTFailure:
            global_logger.error((_("Failed to communicate with another %s process "
                                   "but one seems to be running.") % app_name) +
                                (_(" Closing all %s windows may fix the problem.")
                                 % app_name))
        sys.exit(1)

# GUI / core imports are deferred until after the single-instance check above.
from BitTorrent.MultiTorrent import MultiTorrent
from BTL.ThreadProxy import ThreadProxy
from BitTorrent.TorrentButler import DownloadTorrentButler, SeedTorrentButler
from BitTorrent.AutoUpdateButler import AutoUpdateButler
from BitTorrent.GUI_wx.DownloadManager import MainLoop
from BitTorrent.GUI_wx import gui_wrap
def gmtime():
    """Return the current UTC wall-clock time as an epoch-style float.

    Converts the UTC struct_time through mktime (which interprets it as
    local time), matching the existing callers' expectations.
    """
    utc_now = time.gmtime()
    return time.mktime(utc_now)
if __name__ == '__main__':
    #import memleak_detection
    #memleak_detection.begin_sampling('memleak_sample.log')

    if psyco:
        psyco.bind(MainLoop.run)

    # Record the (UTC-derived) start timestamp in the config.
    config['start_time'] = gmtime()

    mainloop = MainLoop(config)

    def init_core(mainloop):
        """Initialize the torrent core on a background thread and run the
        rawserver event loop; runs until shutdown or a fatal init error."""
        core_doneflag = DeferredEvent()

        # Forward core log records into the GUI via gui_wrap.
        class UILogger(logging.Handler):
            def emit(self, record):
                msg = "[%s] %s" % (record.name, self.format(record))
                gui_wrap(mainloop.do_log, record.levelno, msg)

        logging.getLogger('').addHandler(UILogger())

        try:
            multitorrent = MultiTorrent(config, rawserver, config['data_dir'],
                                        listen_fail_ok=True,
                                        init_torrents=False)
            # Butlers
            multitorrent.add_policy(DownloadTorrentButler(multitorrent))
            multitorrent.add_policy(SeedTorrentButler(multitorrent))
            auto_update_butler = AutoUpdateButler(multitorrent, rawserver,
                                                 test_new_version=config['new_version'],
                                                 test_current_version=config['current_version'])
            multitorrent.add_auto_update_policy(auto_update_butler)

            # attach to the UI
            tpm = ThreadProxy(multitorrent,
                              gui_wrap,
                              wrap_task(rawserver.external_add_task))
            mainloop.attach_multitorrent(tpm, core_doneflag)

            ipc.start(mainloop.external_command)
            #rawserver.associate_thread()

            # register shutdown action
            def shutdown():
                # Stop the rawserver once the multitorrent shutdown deferred
                # fires, whether it succeeded or failed.
                df = multitorrent.shutdown()
                stop_rawserver = lambda r : rawserver.stop()
                df.addCallbacks(stop_rawserver, stop_rawserver)

            # When core_doneflag is set, schedule shutdown on the rawserver.
            rawserver.add_task(0, core_doneflag.addCallback,
                               lambda r: rawserver.external_add_task(0, shutdown))

            # Blocks for the lifetime of the core.
            rawserver.listen_forever()
        except:
            # oops, we failed.
            # one message for the log w/ exception info
            global_logger.exception("BitTorrent core initialization failed!")
            # one message for the user w/o info
            global_logger.critical("BitTorrent core initialization failed!")

            core_doneflag.set()
            rawserver.stop()
            # Best-effort GUI teardown; the GUI may not be fully up yet.
            try:
                gui_wrap(mainloop.ExitMainLoop)
            except:
                pass
            try:
                gui_wrap(mainloop.doneflag.set)
            except:
                pass
            # Re-raise so the failure is not silently swallowed.
            raise

    # Run the core on a background thread; the GUI owns the main thread.
    threading.Thread(target=init_core, args=(mainloop,)).start()

    mainloop.append_external_torrents(*args)

    ## # cause memleak stuff to be imported
    ## import code
    ## import sizer
    ##
    ## from sizer import annotate
    ## from sizer import formatting
    ## from sizer import operations
    ## from sizer import rules
    ## from sizer import scanner
    ## from sizer import set
    ## from sizer import sizes
    ## from sizer import wrapper

    try:
        mainloop.run()
    except KeyboardInterrupt:
        # the gui main loop is closed in MainLoop
        pass
|
pdv.py | # Copyright (c) 2011-2019, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
# Written by Mason Kwiat, Douglas S. Miller, and Kevin Griffin
# e-mail: griffin28@llnl.gov
# LLNL-CODE-507071
# All rights reserved.
# This file is part of PDV. For details, see <URL describing code and
# how to download source>. Please also read "Additional BSD Notice".
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the disclaimer below.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the disclaimer (as noted below)
# in the documentation and/or other materials provided with the
# distribution. Neither the name of the LLNS/LLNL nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
# LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
# Additional BSD Notice
# 1. This notice is required to be provided under our contract with
# the U.S. Department of Energy (DOE). This work was produced at
# Lawrence Livermore National Laboratory under Contract
# No. DE-AC52-07NA27344 with the DOE.
# 2. Neither the United States Government nor Lawrence Livermore
# National Security, LLC nor any of their employees, makes any
# warranty, express or implied, or assumes any liability or
# responsibility for the accuracy, completeness, or usefulness of any
# information, apparatus, product, or process disclosed, or represents
# that its use would not infringe privately-owned rights.
# 3. Also, reference herein to any specific commercial products,
# process, or services by trade name, trademark, manufacturer or
# otherwise does not necessarily constitute or imply its endorsement,
# recommendation, or favoring by the United States Government or
# Lawrence Livermore National Security, LLC. The views and opinions
# of authors expressed herein do not necessarily state or reflect
# those of the United States Government or Lawrence Livermore National
# Security, LLC, and shall not be used for advertising or product
# endorsement purposes.
import cmd
import sys, os, re, time
import string
from threading import Thread
import numpy
from math import *
from numpy import *
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as mclr
from matplotlib.backends import qt_compat
use_pyside = qt_compat.QT_API == qt_compat.QT_API_PYSIDE
if use_pyside:
from PySide.QtCore import *
from PySide.QtGui import *
else:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import traceback
import new
import readline
import code
from numbers import Number
import pydvpy as pydvif
import curve
import pdvplot
import pdvutil
# Newer matplotlib releases expose pre-defined plot styles; remember whether
# they are available so style-related commands can degrade gracefully.
try:
    from matplotlib import style
    stylesLoaded = True
except ImportError:
    # Narrowed from a bare 'except': only a missing/old matplotlib should
    # disable styles -- a bare except would also swallow SystemExit,
    # KeyboardInterrupt and genuine bugs raised during the import.
    stylesLoaded = False
class Command(cmd.Cmd, object):
# cmd.Cmd presentation settings
prompt = '[PyDV]: '
undoc_header = 'Command Shortcuts:'
ruler = '='

# Curve bookkeeping: curves read from files (the menu), open files, curves
# currently plotted, and the pre-command snapshot used for undo/redo.
curvelist = list()
filelist = []
plotlist = []
plotfirst = []
oldlist = []        # plotlist snapshot taken in precmd
usertexts = []      # user-placed annotations
history = []        # undo/redo stack of plotlist snapshots
histptr = -1        # index of the current position in history
plotedit = False    # set by commands that modify the plot
plotter = None#pdvplot.Plotter()
app = None

## state variables##
xlabel = ''
ylabel = ''
figcolor = None
plotcolor = None
xtickcolor = None
ytickcolor = None
title = ''
titlecolor = None
xlabelcolor = None
ylabelcolor = None
xlim = None
ylim = None
showkey = True
handlelength = None
key_loc = 1
key_ncol = 1
showgrid = True
showletters = True
xlogscale = False
ylogscale = False
# font sizes accept matplotlib size names ('small', 'medium', 'large', ...)
titlefont = 'large'
xlabelfont = 'medium'
ylabelfont = 'medium'
axistickfont = 'medium'
keyfont = 'small'
keycolor = 'black'
curvelabelfont = 'medium'
annotationfont = 'medium'
initrun = None
update = True       # replot automatically after each command
guilims = False
# 'de' appears to mean "default / let matplotlib decide" -- TODO confirm
geometry = 'de'
numticks = 'de'
xnumticks = 'de'
ynumticks = 'de'
xticks = 'de'
yticks = 'de'
xCol = 0 # column to use for x-axis, if doing column format reads
debug = False       # when True, commands print tracebacks on error
redraw = True       # cleared by commands that need no replot
xtickformat = 'de'
ytickformat = 'de'
xticklength = 4
yticklength = 4
xtickwidth = 1
ytickwidth = 1
namewidth = 25
updatestyle = False
linewidth = None
##check for special character/operator commands##
def precmd(self, line):
    """cmd.Cmd hook run before each command: snapshot the plotlist for
    undo, translate operator/hyphenated aliases, and sync pending GUI
    changes."""
    # Snapshot the current plotlist so postcmd can push it onto the undo
    # history if this command turns out to edit the plot.
    pl = []
    for i in range(len(self.plotlist)):
        pl.append(self.plotlist[i].copy())
    self.oldlist = pl
    line = line.strip()
    if not line:
        return line
    # Leading arithmetic operators act as commands: "+ a b" -> "add a b".
    check = line.split()[0].strip()
    if(check == '+'):
        line = 'add ' + line[1:]
    elif(check == '-'):
        line = 'subtract ' + line[1:]
    elif(check == '/'):
        line = 'divide ' + line[1:]
    elif(check == '*'):
        line = 'multiply ' + line[1:]
    # Map hyphenated ULTRA-style names onto the do_* method names.
    line = line.replace('re-id', 'reid')
    line = line.replace('data-id', 'dataid')
    line = line.replace('x-log-scale', 'xlogscale')
    line = line.replace('y-log-scale', 'ylogscale')
    line = line.replace('make-curve', 'makecurve')
    line = line.replace('error-bar', 'errorbar')
    line = line.replace('error-range', 'errorrange')
    line = line.replace('get-range', 'getrange')
    line = line.replace('get-domain', 'getdomain')
    line = line.replace('diff-measure', 'diffMeasure')
    # Rewrite integrate()/int() and derivative()/der() calls to the
    # commander-bound versions; anchoring on '(' keeps the second replace
    # from touching text rewritten by the first.
    line = line.replace('integrate(', 'commander.integrate(').replace('int(', 'commander.integrate(')
    line = line.replace('derivative(', 'commander.derivative(').replace('der(', 'commander.derivative(')
    #print line
    # Remember where the user dragged the legend so a replot keeps it.
    if self.showkey:
        if plt.gca().get_legend() is not None:
            self.key_loc = plt.gca().get_legend()._loc
    # Pull in any edits made through the GUI before running the command.
    if self.plotter is not None:
        if self.plotter.plotChanged:
            self.apply_uichanges()
    return line
##check for arithmetic calculation##
def default(self, line):
    """cmd.Cmd fallback for unknown commands: try to interpret the whole
    line as curve arithmetic (e.g. "a + b*2")."""
    try:
        pdvutil.parsemath(line, self.plotlist, self, (plt.axis()[0],plt.axis()[1]))
        self.plotedit = True
    except:
        # Not parseable as math either: report and skip the replot.
        self.redraw = False
        print 'error - unknown syntax: ' + line
        if self.debug:
            traceback.print_exc(file=sys.stdout)
## save current state for undo/redo##
def postcmd(self, stop, line):
    """cmd.Cmd hook run after each command.

    When the command edited the plot, push the pre-command snapshot
    (taken in precmd) onto the undo history; then replot if required.
    Returns `stop` unchanged so the command loop continues normally.
    """
    if self.plotedit:
        # Drop any redo states beyond the current pointer before appending.
        if len(self.history) > self.histptr+1:
            self.history = self.history[:self.histptr+1]
        self.histptr += 1
        self.history.append(self.oldlist)
        # Cap the undo depth at 15 snapshots.
        if(len(self.history) > 15):
            self.history.pop(0)
            self.histptr -= 1
        self.plotedit = False
    if self.update:
        if self.redraw:
            # BUG FIX: this was a bare attribute reference
            # ('self.updateplot'), which evaluates the bound method and
            # discards it without calling -- the plot was never refreshed
            # here. It must be invoked.
            self.updateplot()
    self.redraw = True
    return stop
##override cmd empty line function to not repeat last command##
def emptyline(self):
    # cmd.Cmd's default re-runs the previous command on an empty line;
    # PyDV instead does nothing and skips the replot.
    self.redraw = False
##normal shortcut commands##
# Each do_<abbrev> below simply delegates to its full-length command so the
# abbreviation works at the prompt; do_help maps the same aliases for help.
def do_q(self, line):
    self.do_quit(line)
def do_ran(self, line):
    self.do_range(line)
def do_dom(self, line):
    self.do_domain(line)
def do_rd(self, line):
    self.do_read(line)
def do_rdcsv(self, line):
    self.do_readcsv(line)
def do_cur(self, line):
    self.do_curve(line)
def do_era(self, line):
    self.do_erase(line)
def do_del(self, line):
    self.do_delete(line)
def do_lst(self, line):
    self.do_list(line)
def do_sub(self, line):
    self.do_subtract(line)
def do_div(self, line):
    self.do_divide(line)
def do_mult(self, line):
    self.do_multiply(line)
def do_xls(self, line):
    self.do_xlogscale(line)
def do_yls(self, line):
    self.do_ylogscale(line)
def do_der(self, line):
    self.do_derivative(line)
def do_pow(self, line):
    self.do_powr(line)
def do_power(self, line):
    self.do_powr(line)
def do_powx(self, line):
    self.do_powrx(line)
def do_powerx(self, line):
    self.do_powrx(line)
def do_square(self, line):
    self.do_sqr(line)
def do_squarex(self, line):
    self.do_sqrx(line)
def do_convol(self, line):
    self.do_convolve(line)
def do_convolf(self, line):
    self.do_convolvef(line)
def do_convolb(self, line):
    self.do_convolveb(line)
def do_convolc(self, line):
    self.do_convolvec(line)
def do_int(self, line):
    self.do_integrate(line)
def do_geom(self, line):
    self.do_geometry(line)
def do_xmm(self, line):
    self.do_xminmax(line)
def do_ymm(self, line):
    self.do_yminmax(line)
def do_key(self, line):
    self.do_legend(line)
def do_leg(self, line):
    self.do_legend(line)
def do_ln(self, line):
    self.do_log(line)
def do_lnx(self, line):
    self.do_logx(line)
def do_nc(self, line):
    self.do_newcurve(line)
def do_mkext(self, line):
    self.do_makeextensive(line)
def do_mkint(self, line):
    self.do_makeintensive(line)
def do_system(self, line):
    self.do_shell(line)
##override help function to check for shortcuts##
def do_help(self, arg):
    """Resolve shortcut and hyphenated alias names to their canonical
    command, then delegate to cmd.Cmd's standard help machinery."""
    # Alias -> canonical command table (mirrors the do_<abbrev> shortcuts
    # and the hyphenated ULTRA names handled in precmd).
    aliases = {
        '+': 'add',
        '-': 'subtract', 'sub': 'subtract',
        '*': 'multiply', 'mult': 'multiply',
        '/': 'divide', 'div': 'divide',
        'rd': 'read',
        'rdcsv': 'readcsv',
        'convol': 'convolve',
        'convolb': 'convolveb',
        'convolc': 'convolvec',
        'cur': 'curve',
        'era': 'erase',
        'del': 'delete',
        'ran': 'range',
        'dom': 'domain',
        'lst': 'list',
        'q': 'quit',
        'data-id': 'dataid',
        're-id': 'reid',
        'x-log-scale': 'xlogscale', 'xls': 'xlogscale',
        'y-log-scale': 'ylogscale', 'yls': 'ylogscale',
        'der': 'derivative',
        'pow': 'powr', 'power': 'powr',
        'powx': 'powrx', 'powerx': 'powrx',
        'make-curve': 'makecurve',
        'error-bar': 'errorbar',
        'int': 'integrate',
        'geom': 'geometry',
        'key': 'legend', 'leg': 'legend',
        'error-range': 'errorrange',
        'get-domain': 'getdomain',
        'get-range': 'getrange',
        'xmm': 'xminmax',
        'ymm': 'yminmax',
        'ln': 'log',
        'lnx': 'logx',
        'nc': 'newcurve',
        'mkext': 'makeextensive',
        'mkint': 'makeintensive',
        'system': 'shell',
    }
    # Unknown names pass through unchanged, exactly like the old elif chain.
    arg = aliases.get(arg, arg)
    self.redraw = False # never need to redraw after a 'help'
    return super(Command, self).do_help(arg)
##execute shell commands##
def do_shell(self, line):
    """Run `line` through the system shell ('!' and 'system' are synonyms)."""
    # NOTE(review): passes the raw line to the shell; fine for an
    # interactive tool, but not safe for untrusted input.
    os.system(line)
def help_shell(self):
    print "\n Procedure: Execute shell commands. The symbol \'!\' is a synonym for \'shell\'.\n Usage: <shell | system> <command>\n"
########################################################################################################
#command functions#
########################################################################################################

##evaluate a line of mathematical operations##
def do_newcurve (self, line):
    """Create a new curve by evaluating `line` as a numpy expression over
    existing curves' .x/.y arrays (e.g. "sin(a.x)/h.y")."""
    try:
        # check for obvious input errors
        if(not line):
            print 'error - no arguments to newcurve, do not tease me this way'
            return 0
        if(len(line.split(':')) > 1):
            print 'error - NOT HANDLING RANGES YET, not even sure what that would mean yet'
            return 0
        else: # ok, got through input error checking, let's get to work
            newline = line # copy the original line, we need the original for labeling
            # replace all the *.x and *.y entries in the line with
            # their actual data arrays in the plotlist.
            import re # we are going to need regex!
            arrayMarkers = ['.x', '.y']
            for arrayMarker in arrayMarkers:
                arrayInsts = re.findall(r"\w\%s" % arrayMarker, line) # finds [a-z].x then [a-z].y
                for aInst in arrayInsts:
                    plotname = aInst[0] # BLAGO!! hard wired for single-letter labels
                    cID = pdvutil.getCurveIndex(plotname, self.plotlist)
                    newline = re.sub(r"%s\%s" % (plotname, arrayMarker),
                                     "self.plotlist[%d]%s" % (cID, arrayMarker), newline)
            # now newline holds a string that can be evaluated by Python
            # NOTE(review): eval of user input -- acceptable for an
            # interactive plotting shell, but inherently unsafe input.
            newYArray = eval(newline) # line returns a new numpy.array
            # make newYArray into a legitimate curve
            c = curve.Curve(filename = '', name = line) # we name the curve with the input 'line'
            c.plotname = self.getcurvename() # get the next available data ID label
            c.y = newYArray
            # get the x-values from one of the curves used in the expression
            # (cID is whatever curve matched last; all curves are assumed to
            # share a domain -- see WARNINGS in help_newcurve)
            c.x = self.plotlist[cID].x
            self.addtoplot(c)
        self.plotedit = True
    except:
        print 'error - usage: newcurve <expression>'
        print 'try "help newcurve" for much more info'
        if(self.debug):
            traceback.print_exc(file=sys.stdout)
def help_newcurve(self):
    print '\n newcurve creats a new curve from an expression\n Usage: newcurve <numpy expression>\n'
    print 'For convenience, both math and numpy modules have been imported into the namespace.'
    print 'Just FYI, this feature is way outside the ULTRA syntax PDV is mostly based on.'
    print 'EXAMPLE:'
    print
    print '[PDV]: newcurve sin(a.x*2*pi)/(h.y**2)'
    print
    print 'This creates a new curve according to the complicated expression.'
    print 'You can abbreviate newcurve as nc.'
    print
    print 'WARNINGS:'
    print '* Currently, newcurve is hard-wired to only handle single-letter labels.'
    print ' Curve names used in the expression cannot be the @N type we use after'
    print ' we run out of letters. Sorry (April 2015).'
    print '* A common error is to forget the .x or .y on the curve label name.'
    print '* All the arrays in your expression have to span the same domain! Currently (4/2015), newcurve'
    print ' will generate a curve from different domains (with no error message), and that curve'
    print ' will almost certainly not be what you intended.'
    print
##evaluate a line of mathematical operations##
def do_eval(self, line):
try:
line = line.replace('integrate', 'commander.integrate').replace('int', 'commander.integrate')
line = line.replace('derivative', 'commander.derivative').replace('der', 'commander.derivative')
pdvutil.parsemath(line, self.plotlist, self, (plt.axis()[0],plt.axis()[1]))
self.plotedit = True
except:
print 'error - usage: eval <curve-operations>'
if(self.debug): traceback.print_exc(file=sys.stdout)
def help_eval(self):
print '\n Procedure: Evaluate mathematical operations on curves\n Usage: eval <curve-operations>\n'
##turn on debug tracebacks for commands##
def do_debug(self, line):
    """Toggle printing of tracebacks on command errors: `debug on|off`."""
    try:
        line = line.strip()
        if(line == '0' or line.upper() == 'OFF'):
            self.debug = False
        elif(line == '1' or line.upper() == 'ON'):
            self.debug = True
        else:
            print 'invalid input: requires on or off as argument'
    except:
        if(self.debug):
            traceback.print_exc(file=sys.stdout)
    finally:
        # A settings change never needs a replot.
        self.redraw = False
def help_debug(self):
    print '\n Variable: Show debug tracebacks if True\n Usage: debug on | off\n'
##undo last operation on a curve##
def do_undo(self, line):
    """Restore the plotlist snapshot preceding the last editing command."""
    try:
        if(self.histptr > 0):
            # First undo from the tip: save the current state so redo can
            # come back to it.
            if(self.histptr == len(self.history)-1):
                pl = []
                for i in range(len(self.plotlist)):
                    pl.append(self.plotlist[i].copy())
                self.history.append(pl)
            self.plotlist = self.history[self.histptr]
            #self.history = self.history[:self.histptr]
            self.histptr -= 1
            #print self.history
            #print self.histptr
        else:
            print 'error - cannot undo further'
    except:
        print 'error - usage: undo'
        if(self.debug): traceback.print_exc(file=sys.stdout)
def help_undo(self):
    print '\n Procedure: Undo the last operation on plotted curves\n Usage: undo\n'
##redo last curve operation undo##
def do_redo(self, line):
    """Re-apply the state undone by the most recent `undo`."""
    try:
        # len-2 guard: the final history entry is the pre-undo state pushed
        # by do_undo, so histptr+1 below stays in range.
        if(self.histptr < len(self.history)-2):
            self.histptr += 1
            self.plotlist = self.history[self.histptr+1]
            #print self.history
            #print self.histptr
        else:
            print 'error - cannot redo further'
    except:
        print 'error - usage: redo'
        if(self.debug): traceback.print_exc(file=sys.stdout)
def help_redo(self):
    print '\n Procedure: Redo the last undone curve operation\n Usage: redo\n'
##add one or more curves and plot resulting curve##
# Each of add/subtract/multiply/divide expands a:c style ranges via
# pdvutil.getletterargs, joins the curve labels with the operator, and
# hands the resulting expression to pdvutil.parsemath.
def do_add(self, line):
    """Plot the sum of the listed curves."""
    try:
        if(len(line.split(':')) > 1):
            # Range notation present: expand and recurse once.
            self.do_add(pdvutil.getletterargs(line))
            return 0
        else:
            line = line.split()
            line = ' + '.join(line)
            pdvutil.parsemath(line, self.plotlist, self, (plt.axis()[0],plt.axis()[1]))
            self.plotedit = True
    except:
        print 'error - usage: add <curve-list>'
        if(self.debug): traceback.print_exc(file=sys.stdout)
def help_add(self):
    print '\n Procedure: Take sum of curves\n Usage: add <curve-list>\n Shortcuts: +\n'

##subtract one or more curves##
def do_subtract(self, line):
    """Plot the difference of the listed curves; a single curve is negated."""
    try:
        if(len(line.split(':')) > 1):
            self.do_subtract(pdvutil.getletterargs(line))
            return 0
        else:
            line = line.split()
            if(len(line) == 1):
                # One operand: unary minus.
                line = '-' + line[0]
            else:
                line = ' - '.join(line)
            pdvutil.parsemath(line, self.plotlist, self, (plt.axis()[0],plt.axis()[1]))
            self.plotedit = True
    except:
        print 'error - usage: sub <curve-list>'
        if(self.debug):
            traceback.print_exc(file=sys.stdout)
def help_subtract(self):
    print '\n Procedure: Take difference of curves\n Usage: subtract <curve-list>\n Shortcuts: - , sub\n'

##multiply one or more curves##
def do_multiply(self, line):
    """Plot the product of the listed curves."""
    try:
        if(len(line.split(':')) > 1):
            self.do_multiply(pdvutil.getletterargs(line))
            return 0
        else:
            line = line.split()
            line = ' * '.join(line)
            pdvutil.parsemath(line, self.plotlist, self, (plt.axis()[0],plt.axis()[1]))
            self.plotedit = True
    except:
        print 'error - usage: mult <curve-list>'
        if(self.debug):
            traceback.print_exc(file=sys.stdout)
def help_multiply(self):
    print '\n Procedure: Take product of curves\n Usage: multiply <curve-list>\n Shortcuts: * , mult\n'

##divide one or more curves##
def do_divide(self, line):
    """Plot the quotient of the listed curves."""
    try:
        if(len(line.split(':')) > 1):
            self.do_divide(pdvutil.getletterargs(line))
            return 0
        else:
            line = line.split()
            line = ' / '.join(line)
            pdvutil.parsemath(line, self.plotlist, self, (plt.axis()[0],plt.axis()[1]))
            self.plotedit = True
    except:
        print 'error - usage: div <curve-list>'
        if(self.debug):
            traceback.print_exc(file=sys.stdout)
def help_divide(self):
    print '\n Procedure: Take quotient of curves\n Usage: divide <curve-list>\n Shortcuts: / , div\n'
##read in an ultra file##
def do_read(self, line):
    """Read a curve data file, dispatching on argument count:
    <file> | <x-col> <file> | (regex) matches <file> |
    (regex) matches <x-col> <file>."""
    try:
        line = line.split()
        n = len(line)
        if n == 1:
            self.load(line[0])
        elif n == 2:
            if line[0].isdigit():
                self.xCol = int(line[0])
                self.load(line[1], True)
            else:
                raise RuntimeError, 'expecting an x-column number.'
        elif n == 3:
            # (regex) matches <file>; negative matches was mapped to None
            # meaning "unlimited".
            line[0] = line[0].strip().strip('()')
            matches = int(line[1])
            if matches < 0:
                matches = None
            self.load(line[2], False, line[0], matches)
        elif n == 4:
            # (regex) matches <x-col> <file>
            line[0] = line[0].strip().strip('()')
            matches = int(line[1])
            if matches < 0:
                matches = None
            self.xCol = int(line[2])
            self.load(line[3], True, line[0], matches)
        else:
            print 'error - Usage: read [(regex) matches] [x-col] <file-name>'
    except:
        if self.debug:
            traceback.print_exc(file=sys.stdout)
    finally:
        # Reading only extends the menu; no replot, but GUI dialogs must
        # learn about the new curves.
        self.redraw = False
        self.plotter.updateDialogs()
def help_read(self):
    print ('\n Macro: Read curve data file\n Usage: read [(regex) matches] [x-col] <file-name>\n Shortcuts: rd\n'
           ' If using regex, set matches equal to a negative number for unlimited matches.\n'
           ' For column oriented (.gnu) files optionally specify the x-column number before the file name.')
##read in a csv file##
def do_readcsv(self, line):
    """Read one or more csv files; a bare integer before a file name sets
    the x-column for that file (e.g. "rdcsv 1 file.csv")."""
    try:
        line = line.split()
        n = len(line)
        i = 0
        while i < n:
            if line[i].isdigit():
                # A digit must be followed by a file name.
                assert i < n - 1
                self.xCol = int(line[i])
                i += 1
            self.load_csv(line[i])
            i += 1
    except:
        if self.debug:
            traceback.print_exc(file=sys.stdout)
    finally:
        self.redraw = False
        self.plotter.updateDialogs()
def help_readcsv(self):
    print ('\n Macro: Read csv data file\n Usage: readcsv <file-name>\n Shortcuts: rdcsv\n'
           ' For column oriented (.gnu) files optionally specify the x-column number '
           'before the file name, as in "rdcsv 1 file.csv".\n')
## set x-column for cxv or gnu files explicitly
def do_setxcolumn(self, line):
try:
line = line.split()
if len(line) > 1:
print 'error - usage: setxcolumn n'
else:
self.xCol = string.atoi(line[0])
except:
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_setxcolumn(self):
print ('\n set x column for reading column formatted data files (.gnu or .csv).\n'
'usage: setxcolumn n, where n is an integer.')
#graph the given curves##
def do_curve(self, line):
    """Plot curves selected from the menu by number, a:b ranges, a.N
    per-file indices, or a (regex) over curve names/filenames."""
    if not line:
        return 0
    try:
        if(len(line.split(')')) > 1): #check for regular expression
            line = line.strip().split(')')
            reg = re.compile(r"")
            for i in range(len(line)):
                line[i] = line[i].strip().strip('(')
            # Allow "curve (menu <regex>)" by stripping the menu keyword.
            if line[0].split()[0] == 'menu':
                line[0] = ' '.join(line[0].split()[1:])
            try:
                reg = re.compile(r"%s" % line[0])
            except:
                print 'error: invalid expression'
                return 0
            self.do_menu(line[0])
            # Collect 1-based menu numbers of every curve whose name or
            # filename matches the pattern.
            regline = ''
            for i in range(len(self.curvelist)):
                searchline = self.curvelist[i].name + ' ' + self.curvelist[i].filename
                if reg.search(searchline):
                    regline += str(i+1) + ' '
            line[0] = regline
            line = ' '.join(line)
            self.do_curve(line) # call curve again but with regexp results
            self.redraw = True
            return 0
        if(len(line.split(':')) > 1): #check for list notation
            # call curve again with list expanded
            self.do_curve(pdvutil.getnumberargs(line, self.filelist))
            return 0
        else:
            line = line.split()
            for i in range(len(line)):
                curvedex = 0
                skip = False
                if(ord(line[i][0].upper()) >= ord('A') and ord(line[i][0].upper()) <= ord('Z')): #check for a.% b.% file index notation
                    filedex = ord(line[i][0].upper()) - ord('A') # file index we want
                    prevfile = '' # set prevfile to impossible value
                    filecounter = 0
                    while filecounter <= filedex: # count files up to the one we want
                        if self.curvelist[curvedex].filename != prevfile: # inc count if name changes
                            prevfile = self.curvelist[curvedex].filename
                            filecounter += 1
                        curvedex += 1 # this will end up being one past what we want
                        if curvedex >= len(self.curvelist):
                            print "error: in curve list did not find matching file for %s" % line[i]
                    curvedex -= 1 # back curvedex up to point to start of file's curves
                    curvedex += int(line[i].split('.')[-1])-1
                elif(int(line[i]) > 0 and int(line[i]) <= len(self.curvelist)):
                    curvedex = int(line[i])-1
                else:
                    print 'error: curve index out of bounds: ' + line[i]
                    skip = True
                if(not skip):
                    # Plot a copy so menu entries are never mutated.
                    current = self.curvelist[curvedex].copy()
                    self.addtoplot(current)
            self.plotedit = True
    except:
        print 'error - usage: curve <(<regex>) | list-of-menu-numbers>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_curve(self):
    print '\n Procedure: Select curves from the menu for plotting\n Usage: curve <(<regex>) | list-of-menu-numbers>\n Shortcuts: cur\n'
##remove all curves from the graph##
def do_erase(self, line):
    """Clear all plotted curves and annotations; axis limits are untouched."""
    self.plotlist = []
    self.usertexts = []
    self.plotedit = True
def help_erase(self):
    print '\n Macro: Erases all curves on the screen but leaves the limits untouched\n Usage: erase\n Shortcuts: era\n'
##remove a curve from the graph##
def do_delete(self, line):
if not line:
return 0
try:
if(len(line.split(':')) > 1):
self.do_delete(pdvutil.getletterargs(line))
return 0
else:
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
name = self.plotlist[j].plotname
if(name == line[i].upper()):
curve.plotname = ''
self.plotlist.pop(j)
break
self.plotedit = True
except:
print 'error - usage: del <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_delete(self):
print '\n Procedure: Delete curves from list\n Usage: delete <curve-list>\n Shortcuts: del\n'
##set a specific color for a list of curves##
def do_color(self, line):
    """Set the color of the listed curves; last token is the color name."""
    if not line:
        return 0
    try:
        line = line.split()
        color = line.pop(-1)
        line = ' '.join(line)
        if(len(line.split(':')) > 1):
            # Expand range notation, re-appending the color argument.
            self.do_color(pdvutil.getletterargs(line) + color)
            return 0
        else:
            line = line.split()
            for i in range(len(line)):
                for j in range(len(self.plotlist)):
                    name = self.plotlist[j].plotname
                    if(name == line[i].upper()):
                        if(mclr.is_color_like(color)):
                            self.plotlist[j].color = color
                        else:
                            print 'error: invalid color ' + color
                            return 0
                        break
            self.plotedit = True
    except:
        print 'error - usage: color <curve-list> <color-name>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_color(self):
    print '\n Procedure: Set the color of curves\n Usage: color <curve-list> <color-name>\n Color names can be "blue", "red", etc, or "#eb70aa", a 6 digit set\n of hexadecimal red-green-blue values (RRGGBB).\n The entire set of HTML-standard color names is available.\n Try "showcolormap" to see the available named colors!'
##return a curves mean and standard deviation##
def do_stats(self,line):
    """Print mean and sample standard deviation (ddof=1) of each curve's
    y-values."""
    if not line:
        return 0
    if len(line.split(':')) > 1:
        self.do_stats(pdvutil.getletterargs(line))
        return 0
    else:
        try:
            line = line.split()
            for i in range(len(line)):
                for j in range(len(self.plotlist)):
                    cur = self.plotlist[j]
                    if cur.plotname == line[i].upper():
                        yval = numpy.array(cur.y)
                        mean = (sum(yval)/len(yval))
                        ystd = numpy.std(yval, ddof = 1)
                        print '\nCurve ' + cur.plotname
                        print ' Mean: {} Standard Deviation: {}'.format(mean, ystd)
                        print '\n'
        except:
            if self.debug:
                traceback.print_exc(file=sys.stdout)
        finally:
            # Informational only; no replot.
            self.redraw = False
def help_stats(self):
    print ('\n Display the mean and standard deviation for the given curves.\n usage: stats <curve-list>\n')
##return a curve's attributes##
def do_getattributes(self,line):
    """Print the display attributes of the single named curve."""
    try:
        line = line.split()
        found = False
        for i in range(len(self.plotlist)):
            cur = self.plotlist[i]
            if cur.plotname == line[0].upper():
                print '\n'
                print ' Plot name = {}'.format(cur.plotname)
                print ' Color = {}'.format(cur.color)
                #pretty print line style
                # NOTE(review): pp_linestyle is only assigned for the four
                # styles below; any other linestyle value would raise a
                # NameError at the print, which the except swallows.
                if cur.linestyle == '-':
                    pp_linestyle = 'solid'
                elif cur.linestyle == ':':
                    pp_linestyle = 'dot'
                elif cur.linestyle == '--':
                    pp_linestyle= 'dash'
                elif cur.linestyle == '-.':
                    pp_linestyle = 'dotdash'
                print ' Style = {}'.format(pp_linestyle)
                print ' Curve width = {} '.format(cur.linewidth)
                print ' Edited = {}'.format(cur.edited)
                print ' Scatter = {}'.format(cur.scatter)
                print ' Linespoints = {}'.format(cur.linespoints)
                print ' Drawstyle = {}'.format (cur.drawstyle)
                print ' Dashes = {}'.format (cur.dashes)
                print ' Hidden = {}'.format (cur.hidden)
                print ' Marker = {}'.format (cur.marker)
                print ' Markersize = {}'.format (cur.markersize)
                print ' Markeredgecolor = {}'.format (cur.markeredgecolor)
                print ' Markerfacecolor = {}'.format (cur.markerfacecolor)
                print ' Ebar = {}'.format (cur.ebar)
                print ' Erange = {}'.format (cur.erange)
                print ' Plotprecedence = {}'.format (cur.plotprecedence)
                print '\n'
                found = True
                break
        if not found:
            print '\nerror - Curve %s does not exist - usage: getattributes <curve>\n' % line[0]
    except:
        if self.debug:
            traceback.print_exc(file=sys.stdout)
    finally:
        self.redraw = False
def help_getattributes(self):
    print ('\n Display the given curve\'s attributes, such as: color, style, and width. usage: getattributes <curve>')
##set the markerface color for a list of curves##
# markerfacecolor/markeredgecolor follow the same shape as do_color:
# pop the color off the end, expand ranges, then set the attribute on
# every matching plotted curve.
def do_markerfacecolor(self, line):
    """Set the marker face color of the listed curves."""
    if not line:
        return 0
    try:
        line = line.split()
        color = line.pop(-1)
        line = ' '.join(line)
        if len(line.split(':')) > 1:
            self.do_markerfacecolor(pdvutil.getletterargs(line) + color)
            return 0
        else:
            line = line.split()
            for i in range(len(line)):
                for j in range(len(self.plotlist)):
                    name = self.plotlist[j].plotname
                    if name == line[i].upper():
                        if mclr.is_color_like(color):
                            self.plotlist[j].markerfacecolor = color
                        else:
                            print 'error: invalid markerface color ' + color
                            return 0
                        break
            self.plotedit = True
    except:
        print 'error - usage: markerfacecolor <curve-list> <color-name>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_markerfacecolor(self):
    print '\n Procedure: Set the markerface color of curves\n Usage: markerfacecolor <curve-list> <color-name>\n Color names can be "blue", "red", etc, or "#eb70aa", a 6 digit set\n of hexadecimal red-green-blue values (RRGGBB).\n The entire set of HTML-standard color names is available.\n Try "showcolormap" to see the available named colors!'

##set the markeredge color for a list of curves##
def do_markeredgecolor(self, line):
    """Set the marker edge color of the listed curves."""
    if not line:
        return 0
    try:
        line = line.split()
        color = line.pop(-1)
        line = ' '.join(line)
        if len(line.split(':')) > 1:
            self.do_markeredgecolor(pdvutil.getletterargs(line) + color)
            return 0
        else:
            line = line.split()
            for i in range(len(line)):
                for j in range(len(self.plotlist)):
                    name = self.plotlist[j].plotname
                    if name == line[i].upper():
                        if mclr.is_color_like(color):
                            self.plotlist[j].markeredgecolor = color
                        else:
                            print 'error: invalid markeredge color ' + color
                            return 0
                        break
            self.plotedit = True
    except:
        print 'error - usage: markeredgecolor <curve-list> <color-name>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_markeredgecolor(self):
    print '\n Procedure: Set the markeredge color of curves\n Usage: markeredgecolor <curve-list> <color-name>\n Color names can be "blue", "red", etc, or "#eb70aa", a 6 digit set\n of hexadecimal red-green-blue values (RRGGBB).\n The entire set of HTML-standard color names is available.\n Try "showcolormap" to see the available named colors!'
## show available matplotlib styles
def do_showstyles(self, arg):
    """
    Show the available plot styles.
    :param arg: the command-line input
    """
    try:
        # stylesLoaded is set at import time by the matplotlib.style probe.
        if stylesLoaded:
            ss = list()
            for uc in pydvif.get_styles():
                ss.append(uc.encode('ascii', 'ignore'))
            print '\n'
            self.print_topics('Style Names (type style <style-name>):', ss, 15, 80)
        else:
            print '\nNo styles available. Try using a newer version of Python.\n'
    except:
        if self.debug:
            traceback.print_exc(file=sys.stdout)
    finally:
        self.redraw = False
def help_showstyles(self):
    print '\n Procedure: show the available pre-defined styles provided by matplotlib.\nUsage: showstyles\n'
## show color map
def do_showcolormap(self, line):
    """Temporarily replace the plot with a labeled grid of all matplotlib
    named colors; restores normal plotting after the user hits return."""
    try:
        ax = plt.gca()
        plt.cla() # wipe current axes
        # Lay the swatches out on a 1:3 aspect grid in axes coordinates.
        ratio = 1.0 / 3.0
        count = ceil(sqrt(len(mclr.cnames)))
        x_count = count * ratio
        y_count = count / ratio
        x = 0
        y = 0
        w = 1 / x_count
        h = 1 / y_count
        plt.xlim((0.0, 1.0))
        plt.ylim((0.0, 1.0))
        from matplotlib.collections import PatchCollection
        patches = []
        for c in mclr.cnames:
            pos = (x / x_count, y / y_count)
            rectangle = plt.Rectangle(pos, w, h, color=c)
            patches.append(rectangle)
            ax.add_artist(rectangle) # needed for colors to show up, sigh :-(
            ax.annotate(c, xy=pos)
            # Fill a column, then move to the next one.
            if y >= y_count-1:
                x += 1
                y = 0
            else:
                y += 1
        p = PatchCollection(patches)
        ax.add_collection(p)
        plt.draw()
        self.plotter.canvas.draw()
        print 'hit return to go back to your plots'
        x = raw_input()
        # Mark edited so the next postcmd repaints the user's curves.
        self.plotedit=True
    except:
        print 'error - usage: showcolormap'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_showcolormap(self):
    print '\n Procedure: show the available named colors\n Usage: showcolormap\n Hit <return> after viewing to go back to regular plotting'
##scale curve x values by given factor##
# mx/divx/my/divy/dx/dy all delegate to self.modcurve with an operation
# code and the trailing numeric argument; modcurve applies it to every
# curve in the list.
def do_mx(self, line):
    """Multiply the x values of the listed curves by a constant."""
    try:
        self.modcurve(line, 'mx', [line.split()[-1]])
        self.plotedit = True
    except:
        print 'error - usage: mx <curve-list> <value>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_mx(self):
    print '\n Procedure: Scale x values of curves by a constant\n Usage: mx <curve-list> <value>\n'

##scale curve x values by given factor##
def do_divx(self, line):
    """Divide the x values of the listed curves by a constant."""
    try:
        self.modcurve(line, 'divx', [line.split()[-1]])
        self.plotedit = True
    except:
        print 'error - usage: divx <curve-list> <value>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_divx(self):
    print '\n Procedure: Divide x values of curves by a constant\n Usage: divx <curve-list> <value>\n'

##scale curve y values by given factor##
def do_my(self, line):
    """Multiply the y values of the listed curves by a constant."""
    try:
        self.modcurve(line, 'my', [line.split()[-1]])
        self.plotedit = True
    except:
        print 'error - usage: my <curve-list> <value>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_my(self):
    print '\n Procedure: Scale y values of curves by a constant\n Usage: my <curve-list> <value>\n'

##scale curve y values by given factor##
def do_divy(self, line):
    """Divide the y values of the listed curves by a constant."""
    try:
        self.modcurve(line, 'divy', [line.split()[-1]])
        self.plotedit = True
    except:
        print 'error - usage: divy <curve-list> <value>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_divy(self):
    print '\n Procedure: Divide y values of curves by a constant\n Usage: divy <curve-list> <value>\n'

##shift curve x values by given factor##
def do_dx(self, line):
    """Shift the x values of the listed curves by a constant."""
    try:
        self.modcurve(line, 'dx', [line.split()[-1]])
        self.plotedit = True
    except:
        print 'error - usage: dx <curve-list> <value>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_dx(self):
    print '\n Procedure: Shift x values of curves by a constant\n Usage: dx <curve-list> <value>\n'

##shift curve y values by given factor##
def do_dy(self, line):
    """Shift the y values of the listed curves by a constant."""
    try:
        self.modcurve(line, 'dy', [line.split()[-1]])
        self.plotedit = True
    except:
        print 'error - usage: dy <curve-list> <value>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_dy(self):
    print '\n Procedure: Shift y values of curves by a constant\n Usage: dy <curve-list> <value>\n'
## take L2 norm of two curves
def do_L2(self, line):
try:
"""take L2 norm of two curves given as args"""
args = line.strip().split()
if len(args) != 2 and len(args) != 4:
raise RuntimeError, "wrong number of args to L2"
# put a '2' in between curves and xmin, xmax,
# to indicate the order of norm to take
args.insert(2, '2')
# put args back into a line
line = string.join(args)
# then pass to usual "norm" command
self.do_norm(line)
except:
print 'error - usage: L2 <curve> <curve> [<xmin> <xmax>]'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_L2(self):
print '\n Procedure: makes new curve that is the L2 norm of two args; the L2-norm is \n (integral( (curve1 - curve2)**2 ) )**(1/2) over the interval [xmin, xmax] .\n Usage: L2 <curve> <curve> [<xmin> <xmax>]\n Also prints value of integral to command line.\n'
## take L1 norm of two curves
def do_L1(self, line):
try:
"""take L1 norm of two curves given as args"""
args = line.strip().split()
if len(args) != 2 and len(args) != 4:
raise RuntimeError, "wrong number of args to L1"
# put a '1' in between curves and xmin, xmax,
# to indicate the order of norm to take
args.insert(2, '1')
# put args back into a line
line = string.join(args)
# then pass to usual "norm" command
self.do_norm(line)
except:
print 'error - usage: L1 <curve> <curve> [<xmin> <xmax>]'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_L1(self):
print '\n Procedure: makes new curve that is the L1 norm of two args; the L1-norm is \n integral( |curve1 - curve2| ) over the interval [xmin, xmax] .\n Usage: L1 <curve> <curve> [<xmin> <xmax>]\n Also prints value of integral to command line.\n'
## take arbitrary order norm of two curves
def do_norm(self, line):
try:
"""take norm of order N of two curves given as args"""
args = line.strip().split()
if len(args) != 3 and len(args) != 5:
raise RuntimeError, "wrong number of args to norm"
# curves a and b will be our operands
a = self.curvefromlabel(args[0])
b = self.curvefromlabel(args[1])
c = a - b # new numpy.array object
c.y = abs(c.y) # absolute value
if args[2].lower() != "inf":
N = string.atof(args[2]) # order of the norm
c = c**N
if len(args) == 5:
xmin = string.atof(args[3])
xmax = string.atof(args[4])
if xmax <= xmin:
raise RuntimeError, "xmin > xmax or xmin == xmax in do_norm()"
else:
xmin = min(c.x)
xmax = max(c.x)
if args[2].lower() == "inf":
Linf = 0.0
for xi, yi in zip(c.x, c.y):
if xi >= xmin and xi <= xmax:
Linf = max(Linf, yi)
print "Linf norm = ", Linf
d = c
d.y = numpy.array([Linf]*c.y.shape[0])
d.name = "Linf of " + a.plotname + " and " + b.plotname
else:
d = self.integrate(c,xmin,xmax) # d = integral( c**N )
d = d**(1.0/N)
print "L%d norm = " % N, max(d.y)
d.name = "L%d of " % N + a.plotname + " and " + b.plotname
self.addtoplot(d)
except:
print 'error - usage: norm <curve> <curve> <p> [<xmin> <xmax>]'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_norm(self):
print '\n Procedure: makes new curve that is the norm of two args; the p-norm is \n (integral( (curve1 - curve2)**p ) )**(1/p) over the interval [xmin, xmax] .\n Usage: norm <curve> <curve> <p> [<xmin> <xmax>]\n where p=order. Also prints value of integral to command line.\n'
## make a new curve - the max of the specified curves ##
def do_max(self, line):
if not line:
return 0
if len(line.split(':')) > 1:
self.do_max(pdvutil.getletterargs(line))
return 0
else:
try:
line = line.split()
if len(line) < 2:
return
curves = list()
for i in range(len(self.plotlist)):
for j in range(len(line)):
if self.plotlist[i].plotname == line[j].upper():
curves.append(self.plotlist[i])
break
nc = pydvif.max_curve(curves)
if nc is not None:
self.addtoplot(nc)
self.plotedit = True
except RuntimeError as rte:
print 'error: %s' % rte
if self.debug:
traceback.print_exc(file=sys.stdout)
except:
self.help_max()
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_max(self):
print('\n Procedure: makes new curve with max y values of curves.\n Usage: max <curve-list>\n')
## make a new curve - the min of the specified curves ##
def do_min(self, line):
if not line:
return 0
if len(line.split(':')) > 1:
self.do_min(pdvutil.getletterargs(line))
return 0
else:
try:
line = line.split()
if len(line) < 2:
return
curves = list()
for i in range(len(self.plotlist)):
for j in range(len(line)):
if self.plotlist[i].plotname == line[j].upper():
curves.append(self.plotlist[i])
break
nc = pydvif.min_curve(curves)
if nc is not None:
self.addtoplot(nc)
self.plotedit = True
except RuntimeError as rte:
print 'error: %s' % rte
if self.debug:
traceback.print_exc(file=sys.stdout)
except:
self.help_min()
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_min(self):
print('\n Procedure: makes new curve with min y values of curves.\n Usage: min <curve-list>\n')
## make a new curve - the average of the specified curves ##
def do_average(self, line):
if not line:
return 0
if len(line.split(':')) > 1:
self.do_average(pdvutil.getletterargs(line))
return 0
else:
try:
line = line.split()
if len(line) < 2:
return
curves = list()
for i in range(len(self.plotlist)):
for j in range(len(line)):
if self.plotlist[i].plotname == line[j].upper():
curves.append(self.plotlist[i])
break
nc = pydvif.average_curve(curves)
if nc is not None:
self.addtoplot(nc)
self.plotedit = True
except RuntimeError as rte:
print 'error: %s' % rte
if self.debug:
traceback.print_exc(file=sys.stdout)
except:
self.help_average()
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_average(self):
print '\n Procedure: Average the specified curvelist over the intersection of their domains.' \
'\n Usage: average <curvelist>\n'
## fit a curve with a polynomial function ##
def do_fit(self, line):
try:
"""fit curve to line: usage is 'fit curve [n] [logx] [logy]', where n=order of fit, default is linear"""
print "fitting curve:", line
args = line.strip().split()
if len(args) == 0 or len(args) > 4 :
raise RuntimeError, "wrong number of args to fit"
c = self.curvefromlabel(args[0])
logx, logy = False, False
if "logx" in args:
logx = True
args.remove("logx")
if "logy" in args:
logy = True
args.remove("logy")
assert len(args) in (1, 2)
if len(args) == 2:
n = string.atof(args[1])
else:
n = 1
nc = pydvif.fit(c, n, logx, logy)
nc.plotname = self.getcurvename()
self.addtoplot(nc)
except:
print 'error - usage: fit <curve> [n] [logx] [logy]'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_fit(self):
print '\n Procedure: make new curve that is polynomial fit to argument.\n Usage: fit <curve> [n] [logx] [logy]\n n=1 by default, logy means take log(y-values) before fitting, \n logx means take log(x-values) before fitting\n'
##return x values on curves at y value##
def do_getx(self, line):
try:
self.modcurve(line, 'getx', [line.split()[-1]])
print ''
except:
print 'error - usage: getx <curve-list> <value>'
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_getx(self):
print '\n Procedure: Return x values for a given y\n Usage: getx <curve-list> <y-value>\n'
##return y values on curves at x value##
def do_gety(self, line):
try:
self.modcurve(line, 'gety', [line.split()[-1]])
print ''
except:
print 'error - usage: gety <curve-list> <value>'
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_gety(self):
print '\n Procedure: Return y values for a given x\n Usage: gety <curve-list> <x-value>\n'
##get the range of the given curves##
def do_getrange(self, line):
try:
if(len(line.split(':')) > 1):
self.do_getrange(pdvutil.getletterargs(line))
return 0
else:
print '\n Get Range'
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
cur = self.plotlist[j]
if(cur.plotname == line[i].upper()):
plotname, miny, maxy = pydvif.getrange(cur)[0]
print '\nCurve ' + plotname
print ' ymin: %.6e ymax: %.6e' % (miny, maxy)
print ''
except:
print 'error - usage: getrange <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_getrange(self):
print '\n Procedure: Return range of curves\n Usage: getrange <curve-list>\n Shortcuts: get-range\n'
##get the domain of the given curves##
def do_getdomain(self, line):
try:
if(len(line.split(':')) > 1):
self.do_getdomain(pdvutil.getletterargs(line))
return 0
else:
print '\n Get Domain'
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
cur = self.plotlist[j]
if cur.plotname == line[i].upper():
plotname, minx, maxx = pydvif.getdomain(cur)[0]
print '\nCurve ' + plotname
print ' xmin: %.6e xmax: %.6e' % (minx, maxx)
print ''
except:
print 'error - usage: getdomain <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_getdomain(self):
print '\n Procedure: Return domain of curves\n Usage: getdomain <curve-list>\n Shortcuts: get-domain\n'
##get the max y-value for the given curve##
def do_getymax(self, line):
if not line:
return 0
try:
line = line.split()
xlow = None
xhi = None
try:
xhi = float(line[-1])
except:
xhi = None
try:
xlow = float(line[-2])
except:
xlow = None
if (xlow is None and xhi is not None) or (xlow is not None and xhi is None):
raise RuntimeError, "<xmin> and <xmax> must BOTH be specified"
for i in range(len(self.plotlist)):
cur = self.plotlist[i]
if cur.plotname == line[0].upper():
plotname, maxy = pydvif.getymax(cur, xlow, xhi)
print '\nCurve ' + plotname
print ' ymax: %.6f' % maxy
print ''
except:
print 'error - usage: getymax <curve> [<xmin> <xmax>]'
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_getymax(self):
print '\n Procedure: Return the maximum y-value for the curve within the specified domain.\n Usage: getymax <curve> [<xmin> <xmax>]\n'
##get the min y-value for the given curve##
def do_getymin(self, line):
if not line:
return 0
try:
line = line.split()
xlow = None
xhi = None
try:
xhi = float(line[-1])
except:
xhi = None
try:
xlow = float(line[-2])
except:
xlow = None
if (xlow is None and xhi is not None) or (xlow is not None and xhi is None):
raise RuntimeError, "<xmin> and <xmax> must BOTH be specified"
for i in range(len(self.plotlist)):
cur = self.plotlist[i]
if cur.plotname == line[0].upper():
plotname, miny = pydvif.getymin(cur, xlow, xhi)
print '\nCurve ' + plotname
print ' ymin: %.6f' % miny
print ''
except:
print 'error - usage: getymin <curve> [<xmin> <xmax>]'
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_getymin(self):
print '\n Procedure: Return the minimum y-value for the curve within the specified domain.\n Usage: getymin <curve> [<xmin> <xmax>]\n'
##get the label of a given curve##
def do_getlabel(self, line):
try:
line = line.split()
for i in range(len(self.plotlist)):
cur = self.plotlist[i]
if cur.plotname == line[0].upper():
print "\nLabel = %s\n" % cur.name
except:
print 'error - usage: getlabel <curve>'
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_getlabel(self):
print "\n Procedure: Return the given curve's label\n Usage: getlabel <curve>\n"
##Display the y-values in the specified curves##
def do_disp(self, line):
try:
if(len(line.split(':')) > 1):
self.do_disp(pdvutil.getletterargs(line))
return 0
else:
print '\n'
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
cur = self.plotlist[j]
if cur.plotname == line[i].upper():
dispname, ss = pydvif.disp(cur, False)
self.print_topics('Curve %s: %s' % (dispname, cur.name), ss, 15, 100)
except:
print 'error - usage: disp <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_disp(self):
print '\n Procedure: Display the y-values in the specified curve(s). \n Usage: disp <curve-list>\n'
##Display the x-values in the specified curves##
def do_dispx(self, line):
try:
if(len(line.split(':')) > 1):
self.do_dispx(pdvutil.getletterargs(line))
return 0
else:
print '\n'
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
cur = self.plotlist[j]
if cur.plotname == line[i].upper():
dispname, ss = pydvif.disp(cur)
self.print_topics('Curve %s: %s' % (dispname, cur.name), ss, 15, 100)
except:
print 'error - usage: dispx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_dispx(self):
print '\n Procedure: Display the x-values in the specified curve(s). \n Usage: dispx <curve-list>\n'
## Display the number of points for the given curve ##
def do_getnumpoints(self, line):
try:
line = line.split()
found = False
for i in range(len(self.plotlist)):
cur = self.plotlist[i]
if cur.plotname == line[0].upper():
print '\n Number of points = %d\n' % pydvif.getnumpoints(cur)
found = True
break
if not found:
print '\nerror - Curve %s does not exist - usage: getnumpoints <curve>\n' % line[0]
except:
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_getnumpoints(self):
print ('\n Display the given curve\'s number of points.\n usage: getnumpoints <curve>')
##modify curve to absolute all y values##
def do_abs(self, line):
try:
self.func_curve(line, 'abs', 0)
self.plotedit = True
except:
print 'error - usage: abs <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_abs(self):
print '\n Procedure: Take absolute value of y values of curves\n Usage: abs <curve-list>\n'
def do_absx(self, line):
try:
self.func_curve(line, 'abs', 1)
self.plotedit = True
except:
print 'error - usage: absx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_absx(self):
print '\n Procedure: Take absolute value of x values of curves\n Usage: absx <curve-list>\n'
## take the natural logarithm of the curve y-values##
def do_log(self, line):
if not line:
return 0
try:
if len(line.split(':')) > 1:
self.do_log(pdvutil.getletterargs(line))
return 0
else:
line = line.split()
keepnegs = True
if line[-1].upper() == 'TRUE' or line[-1].upper() == 'FALSE':
keepnegs = keepnegs == line.pop(-1)
curves = list()
for i in range(len(line)):
for j in range(len(self.plotlist)):
name = self.plotlist[j].plotname
if name == line[i].upper():
curves.append(self.plotlist[j])
break
pydvif.log(curves, keepnegs)
self.plotedit = True
except:
print 'error - usage: log <curve-list> [keep-neg-vals: True | False]'
if self.debug:
print traceback.print_exc(file=sys.stdout)
def help_log(self):
print '\n Procedure: take natural logarithm of y-values of curves.\n' \
' If the optional argument keep-neg-vals is set to False, then zero and negative y-values will be discarded. keep-neg-vals is True by default.\n' \
' Usage: log <curve-list> [keep-neg-vals: True | False]\n Shortcut: ln\n'
## take the natural logarithm of the curve x-values ##
def do_logx(self, line):
if not line:
return 0
try:
if len(line.split(':')) > 1:
self.do_logx(pdvutil.getletterargs(line))
return 0
else:
line = line.split()
keepnegs = True
if line[-1].upper() == 'TRUE' or line[-1].upper() == 'FALSE':
keepnegs = keepnegs == line.pop(-1)
curves = list()
for i in range(len(line)):
for j in range(len(self.plotlist)):
name = self.plotlist[j].plotname
if name == line[i].upper():
curves.append(self.plotlist[j])
break
pydvif.logx(curves, keepnegs)
self.plotedit = True
except:
print 'error - usage: logx <curve-list> [keep-neg-vals: True | False]'
if self.debug:
print traceback.print_exc(file=sys.stdout)
def help_logx(self):
print '\n Procedure: take natural logarithm of x-values of curves.\n' \
' If the optional argument keep-neg-vals is set to False, then zero and negative x-values will be discarded. keep-neg-vals is True by default.\n' \
' Usage: logx <curve-list> [keep-neg-vals: True | False]\n Shortcut: lnx\n'
## take the base 10 logarithm of the curve y-values##
def do_log10(self, line):
if not line:
return 0
try:
if len(line.split(':')) > 1:
self.do_log10(pdvutil.getletterargs(line))
return 0
else:
line = line.split()
keepnegs = True
if line[-1].upper() == 'TRUE' or line[-1].upper() == 'FALSE':
keepnegs = keepnegs == line.pop(-1)
curves = list()
for i in range(len(line)):
for j in range(len(self.plotlist)):
name = self.plotlist[j].plotname
if name == line[i].upper():
curves.append(self.plotlist[j])
break
pydvif.log10(curves, keepnegs)
self.plotedit = True
except:
print 'error - usage: log10 <curve-list> [keep-neg-vals: True | False]'
if self.debug:
print traceback.print_exc(file=sys.stdout)
def help_log10(self):
print '\n Procedure: take base 10 logarithm of y values of curves.\n' \
' If the optional argument keep-neg-vals is set to False, then zero and negative x-values will be discarded. keep-neg-vals is True by default.\n' \
' Usage: log10 <curve-list> [keep-neg-vals: True | False]'
## take the base 10 logarithm of the curve x-values##
def do_log10x(self, line):
if not line:
return 0
try:
if len(line.split(':')) > 1:
self.do_log10x(pdvutil.getletterargs(line))
return 0
else:
line = line.split()
keepnegs = True
if line[-1].upper() == 'TRUE' or line[-1].upper() == 'FALSE':
keepnegs = keepnegs == line.pop(-1)
curves = list()
for i in range(len(line)):
for j in range(len(self.plotlist)):
name = self.plotlist[j].plotname
if name == line[i].upper():
curves.append(self.plotlist[j])
break
pydvif.log10x(curves, keepnegs)
self.plotedit = True
except:
print 'error - usage: log10x <curve-list> [keep-neg-vals: True | False]'
if self.debug:
print traceback.print_exc(file=sys.stdout)
def help_log10x(self):
print '\n Procedure: take base 10 logarithm of x values of curves.\n' \
' If the optional argument keep-neg-vals is set to False, then zero and negative x-values will be discarded. keep-neg-vals is True by default.\n' \
' Usage: log10x <curve-list> [keep-neg-vals: True | False]'
## exponentiate the curve##
def do_exp(self, line):
try:
self.func_curve(line, 'exp', 0)
self.plotedit = True
except:
print 'error - usage: exp <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_exp(self):
print '\n Procedure: e**y, exponentiate y values of curves\n Usage: exp <curve-list>\n'
def do_expx(self, line):
try:
self.func_curve(line, 'exp', 1)
self.plotedit = True
except:
print 'error - usage: expx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_expx(self):
print '\n Procedure: e**y, exponentiate x values of curves\n Usage: expx <curve-list>\n'
##take the cosine of the curve##
def do_cos(self, line):
try:
self.func_curve(line, 'cos', 0)
self.plotedit = True
except:
print 'error - usage: cos <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_cos(self):
print '\n Procedure: Take cosine of y values of curves\n Usage: cos <curve-list>\n'
def do_cosx(self, line):
try:
self.func_curve(line, 'cos', 1)
self.plotedit = True
except:
print 'error - usage: cos <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_cosx(self):
print '\n Procedure: Take cosine of x values of curves\n Usage: cos <curve-list>\n'
##take the sine of the curve##
def do_sin(self, line):
try:
self.func_curve(line, 'sin', 0)
self.plotedit = True
except:
print 'error - usage: sin <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_sin(self):
print '\n Procedure: Take sine of y values of curves\n Usage: sin <curve-list>\n'
def do_sinx(self, line):
try:
self.func_curve(line, 'sin', 1)
self.plotedit = True
except:
print 'error - usage: sinx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_sinx(self):
print '\n Procedure: Take sine of x values of curves\n Usage: sinx <curve-list>\n'
##take the tangent of the curve##
def do_tan(self, line):
try:
self.func_curve(line, 'tan', 0)
self.plotedit = True
except:
print 'error - usage: tan <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_tan(self):
print '\n Procedure: Take tangent of y values of curves\n Usage: tan <curve-list>\n'
def do_tanx(self, line):
try:
self.func_curve(line, 'tan', 1)
self.plotedit = True
except:
print 'error - usage: tanx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_tanx(self):
print '\n Procedure: Take tangent of x values of curves\n Usage: tanx <curve-list>\n'
##take the arccosine of the curve##
def do_acos(self, line):
try:
self.func_curve(line, 'acos', 0)
self.plotedit = True
except:
print 'error - usage: acos <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_acos(self):
print '\n Procedure: Take arccosine of y values of curves\n Usage: acos <curve-list>\n'
def do_acosx(self, line):
try:
self.func_curve(line, 'acos', 1)
self.plotedit = True
except:
print 'error - usage: acosx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_acosx(self):
print '\n Procedure: Take arccosine of x values of curves\n Usage: acosx <curve-list>\n'
##take the arcsine of the curve##
def do_asin(self, line):
try:
self.func_curve(line, 'asin', 0)
self.plotedit = True
except:
print 'error - usage: asin <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_asin(self):
print '\n Procedure: Take arcsine of y values of curves\n Usage: asin <curve-list>\n'
def do_asinx(self, line):
"""
Take arcsine of x values of curves.
:param line: User Command-Line Input (arsinx <curve-list>))
:type line: string
"""
try:
self.func_curve(line, 'asin', 1)
self.plotedit = True
except:
print 'error - usage: asinx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_asinx(self):
print '\n Procedure: Take arcsine of x values of curves\n Usage: asinx <curve-list>\n'
##take the arctangent of the curve##
def do_atan(self, line):
try:
self.func_curve(line, 'atan', 0)
self.plotedit = True
except:
print 'error - usage: atan <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_atan(self):
print '\n Procedure: Take arctangent of y values of curves\n Usage: atan <curve-list>\n'
def do_atanx(self, line):
try:
self.func_curve(line, 'atan', 1)
self.plotedit = True
except:
print 'error - usage: atanx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_atanx(self):
print '\n Procedure: Take arctangent of x values of curves\n Usage: atanx <curve-list>\n'
##peform the atan2 method for a pair of curves##
# Note we currently only support atan2 for two distinct curves.
def do_atan2(self, line):
try:
letterargs = [x.upper() for x in line.split()]
assert len(letterargs) == 2
a, b = None, None
for p in self.plotlist:
if p.plotname == letterargs[0]:
a = p
elif p.plotname == letterargs[1]:
b = p
assert a and b
c = pydvif.atan2(a, b, tuple(letterargs))
self.addtoplot(c)
self.plotedit = True
except:
print 'error - usage: atan curve1 curve2'
if(self.debug): traceback.print_exc(file=sys.stdout)
def help_atan2(self):
print '\n Procedure: Take atan2 of two curves\n Usage: atan2 curve1 curve2\n'
##take the hyperbolic cosine of the curve##
def do_cosh(self, line):
try:
self.func_curve(line, 'cosh', 0)
self.plotedit = True
except:
print 'error - usage: cosh <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_cosh(self):
print '\n Procedure: Take hyperbolic cosine of y values of curves\n Usage: cosh <curve-list>\n'
def do_coshx(self, line):
try:
self.func_curve(line, 'cosh', 1)
self.plotedit = True
except:
print 'error - usage: coshx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_coshx(self):
print '\n Procedure: Take hyperbolic cosine of x values of curves\n Usage: coshx <curve-list>\n'
##take the hyperbolic sine of the curve##
def do_sinh(self, line):
try:
self.func_curve(line, 'sinh', 0)
self.plotedit = True
except:
print 'error - usage: sinh <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_sinh(self):
print '\n Procedure: Take hyperbolic sine of y values of curves\n Usage: sinh <curve-list>\n'
def do_sinhx(self, line):
try:
self.func_curve(line, 'sinh', 1)
self.plotedit = True
except:
print 'error - usage: sinhx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_sinhx(self):
print '\n Procedure: Take hyperbolic sine of x values of curves\n Usage: sinhx <curve-list>\n'
##take the hyperbolic tangent of the curve##
def do_tanh(self, line):
try:
self.func_curve(line, 'tanh', 0)
self.plotedit = True
except:
print 'error - usage: tanh <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_tanh(self):
print '\n Procedure: Take hyperbolic tangent of y values of curves\n Usage: tanh <curve-list>\n'
def do_tanhx(self, line):
try:
self.func_curve(line, 'tanh', 1)
self.plotedit = True
except:
print 'error - usage: tanhx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_tanhx(self):
print '\n Procedure: Take hyperbolic tangent of x values of curves\n Usage: tanhx <curve-list>\n'
##take the inverse hyperbolic cosine of the curve##
def do_acosh(self, line):
try:
self.func_curve(line, 'acosh', 0)
self.plotedit = True
except:
print 'error - usage: acosh <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_acosh(self):
print '\n Procedure: Take hyperbolic arccosine of y values of curves\n Usage: acosh <curve-list>\n'
def do_acoshx(self, line):
try:
self.func_curve(line, 'acosh', 1)
self.plotedit = True
except:
print 'error - usage: acoshx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_acoshx(self):
print '\n Procedure: Take hyperbolic arccosine of x values of curves\n Usage: acoshx <curve-list>\n'
##take the hyperbolic arcsine of the curve##
def do_asinh(self, line):
try:
self.func_curve(line, 'asinh', 0)
self.plotedit = True
except:
print 'error - usage: asinh <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_asinh(self):
print '\n Procedure: Take hyperbolic arcsine of y values of curves\n Usage: asinh <curve-list>\n'
def do_asinhx(self, line):
try:
self.func_curve(line, 'asinh', 1)
self.plotedit = True
except:
print 'error - usage: asinhx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_asinhx(self):
print '\n Procedure: Take hyperbolic arcsine of x values of curves\n Usage: asinhx <curve-list>\n'
##take the hyperbolic arctangent of the curve##
def do_atanh(self, line):
try:
self.func_curve(line, 'atanh', 0)
self.plotedit = True
except:
print 'error - usage: atanh <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_atanh(self):
print '\n Procedure: Take hyperbolic arctangent of y values of curves\n Usage: atanh <curve-list>\n'
def do_atanhx(self, line):
try:
self.func_curve(line, 'atanh', 1)
self.plotedit = True
except:
print 'error - usage: atanhx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_atanhx(self):
print '\n Procedure: Take hyperbolic arctangent of x values of curves\n Usage: atanhx <curve-list>\n'
##take the zeroth order Bessel function of the curve##
def do_j0(self, line):
try:
self.func_curve(line, 'j0', 0)
self.plotedit = True
except:
print 'error - usage: j0 <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_j0(self):
print '\n Procedure: Take the zeroth order Bessel function of y values of curves\n Usage: j0 <curve-list>\n'
def do_j0x(self, line):
try:
self.func_curve(line, 'j0', 1)
self.plotedit = True
except:
print 'error - usage: j0x <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_j0x(self):
print '\n Procedure: Take the zeroth order Bessel function of x values of curves\n Usage: j0x <curve-list>\n'
##take the first order Bessel function of the curve##
def do_j1(self, line):
try:
self.func_curve(line, 'j1', 0)
self.plotedit = True
except:
print 'error - usage: j1 <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_j1(self):
print '\n Procedure: Take the first order Bessel function of y values of curves\n Usage: j1 <curve-list>\n'
def do_j1x(self, line):
try:
self.func_curve(line, 'j1', 1)
self.plotedit = True
except:
print 'error - usage: j1x <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_j1x(self):
print '\n Procedure: Take the first order Bessel function of x values of curves\n Usage: j1x <curve-list>\n'
##take the nth order Bessel function of the curve##
def do_jn(self, line):
try:
print "do_jn: "+line+" '"+line.split()[-1]+"' "
self.func_curve(line, 'jn', 0, [line.split()[-1]])
self.plotedit = True
except:
print 'error - usage: jn <curve-list> n'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_jn(self):
print '\n Procedure: Take the nth order Bessel function of y values of curves\n Usage: jn <curve-list> n\n'
def do_jnx(self, line):
try:
self.func_curve(line, 'jn', 1, [line.split()[-1]])
self.plotedit = True
except:
print 'error - usage: jnx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_jnx(self):
print '\n Procedure: Take the nth order Bessel function of x values of curves\n Usage: jnx <curve-list> n\n'
##take the zeroth order Bessel function of the second kind of the curve##
def do_y0(self, line):
try:
self.func_curve(line, 'y0', 0)
self.plotedit = True
except:
print 'error - usage: y0 <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_y0(self):
print '\n Procedure: Take the zeroth order Bessel function of the second kind of y values of curves\n Usage: y0 <curve-list>\n'
def do_y0x(self, line):
try:
self.func_curve(line, 'y0', 1)
self.plotedit = True
except:
print 'error - usage: y0x <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_y0x(self):
print '\n Procedure: Take the zeroth order Bessel function of the second kind of x values of curves\n Usage: y0x <curve-list>\n'
##take the first order Bessel function of the second kind of the curve##
def do_y1(self, line):
try:
self.func_curve(line, 'y1', 0)
self.plotedit = True
except:
print 'error - usage: y1 <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_y1(self):
print '\n Procedure: Take the first order Bessel function of the second kind of y values of curves\n Usage: y1 <curve-list>\n'
    def do_y1x(self, line):
        """Take the first order Bessel function of the second kind of the x values of the listed curves."""
        try:
            self.func_curve(line, 'y1', 1)
            self.plotedit = True
        except:
            print 'error - usage: y1x <curve-list>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_y1x(self):
        """Print usage for the y1x command."""
        print '\n Procedure: Take the first order Bessel function of the second kind of x values of curves\n Usage: y1x <curve-list>\n'
##take the nth order Bessel function of the second kind of the curve##
    def do_yn(self, line):
        """Take the nth order Bessel function of the second kind of y values; last token of *line* is the order n."""
        try:
            self.func_curve(line, 'yn', 0, [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: yn <curve-list> n'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_yn(self):
        """Print usage for the yn command."""
        print '\n Procedure: Take the nth order Bessel function of the second kind of y values of curves\n Usage: yn <curve-list> <n>\n'
def do_ynx(self, line):
try:
self.func_curve(line, 'yn', 1, [line.split()[-1]])
self.plotedit = True
except:
print 'error - usage: ynx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_ynx(self):
print '\n Procedure: Take the nth order Bessel function of the second kind of x values of curves\n Usage: ynx <curve-list> <n>\n'
##Raise a fixed value, a, to the power of the y values of the curves
    def do_powa(self, line):
        """Raise a fixed value a (last token of *line*) to the power of the y values of the curves."""
        try:
            self.func_curve(line, 'powa', 0, [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: powa <curve-list> a'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_powa(self):
        """Print usage for the powa command."""
        print '\n Procedure: Raise a fixed value, a, to the power of the y values of the curves\n Usage: powa <curve-list> a\n'
    def do_powax(self, line):
        """Raise a fixed value a (last token of *line*) to the power of the x values of the curves."""
        try:
            self.func_curve(line, 'powa', 1, [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: powax <curve-list> a'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_powax(self):
        """Print usage for the powax command."""
        print '\n Procedure: Raise a fixed value, a, to the power of the x values of the curves\n Usage: powax <curve-list> a\n'
##Raise the y values of the curves to a fixed power
    def do_powr(self, line):
        """Raise the y values of the curves to a fixed power p (last token of *line*)."""
        try:
            self.func_curve(line, 'powr', 0, [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: powr <curve-list> a'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_powr(self):
        """Print usage for the power/powr command."""
        print '\n Procedure: Raise the y values of the curves to a fixed power, y=y^p\n Usage: power <curve-list> p\n Shortcuts: pow , powr\n'
    def do_powrx(self, line):
        """Raise the x values of the curves to a fixed power p (last token of *line*)."""
        try:
            self.func_curve(line, 'powr', 1, [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: powrx <curve-list> a'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_powrx(self):
        """Print usage for the powerx/powrx command."""
        print '\n Procedure: Raise the x values of the curves to a fixed power, x=x^p\n Usage: powerx <curve-list> p\n Shortcuts: powx , powrx\n'
##Take the reciprocal of the y values of the curves
    def do_recip(self, line):
        """Take the reciprocal of the y values of the listed curves."""
        try:
            self.func_curve(line, 'recip', 0)
            self.plotedit = True
        except:
            print 'error - usage: recip <curve-list>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_recip(self):
        """Print usage for the recip command."""
        print '\n Procedure: Take the reciprocal of the y values of the curves\n Usage: recip <curve-list>\n'
    def do_recipx(self, line):
        """Take the reciprocal of the x values of the listed curves."""
        try:
            self.func_curve(line, 'recip', 1)
            self.plotedit = True
        except:
            print 'error - usage: recipx <curve-list>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_recipx(self):
        """Print usage for the recipx command."""
        print '\n Procedure: Take the reciprocal of the x values of the curves\n Usage: recipx <curve-list>\n'
##Take the square of the y values of the curves
    def do_sqr(self, line):
        """Square the y values of the listed curves."""
        try:
            self.func_curve(line, 'sqr', 0)
            self.plotedit = True
        except:
            print 'error - usage: sqr <curve-list>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_sqr(self):
        """Print usage for the square/sqr command."""
        print '\n Procedure: Take the square of the y values of the curves\n Usage: square <curve-list>\n Shortcut: sqr\n'
    def do_sqrx(self, line):
        """Square the x values of the listed curves."""
        try:
            self.func_curve(line, 'sqr', 1)
            self.plotedit = True
        except:
            print 'error - usage: sqrx <curve-list>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_sqrx(self):
        """Print usage for the squarex/sqrx command."""
        print '\n Procedure: Take the square of the x values of the curves\n Usage: squarex <curve-list>\n Shortcut: sqrx\n'
    ##Take the square root of the y values of the curves
    def do_sqrt(self, line):
        """Take the square root of the y values of the listed curves."""
        try:
            self.func_curve(line, 'sqrt', 0)
            self.plotedit = True
        except:
            print 'error - usage: sqrt <curve-list>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_sqrt(self):
        """Print usage for the sqrt command."""
        print '\n Procedure: Take the square root of the y values of the curves\n Usage: sqrt <curve-list>\n'
    def do_sqrtx(self, line):
        """Take the square root of the x values of the listed curves."""
        try:
            self.func_curve(line, 'sqrt', 1)
            self.plotedit = True
        except:
            print 'error - usage: sqrtx <curve-list>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_sqrtx(self):
        """Print usage for the sqrtx command."""
        print '\n Procedure: Take the square root of the x values of the curves\n Usage: sqrtx <curve-list>\n'
##set labels and titles##
    def do_xlabel(self, line):
        """Set the x-axis label to *line* (stored on self and applied to the live plot)."""
        try:
            self.xlabel = line
            plt.xlabel(line)
        except:
            print 'error - usage: xlabel <label-name>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_xlabel(self):
        """Print usage for the xlabel command."""
        print '\n Procedure: Set a label for the x axis\n Usage: xlabel <label-name>\n'
    def do_ylabel(self, line):
        """Set the y-axis label to *line* (stored on self and applied to the live plot)."""
        try:
            self.ylabel = line
            plt.ylabel(line)
        except:
            print 'error - usage: ylabel <label-name>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_ylabel(self):
        """Print usage for the ylabel command."""
        print '\n Procedure: Set a label for the y axis\n Usage: ylabel <label-name>\n'
    def do_title(self, line):
        """Set the plot title to *line* (stored on self and applied to the live plot)."""
        try:
            self.title = line
            plt.title(line)
        except:
            print 'error - usage: title <title-name>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_title(self):
        """Print usage for the title command."""
        print '\n Procedure: Set a title for the plot\n Usage: title <title-name>\n'
##show or hide the key/legend##
    def do_legend(self, line):
        """Show/hide the legend and set its position and column count.

        Each whitespace token of *line* is interpreted independently:
        a known position keyword, 'on'/'off', or an integer column count.
        Any other token raises and triggers the usage message.
        """
        try:
            # Map of position keywords to matplotlib legend location codes.
            locs = {'best':0, 'ur':1, 'ul':2, 'll':3, 'lr':4, 'cl':6, 'cr':7, 'lc':8, 'uc':9, 'c':10}
            line = line.strip().split()
            for i in range(len(line)):
                key = line[i].lower()
                if locs.has_key(key):
                    self.key_loc = locs[key]
                elif key == 'on':
                    self.showkey = True
                elif key == 'off':
                    self.showkey = False
                else:
                    try:
                        self.key_ncol = int(key)
                    except:
                        raise Exception, 'Invalid argument: %s' % key
        except:
            print 'error - usage: legend [on | off] [<position>] [<number of columns>]'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_legend(self):
        """Print usage for the legend command."""
        print '\n Variable: Show the legend if True. Set legend position as best, ur, ul, ll, lr, cl, cr, uc, lc, c\n Usage: legend [on | off] [<position>] [<number of columns>]\n Shortcuts: leg, key\n'
## adjust the width of the label column in 'menu' and 'lst' commands
    def do_namewidth(self, line):
        """Set (or, with no argument, report) the label column width used by 'menu' and 'lst'; negative values clamp to 0."""
        try:
            if len(line) == 0:
                print 'label column width is currently', self.namewidth
            else:
                line = line.strip().split()
                width = int(line[0])
                if width < 0:
                    width = 0
                self.namewidth = width
                print 'changing label column width to ', self.namewidth
        except:
            print 'error - usage: namewidth <integer>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_namewidth(self):
        """Print usage for the namewidth command."""
        print '\n Command: change the width of the first column of the menu and lst output\n Usage: namewidth <integer>'
##adjust the length of the lines in the legend##
    def do_handlelength(self, line):
        """Set the legend line-sample length; 'none' (any case) resets it to the matplotlib default."""
        try:
            key = line.strip().split()[0]
            if key.upper() == "NONE":
                self.handlelength = None
            else:
                # Negative inputs are clamped to zero.
                self.handlelength = max(0, int(key))
            self.plotedit = True
        except:
            print 'error -- usage:'
            self.help_handlelength()
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_handlelength(self):
        """Print usage for the handlelength command."""
        print '\n Command: change the length of the lines in the legend'
        print ' Usage: handlelength <integer>'
##show or hide the grid##
def do_grid(self, line):
try:
line = line.strip()
if(line == '0' or line.upper() == 'OFF'):
self.showgrid = False
elif(line == '1' or line.upper() == 'ON'):
self.showgrid = True
else:
print 'invalid input: requires on or off as argument'
except:
print 'error - usage: grid on | off'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_grid(self):
print '\n Variable: Show the grid if True\n Usage: grid on | off\n'
##show or hide letter markers on plotted curves##
def do_dataid(self, line):
try:
line = line.strip()
if(line == '0' or line.upper() == 'OFF'):
self.showletters = False
elif(line == '1' or line.upper() == 'ON'):
self.showletters = True
else:
print 'invalid input: requires on or off as argument'
except:
print 'error - usage: dataid on | off'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_dataid(self):
print '\n Variable: Show curve identifiers if True\n Usage: dataid on | off\n Shortcuts: data-id\n'
##show the x axis on a log scale##
    def do_xlogscale(self, line):
        """Show the x axis on a log scale ('1'/'on') or linear ('0'/'off'); adjusts stored x limits to stay positive."""
        try:
            line = line.strip()
            if(line == '0' or line.upper() == 'OFF'):
                self.xlogscale = False
            elif(line == '1' or line.upper() == 'ON'):
                self.xlogscale = True
                plt.xscale('log')
                if self.xlim is not None:
                    # Log axes cannot show values <= 0: clamp the lower
                    # limit and keep the upper limit at least 3 decades up.
                    xmin = max(1e-2, self.xlim[0])
                    self.xlim = (xmin, max(self.xlim[1], 1000.0*xmin))
            else:
                print 'invalid input: requires on or off as argument'
        except:
            print 'error - usage: xlogscale on | off'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_xlogscale(self):
        """Print usage for the xlogscale command."""
        print '\n Variable: Show x axis in log scale if True\n Usage: xlogscale on | off\n Shortcuts: x-log-scale , xls\n'
##show the y axis on a log scale##
    def do_ylogscale(self, line):
        """Show the y axis on a log scale ('1'/'on') or linear ('0'/'off'); adjusts stored y limits to stay positive."""
        try:
            line = line.strip()
            if(line == '0' or line.upper() == 'OFF'):
                self.ylogscale = False
            elif(line == '1' or line.upper() == 'ON'):
                self.ylogscale = True
                plt.yscale('log')
                if self.ylim is not None:
                    # Log axes cannot show values <= 0: clamp the lower
                    # limit and keep the upper limit at least 3 decades up.
                    ymin = max(1e-2, self.ylim[0])
                    self.ylim = (ymin, max(self.ylim[1], 1000.0*ymin))
            else:
                print 'invalid input: requires on or off as argument'
        except:
            print 'error - usage: ylogscale on | off'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_ylogscale(self):
        """Print usage for the ylogscale command."""
        print '\n Variable: Show y axis in log scale if True\n Usage: ylogscale on | off\n Shortcuts: y-log-scale , yls\n'
    ##set whether to use the GUI min/max values for the plot limits##
    def do_guilims(self, line):
        """Enable/disable using the GUI min/max values as the x and y plot limits; disabling clears the stored limits."""
        try:
            line = line.strip()
            if line == '0' or line.upper() == 'OFF':
                self.guilims = False
                self.xlim = None
                self.ylim = None
            elif line == '1' or line.upper() == 'ON':
                self.guilims = True
            else:
                print 'invalid input: requires on or off as argument'
        except:
            print 'error - usage: guilims on | off'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_guilims(self):
        """Print usage for the guilims command."""
        print '\n Variable: Set whether or not to use the GUI min/max values for the X and Y limits. Default is off.\n Usage: guilims on | off\n'
##set whether to update after each command##
def do_update(self, line):
try:
line = line.strip()
if line == '0' or line.upper() == 'OFF':
self.update = False
elif line == '1' or line.upper() == 'ON':
self.update = True
else:
print 'invalid input: requires on or off as argument'
except:
print 'error - usage: update on | off'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_update(self):
print '\n Variable: Update the plot after each command if True\n Usage: update on | off\n'
##set whether or not to use LaTeX font rendering##
    def do_latex(self, line):
        """Enable/disable LaTeX text rendering in matplotlib ('1'/'on' or '0'/'off')."""
        try:
            line = line.strip()
            if(line == '0' or line.upper() == 'OFF'):
                matplotlib.rc('text', usetex=False)
            elif(line == '1' or line.upper() == 'ON'):
                matplotlib.rc('text', usetex=True)
            else:
                print 'invalid input: requires on or off as argument'
        except:
            print 'latex on | off'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_latex(self):
        """Print usage for the latex command."""
        print '\n Variable: Use LaTeX font rendering if True\n Usage: latex on | off\n'
##show given curves as points rather than continuous line##
    def do_scatter(self, line):
        """Toggle scatter (points-only) rendering for the listed curves; last token is the on/off flag."""
        try:
            self.modcurve(line, 'scatter', [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: scatter <curve-list> on | off'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_scatter(self):
        """Print usage for the scatter command."""
        print '\n Procedure: Plot curves as scatter plots\n Usage: scatter <curve-list> on | off\n'
##show given curves as points and a line rather than continuous line##
    def do_linespoints(self, line):
        """Toggle lines-plus-points rendering for the listed curves; last token is the on/off flag."""
        try:
            self.modcurve(line, 'linespoints', [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: linespoints <curve-list> on | off'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_linespoints(self):
        """Print usage for the linespoints command."""
        print '\n Procedure: Plot curves as linespoints plots\n Usage: linespoints <curve-list> on | off\n'
##set line width of given curves##
    def do_lnwidth(self, line):
        """Set the line width of the listed curves; last token of *line* is the width."""
        try:
            self.modcurve(line, 'lnwidth', [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: lnwidth <curve-list> <width-number>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_lnwidth(self):
        """Print usage for the lnwidth command."""
        print '\n Procedure: Set the line widths of curves\n Usage: lnwidth <curve-list> <width-number>\n'
##set line style of given curves##
    def do_lnstyle(self, line):
        """Set the line style of the listed curves; last token of *line* is the style name."""
        try:
            self.modcurve(line, 'lnstyle', [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: lnstyle <curve-list> <style: solid | dash | dot | dotdash>'
            if(self.debug): traceback.print_exc(file=sys.stdout)
    def help_lnstyle(self):
        """Print usage for the lnstyle command."""
        print '\n Procedure: Set the line style of curves\n Usage: lnstyle <curve-list> <style: solid | dash | dot | dotdash>\n'
##set draw style of given curves##
    def do_drawstyle(self, line):
        """Set the draw style of the listed curves; last token of *line* is the style name."""
        try:
            self.modcurve(line, 'drawstyle', [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: drawstyle <curve-list> <style: default | steps | steps-pre | steps-post | steps-mid>'
            if(self.debug):
                traceback.print_exc(file=sys.stdout)
    def help_drawstyle(self):
        """Print usage for the drawstyle command."""
        print '\n Procedure: Set the draw style of the curves\n Usage: drawstyle <curve-list> <style: default | steps | steps-pre | steps-post | steps-mid>\n'
##set dash style of given curves##
    def help_dashstyle(self):
        """Print usage for the dashstyle command."""
        print '''
 Procedure: Set the style of dash or dot dash lines
 Usage: dashstyle <curve-list> <[...]>
 The python list is a list of integers, alternating how many pixels to turn on and off, for example:
 [2, 2] : Two pixels on, two off (will result in a dot pattern).
 [4, 2, 2, 2] : 4 on, 2 off, 2 on, 2 off (results in a dash-dot pattern).
 [4, 2, 2, 2, 4, 2] : Gives a dash-dot-dash pattern.
 [4, 2, 2, 2, 2, 2] : Gives a dash-dot-dot pattern.
 See matplotlib 'set_dashes' command for more information.
 '''
    def do_dashstyle(self, line):
        """Set the dash pattern of the listed curves; the trailing '[...]' literal in *line* is passed through to modcurve."""
        try:
            # Everything from the first '[' onward is the dash-pattern list.
            self.modcurve(line, 'dashstyle', [line[line.index("["):],])
            self.plotedit = True
        except:
            print "ERROR : dashstyle usage"
            self.help_dashstyle()
            if(self.debug): traceback.print_exc(file=sys.stdout)
##turn hiding on for given curves##
def do_hide(self, line):
try:
line = line + ' ON'
self.modcurve(line, 'hide', ['ON'])
except:
print 'error - usage: hide <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_hide(self):
print '\n Procedure: Hide curves from view\n Usage: hide <curve-list>\n'
##turn hiding off for given curves##
def do_show(self, line):
try:
line = line + ' OFF'
self.modcurve(line, 'hide', ['OFF'])
except:
print 'error - usage: show <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_show(self):
print '\n Procedure: Reveal curves hidden by hide command\n Usage: show <curve-list>\n'
##Use matplotlib style settings##
    def do_style(self, line):
        """Apply a matplotlib style by name (first token of *line*) and schedule a redraw."""
        try:
            line = line.split()
            self.plotter.style = line[0]
            self.updatestyle = True
            self.redraw = True
        except:
            print 'error - usage: style <style-name>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_style(self):
        """Print usage for the style command."""
        print '''
 Procedure: Use matplotlib style settings from a style specification.
 The style name of 'default' is reserved for reverting back to the
 default style settings.
 Usage: style <style-name>
 '''
##change the y range on the graph##
    def do_range(self, line):
        """Set the y plotting range to two floats, or reset to auto with 'de'."""
        try:
            line = line.split()
            if(line and line[0] == 'de'):
                self.ylim = None
            elif(len(line) == 2):
                self.ylim = (float(line[0]), float(line[1]))
            else:
                print 'error: exactly two arguments required or de for default'
        except:
            print 'error - usage: range <low-lim> <high-lim> or range de'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_range(self):
        """Print usage for the range command."""
        print '\n Procedure: Set the range for plotting\n Usage: range <low-lim> <high-lim> or\n Usage: range de\n Shortcuts: ran\n'
##change the x domain on the graph##
    def do_domain(self, line):
        """Set the x plotting domain to two floats, or reset to auto with 'de'."""
        try:
            line = line.split()
            if(line and line[0] == 'de'):
                self.xlim = None
            elif(len(line) == 2):
                self.xlim = (float(line[0]), float(line[1]))
            else:
                print 'error: exactly two arguments required or de for default'
        except:
            print 'error - usage: domain <low-lim> <high-lim> or domain de'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_domain(self):
        """Print usage for the domain command."""
        print '\n Procedure: Set the domain for plotting\n Usage: domain <low-lim> <high-lim> or\n Usage: domain de\n Shortcuts: dom\n'
##list currently graphed curves##
    def do_list(self, line):
        """List plotted curves, optionally filtered by a regex matched against 'name filename'.

        Prints one row per matching curve: plotname (prefixed with '*' if
        edited), truncated name, x/y min/max, and source filename.
        """
        try:
            reg = re.compile(r"")
            if line:
                try:
                    reg = re.compile(r"%s" % line)
                except:
                    print 'error - invalid label-pattern'
                    return 0
            for curve in self.plotlist:
                searchline = curve.name + ' ' + curve.filename
                if not line or reg.search(searchline):
                    plotname = ''
                    if curve.edited:
                        plotname = '*'
                    plotname = string.rjust(plotname + curve.plotname, 5)
                    name = pdvutil.truncate(string.ljust(curve.name, self.namewidth),self.namewidth)
                    fname = curve.filename
                    xmin = string.ljust('%.2e' % min(curve.x), 9)
                    xmax = string.ljust('%.2e' % max(curve.x), 9)
                    ymin = string.ljust('%.2e' % min(curve.y), 9)
                    ymax = string.ljust('%.2e' % max(curve.y), 9)
                    print '%s %s %s %s %s %s %s' % (plotname, name, xmin, xmax, ymin, ymax, fname)
        except:
            print 'error - usage: list [<label-pattern>]'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
        finally:
            # Listing never changes the plot, so suppress the redraw.
            self.redraw = False
    def help_list(self):
        """Print usage for the list command."""
        print '\n Macro: Display curves in list\n Usage: list [<label-pattern>]\n Shortcuts: lst\n'
## Delete the specified entries from the menu ##
def do_kill(self, line):
try:
if not line:
raise RuntimeError, "Argument(s) missing."
if 'all' in line:
self.curvelist = list()
else:
tmpcurvelist = list()
for i in range(len(self.curvelist)):
if not str(i+1) in line:
tmpcurvelist.append(self.curvelist[i])
self.curvelist = tmpcurvelist
except RuntimeError as rte:
print ' error - %s' % rte
print ' usage: kill [all | number-list]'
if self.debug:
traceback.print_exc(file=sys.stdout)
except:
print 'error - usage: kill [all | number-list]'
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
self.plotter.updateDialogs()
def help_kill(self):
print '\n Procedure: Delete the specified entries from the menu. ' \
'number-list is a space separated list of menu indexes\n Usage: kill [all | number-list]'
##print out curves loaded from files##
    def do_menu(self, line):
        """List curves loaded from files, optionally filtered by a regex matched against 'name filename'."""
        try:
            reg = re.compile(r"")
            if(line):
                try:
                    reg = re.compile(r"%s" % line)
                    # reg = re.compile(line)
                except:
                    print 'error: invalid expression'
                    return 0
            for i in range(len(self.curvelist)):
                searchline = self.curvelist[i].name + ' ' + self.curvelist[i].filename
                if not line or reg.search(searchline):
                    index = string.rjust(str(i+1),5)
                    name = pdvutil.truncate(string.ljust(self.curvelist[i].name, self.namewidth),self.namewidth)
                    fname = self.curvelist[i].filename
                    xmin = string.ljust('%.2e' % min(self.curvelist[i].x), 9)
                    xmax = string.ljust('%.2e' % max(self.curvelist[i].x), 9)
                    ymin = string.ljust('%.2e' % min(self.curvelist[i].y), 9)
                    ymax = string.ljust('%.2e' % max(self.curvelist[i].y), 9)
                    print '%s %s %s %s %s %s %s' % (index, name, xmin, xmax, ymin, ymax, fname)
        except:
            print 'error - usage: menu [<regex>]'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
        finally:
            # Listing never changes the plot, so suppress the redraw.
            self.redraw = False
    def help_menu(self):
        """Print usage and a regex primer for the menu command."""
        print """\n Macro: List the available curves\n Usage: menu [<regex>]\n
 Regular expressions are based on the Python regex syntax, not the UNIX syntax.
 In particular, '*' is not the wildcard you might be expecting.
 Some rules are:
 'abc' Matches anything that has 'abc' in it anywhere
 '.'
 (Dot) Matches any character except a newline.
 '^'
 (Caret) Matches the start of the string.
 '$' Matches the end of the string.
 [] Used to indicate a set of characters.
 '*'
 Causes the resulting RE to match 0 or more repetitions of the
 preceding RE, as many repetitions as are possible. ab* will match
 'a', 'ab', or 'a' followed by any number of 'b's.
 It is useful to know that '.*' matches any number of anythings, which
 is often what people expect '*' to do.
 EXAMPLES:
 energy matches 'fluid energy', 'energy from gas', and 'blow energy blow'
 dt.*cycle matches 'dt [sh] vs. cycle', and 'find dt on a bicycle please'.
 ^foo.*rat$ matches 'foobarat', 'foo rat', and 'foolish boy, now you will be eaten by a rat'
 VR[de] matches 'bigVRdump', 'smallVRexit', but not 'mediumVRfront'
 AX[deh-z] matches 'myAXjob', 'yourAXexit', 'AXnow', but not 'AXfoo'
 For a painfully complete explanation of the regex syntax, type 'help regex'.
 """
    def help_regex(self):
        """Show the full Python 're' module help via the interactive help system."""
        print "This is the Python help for the 're' module. 'help menu' will give you\n a shorter version."
        help(re)
##drop to python prompt##
    def do_drop(self, line):
        """Leave the command loop (return True) so the caller drops to the python prompt; suppresses the redraw."""
        self.redraw = False
        return True
    def help_drop(self):
        """Print usage for the drop command."""
        print '\n Macro: Enter the python prompt for custom input\n Usage: drop\n'
##exit the program##
    def do_quit(self, line):
        """Save command history (best effort) and exit PyDV."""
        try:
            readline.write_history_file(os.getenv('HOME') + '/.pdvhistory')
        except:
            # History saving is best-effort; failure should not block exit.
            if self.debug: traceback.print_exc(file=sys.stdout)
        finally:
            self.app.quit()
            sys.exit()
            return True  # NOTE: unreachable -- sys.exit() raises SystemExit first
    def help_quit(self):
        """Print usage for the quit command."""
        print '\n Macro: Exit PyDV\n Usage: quit\n Shortcuts: q\n'
##save figure to file##
    def do_image(self, line):
        """Save the current figure to '<file-name>.<file-type>' (defaults: 'plot' and 'pdf')."""
        try:
            line = line.split()
            filename = 'plot'
            filetype = 'pdf'
            if(len(line) > 1):
                # Two args: '<file-name> <file-type>' (type is the last token).
                filetype = line.pop(-1)
                filename = line.pop(-1)
            elif(len(line) > 0):
                filename = line.pop(-1)
            plt.savefig(filename+'.'+filetype, format=filetype)
        except:
            print 'error - usage: image <file-name> <file-type: png | ps | pdf | svg>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
        finally:
            # Saving the figure does not modify the plot.
            self.redraw = False
    def help_image(self):
        """Print usage for the image command."""
        print '\n Macro: Save the current figure to image file\n Usage: image <file-name> <file-type: png | ps | pdf | svg>\n'
##save given curves to a new ultra file##
def do_save(self, line):
if(not line):
return 0
try:
line = line.split()
filename = line.pop(0)
line = ' '.join(line)
if(len(line.split(':')) > 1):
self.do_save(filename + ' ' + pdvutil.getletterargs(line))
return 0
else:
f = open(filename, 'w')
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
pname = self.plotlist[j].plotname
if(pname == line[i].upper()):
f.write('#' + self.plotlist[j].name + '\n')
for dex in range(len(self.plotlist[j].x)):
f.write(' ' + str(self.plotlist[j].x[dex]) + ' ' + str(self.plotlist[j].y[dex]) + '\n')
break
except:
print 'error - usage: save <file-name> <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
finally:
self.redraw = False
def help_save(self):
print '\n Macro: Save curves to file\n Usage: save <file-name> <curve-list>\n'
##save given curves to a CSV file##
    def do_savecsv(self, line):
        """Save the named curves to a CSV file with a shared 'time' column.

        First token of *line* is the output filename; the rest is the
        curve list. All curves must have the same number of points; the
        x values of the FIRST named curve supply the time column.
        """
        if(not line):
            return 0
        try:
            line = line.split()
            filename = line.pop(0)
            line = ' '.join(line)
            if(len(line.split(':')) > 1):
                # Expand range notation and recurse with explicit letters.
                self.do_savecsv(filename + ' ' + pdvutil.getletterargs(line))
                return 0
            else:
                # make list of curve indices
                indices = []
                #used to ensure number of points in each curve appended is equal
                assertlength = None
                line = line.split()
                for i in range(len(line)):
                    for j in range(len(self.plotlist)):
                        pname = self.plotlist[j].plotname
                        if(pname == line[i].upper()):
                            if assertlength is not None and len(self.plotlist[j].x) != assertlength:
                                print 'error - All curves must have the same number of points.'
                                return 0
                            elif assertlength is None:
                                # First matching curve fixes the expected length.
                                assertlength = len(self.plotlist[j].x)
                                indices.append(j)
                                break
                            else:
                                indices.append(j)
                                break
                # write curves out in csv format
                f = open(filename, 'w')
                s = 'time, '
                for j in indices[:-1]:
                    s += self.plotlist[j].name + ', '
                s += self.plotlist[indices[-1]].name
                s += '\n'
                f.write(s)
                try:
                    for i in range(len(self.plotlist[indices[0]].x)):
                        s = str(self.plotlist[indices[0]].x[i]) + ', '
                        for j in indices[:-1]:
                            s += str(self.plotlist[j].y[i]) + ', '
                        s += str(self.plotlist[indices[-1]].y[i])
                        s += '\n'
                        f.write(s)
                    f.close()
                except IndexError:
                    print 'error - All curves must have the same number of points.'
        except:
            print 'error - usage: savecsv <file-name> <curve-list>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
        finally:
            # Saving does not modify the plot.
            self.redraw = False
    def help_savecsv(self):
        """Print usage for the savecsv command."""
        print '\n Macro: Save curves to file in comma separated values (csv) format\n Assumes all curves have the same x basis\n CSV file generated with number rows equal to number of points in first curve passed.\n Usage: savecsv <file-name> <curve-list>\n'
##Display text on the plot at the given plot location##
def do_annot(self, line):
if(not line):
return 0
try:
argline = line.split()
xloc = argline[-2]
yloc = argline[-1]
argdex = line.find(xloc)
line = line[:argdex]
self.usertexts.append([float(xloc), float(yloc), line])
except:
print 'error - usage: annot <text> <xloc> <yloc>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_annot(self):
print '\n Macro: Display text on the plot by axis x and y location\n Usage: annot <text> <xloc> <yloc>\n'
##List current annotations##
    def do_listannot(self, line):
        """List current annotations: 1-based index, x, y, and (truncated) text."""
        try:
            for i in range(len(self.usertexts)):
                dex = string.rjust(str(i+1), 5)
                xloc = string.ljust('%.4f' % self.usertexts[i][0], 5)
                yloc = string.ljust('%.4f' % self.usertexts[i][1], 5)
                annot = pdvutil.truncate(string.ljust(self.usertexts[i][2], 50),50)
                print '%s %s %s %s' % (dex, xloc, yloc, annot)
        except:
            print 'error - usage: listannot'
            if(self.debug): traceback.print_exc(file=sys.stdout)
        finally:
            # Listing never changes the plot, so suppress the redraw.
            self.redraw = False
    def help_listannot(self):
        """Print usage for the listannot command."""
        print '\n Macro: List current annotations\n Usage: listannot\n'
##Remove the specified annotations##
    def do_delannot(self, line):
        """Delete the annotations whose 1-based indexes are given in *line* (supports range notation)."""
        if(not line):
            return 0
        try:
            if(len(line.split(':')) > 1): #check for list notation
                self.do_delannot(pdvutil.getnumberargs(line, self.filelist))
                return 0
            else:
                print line
                line = line.split()
                # Collect the objects first, then remove them, so earlier
                # removals do not shift the indexes of later ones.
                rmlist = []
                for i in range(len(line)):
                    rmlist.append(self.usertexts[int(line[i])-1])
                for text in rmlist:
                    self.usertexts.remove(text)
        except:
            print 'error - usage: delannot <number-list-of-annotations>'
            if(self.debug): traceback.print_exc(file=sys.stdout)
    def help_delannot(self):
        """Print usage for the delannot command."""
        print '\n Procedure: Delete annotations from list\n Usage: delannot <number-list-of-annotations>\n'
##generate a straight line and add to curve list##
    def do_span(self, line):
        """
        Generates a straight line of slope 1 and y intercept 0 in the specified domain with an optional number of points.

        :param line: User Command-Line Input (span <xmin> <xmax> [<# pts>])
        :type line: string
        """
        if(not line):
            return 0
        try:
            line = line.split()
            numpts = 100  # default point count when none is given
            if(len(line) == 3):
                numpts = int(line.pop(-1))
            xmin = float(line[0])
            xmax = float(line[1])
            c = pydvif.span(xmin, xmax, numpts)
            self.addtoplot(c)
            self.plotedit = True
        except:
            print 'error - usage: span <xmin> <xmax> [<# pts>]'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_span(self):
        """Print usage for the span command."""
        print '\n Procedure: Generates a straight line of slope 1 and y intercept 0 in the specified domain with an optional number of points\n Usage: span <xmin> <xmax> [<# pts>]\n'
##generate y = mx + b line##
    def do_line(self, line):
        """Generate and plot the line y = m*x + b over [xmin, xmax] with an optional point count."""
        if(not line):
            return 0
        try:
            line = line.split()
            numpts = 100  # default point count when none is given
            if(len(line) == 5):
                numpts = int(line.pop(-1))
            slope = float(line[0])
            yint = float(line[1])
            xmin = float(line[2])
            xmax = float(line[3])
            c = pydvif.line(slope, yint, xmin, xmax, numpts)
            self.addtoplot(c)
            self.plotedit = True
        except:
            print 'error - usage: line <m> <b> <xmin> <xmax> [<# pts>]'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_line(self):
        """Print usage for the line command."""
        print '\n Procedure: Generate a line with y = mx + b and an optional number of points\n Usage: line <m> <b> <xmin> <xmax> [<# pts>]\n'
##generate curve from given x and y points##
    def do_makecurve(self, line):
        """Build and plot a curve from two parenthesized, space-separated number lists: (x values) (y values)."""
        if(not line):
            return 0
        try:
            # Split on ')' so line[0] is the x list and line[1] the y list.
            line = line.strip().split(')')
            x = numpy.fromstring(line[0].strip().strip('('), dtype=float, sep=' ')
            y = numpy.fromstring(line[1].strip().strip('('), dtype=float, sep=' ')
            if(len(x) != len(y)):
                print 'error: must have same number of x and y values'
                return 0
            c = curve.Curve('', 'Curve')
            c.x = x
            c.y = y
            self.addtoplot(c)
            self.plotedit = True
        except:
            print 'error - usage: makecurve (<list of x-values>) (<list of y values>)'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_makecurve(self):
        """Print usage for the makecurve command."""
        print '\n Macro: Generate a curve from two lists of numbers\n Usage: makecurve (<list of x-values>) (<list of y values>)\n Shortcuts: make-curve\n'
##filter out points##
    def do_ymin(self, line):
        """Filter out points whose y value is below the limit (last token of *line*)."""
        try:
            self.modcurve(line, 'ymin', [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: ymin <curve-list> <limit>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_ymin(self):
        """Print usage for the ymin command."""
        print '\n Procedure: Filter out points in curves whose y-values < limit\n Usage: ymin <curve-list> <limit>\n'
##filter out points##
    def do_ymax(self, line):
        """Filter out points whose y value is above the limit (last token of *line*)."""
        try:
            self.modcurve(line, 'ymax', [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: ymax <curve-list> <limit>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_ymax(self):
        """Print usage for the ymax command."""
        print '\n Procedure: Filter out points in curves whose y-values > limit\n Usage: ymax <curve-list> <limit>\n'
##filter out points##
    def do_xmin(self, line):
        """Filter out points whose x value is below the limit (last token of *line*)."""
        try:
            self.modcurve(line, 'xmin', [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: xmin <curve-list> <limit>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_xmin(self):
        """Print usage for the xmin command."""
        print '\n Procedure: Filter out points in curves whose x-values < limit\n Usage: xmin <curve-list> <limit>\n'
##filter out points##
    def do_xmax(self, line):
        """Filter out points whose x value is above the limit (last token of *line*)."""
        try:
            self.modcurve(line, 'xmax', [line.split()[-1]])
            self.plotedit = True
        except:
            print 'error - usage: xmax <curve-list> <limit>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_xmax(self):
        """Print usage for the xmax command."""
        print '\n Procedure: Filter out points in curves whose x-values > limit\n Usage: xmax <curve-list> <limit>\n'
##filter out points; this is the only filter points function that returns a new curve.
##It does that because that's how ULTRA behaved. Go figure.
def do_xminmax(self, line):
try:
line = line.split()
xmax = line.pop(-1)
xmin = line.pop(-1)
cName = line.pop(-1) # curve this one will be based on
# make a new curve by copying curve in argument
cur = self.curvefromlabel(cName) # old curve
cNew = cur.copy() # new curve
cNew.name = 'Extract ' + cName.upper() # we name the new curve from the old curve, just like ULTRA did
cNew.plotname = self.getcurvename()
cNew.color = '' # this, bizarrely, is how we tell PDV to pick a color for this curve on its own. yeesh. --DSM 12/02/2015
self.addtoplot(cNew)
# new lets just re-use the min and max functions we already have to trim the new curve
minline = ' '.join(cNew.plotname) + ' ' + xmin
maxline = ' '.join(cNew.plotname) + ' ' + xmax
self.do_xmin(minline)
self.do_xmax(maxline)
cNew.edited = False # don't mark the new curve as having been edited by min and max; user doesn't care how we did it.
self.plotedit = True
except:
print 'error - usage: xminmax <curve-list> <low-lim> <high-lim>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_xminmax(self):
print '\n Procedure: Trim the selcted curves\n Usage: xminmax <curve-list> <low-lim> <high-lim>\n Shortcuts: xmm'
##filter out points##
    def do_yminmax(self, line):
        """Trim the listed curves in place to [low-lim, high-lim] in y by chaining ymin and ymax."""
        try:
            line = line.split()
            ymax = line.pop(-1)
            ymin = line.pop(-1)
            minline = ' '.join(line) + ' ' + ymin
            maxline = ' '.join(line) + ' ' + ymax
            self.do_ymin(minline)
            self.do_ymax(maxline)
            self.plotedit = True
        except:
            print 'error - usage: yminmax <curve-list> <low-lim> <high-lim>'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
    def help_yminmax(self):
        """Print usage for the yminmax command."""
        print '\n Procedure: Trim the selcted curves\n Usage: yminmax <curve-list> <low-lim> <high-lim>\n Shortcuts: ymm'
##take derivative of the curve##
def do_derivative(self, line):
if(not line):
return 0
try:
if(len(line.split(':')) > 1):
self.do_derivative(pdvutil.getletterargs(line))
return 0
else:
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
name = self.plotlist[j].plotname
if(name == line[i].upper()):
cur = self.plotlist[j]
"""nc = curve.Curve('', 'Derivative ' + cur.plotname)
nc.plotname = self.getcurvename()
y = []
for dex in range(len(cur.x)-1):
run = cur.x[dex+1] - cur.x[dex]
if(run == 0):
run = 1e-4
y.insert(dex, (cur.y[dex+1] - cur.y[dex])/run)
nc.y = numpy.array(y)
x = []
for dex in range(len(cur.x)-1):
x.insert(dex, cur.x[dex+1])
nc.x = numpy.array(x)"""
nc = self.derivative(cur)
self.addtoplot(nc)
break
self.plotedit = True
except:
print 'error - usage: der <curve-list>'
if(self.debug): traceback.print_exc(file=sys.stdout)
def help_derivative(self):
    """Print interactive help for the 'derivative' command."""
    print '\n Procedure: Take derivative of curves\n Usage: derivative <curve-list>\n Shortcuts: der\n'
##take the integral of the curve##
def do_integrate(self, line):
    """Integrate each curve in <curve-list>, optionally between x limits.

    Trailing numeric tokens are interpreted as <low-limit> <high-limit>;
    either both or neither must be supplied.
    """
    if not line:
        return 0
    try:
        if len(line.split(':')) > 1:
            # expand 'a:c' style ranges into an explicit list of names
            self.do_integrate(pdvutil.getletterargs(line))
            return 0
        else:
            line = line.split()
            xlow = None
            xhi = None
            # subtrahend counts how many trailing tokens are numeric limits
            subtrahend = 0
            try:
                xhi = float(line[-1])
                subtrahend = -1
            except:
                xhi = None
            try:
                xlow = float(line[-2])
                subtrahend = -2
            except:
                xlow = None
            if (xlow is None and xhi is not None) or (xlow is not None and xhi is None):
                raise RuntimeError, "<low-limit> and <high-limit> must BOTH be specified"
            # stop excludes the limit tokens from the curve-name scan
            stop = len(line) + subtrahend
            for i in range(stop):
                for j in range(len(self.plotlist)):
                    cur = self.plotlist[j]
                    if cur.plotname == line[i].upper():
                        nc = pydvif.integrate(cur, xlow, xhi)[0]
                        self.addtoplot(nc)
                        break
            self.plotedit = True
    except:
        print 'error - usage: integrate <curve-list> [<low-limit> <high-limit>]'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_integrate(self):
    """Print interactive help for the 'integrate' command."""
    print '\n Procedure: Integrate curves\n Usage: integrate <curve-list> [<low-limit> <high-limit>]\n Shortcuts: int\n'
##plot y of one curve against y of another curve
def do_vs(self, line):
    """Plot the y-values of <curve1> against the y-values of <curve2>."""
    if not line:
        return 0
    try:
        line = line.split()
        if len(line) != 2:
            return 0
        # Digit arguments select curve-list indices rather than plot letters.
        # NOTE(review): str.find only matches substrings of '0123456789',
        # so '12' is treated as numeric but '21' is not — confirm intended.
        stuff = '0123456789'
        fidx1 = stuff.find(line[0])
        fidx2 = stuff.find(line[1])
        if fidx1 >= 0 and fidx2 >= 0:
            self.__vs_variant(line[0], line[1])
            return
        for i in range(len(self.plotlist)):
            if self.plotlist[i].plotname == line[0].upper():
                c1 = self.plotlist[i]
                break
        for i in range(len(self.plotlist)):
            if self.plotlist[i].plotname == line[1].upper():
                c2 = self.plotlist[i]
                break
        # if either name is unknown, c1/c2 is unbound and the NameError is
        # reported by the handler below
        nc = pydvif.vs(c1, c2)
        self.addtoplot(nc)
        self.plotedit = True
    except:
        print 'error: requires exactly two curves as arguments'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def __vs_variant(self, arg0, arg1):
    # This variant supports directly plotting curve-list numbers (or 'a.N'
    # style file-qualified indices) against each other.
    def _extract_curvelist_number(arg):
        """Map an 'a.N' or plain-integer argument to a curvelist index."""
        if ord(arg[0].upper()) >= ord('A') and ord(arg[0].upper()) <= ord('Z'): # Look for a.% type stuff
            # walk curvelist counting file boundaries until the target file
            ifile_target = ord(arg[0].upper()) - ord('A')
            icurve = 0
            ifile = 0
            lastfile = self.curvelist[0].filename
            while icurve < len(self.curvelist) and ifile < ifile_target:
                icurve += 1
                if self.curvelist[icurve].filename != lastfile:
                    ifile += 1
                    lastfile = self.curvelist[icurve].filename
            if icurve == len(self.curvelist):
                print "error: curve index out of bounds: ", arg
                return 0
            icurve += int(arg.split('.')[-1]) - 1
            return icurve
        elif int(arg) > 0 and int(arg) <= len(self.curvelist):
            return int(arg) - 1
        else:
            print "error: curve index out of bounds: ", arg
            # NOTE(review): no return here, so the helper yields None and the
            # indexing below raises, which do_vs's handler reports — confirm
            # whether an explicit early exit was intended.
    icur1, icur2 = _extract_curvelist_number(arg0), _extract_curvelist_number(arg1)
    xc1, yc1 = numpy.array(self.curvelist[icur1].x), numpy.array(self.curvelist[icur1].y)
    xc2, yc2 = numpy.array(self.curvelist[icur2].x), numpy.array(self.curvelist[icur2].y)
    nc = curve.Curve('', '%s vs %s' % (arg0, arg1))
    # interpolate curve 1's y onto curve 2's x grid, then plot against c2's y
    nc.x = yc2
    nc.y = numpy.interp(xc2, xc1, yc1)
    self.addtoplot(nc)
    self.plotedit = True
    return
def help_vs(self):
    """Print interactive help for the 'vs' command."""
    print '\n Procedure: Plot the range of the first curve against the range of the second curve\n Usage: vs <curve1> <curve2>\n'
##define error bars for a curve##
def do_errorbar(self, line):
if not line:
return 0
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line.split()[0].upper():
scur = self.plotlist[i]
break
try:
# y-error-curve, y+error-curve
line = line.split()
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[1].upper():
cury1 = self.plotlist[i]
break
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[2].upper():
cury2 = self.plotlist[i]
break
# x-error-curve, x+error-curve
curx1 = None
curx2 = None
if len(line) >= 5:
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[3].upper():
curx1 = self.plotlist[i]
break
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[4].upper():
curx2 = self.plotlist[i]
break
# point-skip
mod = 1
if len(line) == 6:
mod = line[5]
elif len(line) == 4:
mod = line[3]
pydvif.errorbar(scur, cury1, cury2, curx1, curx2, mod)
self.plotedit = True
except:
scur.ebar = None
print 'error - usage: errorbar <curve> <y-error-curve> <y+error-curve> [<x-error-curve> <x+error-curve>] [<point-skip>]'
if(self.debug): traceback.print_exc(file=sys.stdout)
def help_errorbar(self):
    """Print interactive help for the 'errorbar' command."""
    print '\n Procedure: Plot error bars on the given curve. Note: y-error-curve and y+error-curve are curves and not scalars.\n' \
          ' Usage: errorbar <curve> <y-error-curve> <y+error-curve> [<x-error-curve> <x+error-curve>] [<point-skip>]\n Shortcuts: error-bar\n'
##Define a shaded error range for a curve##
def do_errorrange(self, line):
    """Attach a shaded error range to <curve>.

    Reuses do_errorbar to compute the bars, then moves the y- and y+
    components into the curve's erange and clears ebar.
    """
    if(not line):
        return 0
    # locate the target curve outside the try so the handler below can
    # clear any partially-applied range state on it
    for i in range(len(self.plotlist)):
        if(self.plotlist[i].plotname == line.split()[0].upper()):
            scur = self.plotlist[i]
            break
    try:
        self.do_errorbar(line)
        # keep only the y components as the shaded range
        scur.erange = [scur.ebar[0], scur.ebar[1]]
        scur.ebar = None
        self.plotedit = True
    except:
        scur.erange = None
        print 'error - usage: errorrange <curve> <y-error-curve> <y+error-curve>'
        if(self.debug): traceback.print_exc(file=sys.stdout)
def help_errorrange(self):
    """Print interactive help for the 'errorrange' command."""
    print '\n Procedure: Plot shaded error region on given curve. Note: y-error-curve and y+error-curve are curves and not scalars\n' \
          ' Usage: errorrange <curve> <y-error-curve> <y+error-curve>\n Shortcuts: error-range\n'
##set the marker for scatter plots##
def help_marker(self):
    """Print interactive help for the 'marker' command."""
    print '''
Procedure: Set the marker symbol for scatter plots
Usage: marker <curve-list> <marker-style: + | . | circle | square | diamond> [<marker-size>]
Note: When setting this value through the interface or the curve object directly, use ONLY matplotlib supported marker types.
Matplotlib marker types are also supported here as well. See matplotlib documentation on markers for further information.
'''
def do_marker(self, line):
    """Set the scatter-plot marker (and optionally its size) for curves."""
    if(not line):
        return 0
    try:
        if(len(line.split(':')) > 1):
            # expand 'a:c' style ranges into an explicit list of names
            self.do_marker(pdvutil.getletterargs(line))
            return 0
        else:
            line = line.split()
            # translate ULTRA-style marker names to matplotlib codes
            ultra_markers = {'circle' : 'o',
                             'square' : 's',
                             'diamond' : 'd'}
            try:
                try:
                    size = float(line[-1])
                    marker = line[-2]
                except:
                    size = None
                    marker = line[-1]
                if marker in ultra_markers:
                    marker = ultra_markers[marker]
            except:
                # bail out: without a parsed marker, the loop below would hit
                # an undefined name (the old code fell through and printed the
                # help text a second time via the outer handler)
                self.help_marker()
                return 0
            for i in range(len(line)):
                for j in range(len(self.plotlist)):
                    cur = self.plotlist[j]
                    if(cur.plotname == line[i].upper()):
                        cur.marker = marker
                        if(size):
                            cur.markersize = size
                        break
            self.plotedit = True
    except:
        self.help_marker()
        if self.debug:
            traceback.print_exc(file=sys.stdout)
##set the marker symbol for the curves##
def help_linemarker(self):
    """Print interactive help for the 'linemarker' command."""
    print '''
Procedure: Set the marker symbol for the curves
Usage: linemarker <curve-list> <marker-style: + | . | circle | square | diamond> [<marker-size>]
Note: When setting this value through the interface or the curve object directly, use ONLY matplotlib supported marker types.
Matplotlib marker types are also supported here as well. See matplotlib documentation on markers for further information.
'''
def do_linemarker(self, line):
    """Set the line marker (and optionally its size) for curves."""
    if not line:
        return 0
    try:
        if len(line.split(':')) > 1:
            # expand 'a:c' style ranges into an explicit list of names
            self.do_linemarker(pdvutil.getletterargs(line))
            return 0
        else:
            line = line.split()
            # translate ULTRA-style marker names to matplotlib codes
            ultra_markers = {'circle' : 'o',
                             'square' : 's',
                             'diamond' : 'd'}
            try:
                try:
                    size = float(line[-1])
                    marker = line[-2]
                except:
                    size = None
                    marker = line[-1]
                if marker in ultra_markers:
                    marker = ultra_markers[marker]
            except:
                # bail out: without a parsed marker, the loop below would hit
                # an undefined name (the old code fell through and printed the
                # help text a second time via the outer handler)
                self.help_linemarker()
                if self.debug:
                    traceback.print_exc(file=sys.stdout)
                return 0
            for i in range(len(line)):
                for j in range(len(self.plotlist)):
                    cur = self.plotlist[j]
                    if cur.plotname == line[i].upper():
                        cur.markerstyle = marker
                        if(size):
                            cur.markersize = size
                        break
            self.plotedit = True
    except:
        self.help_linemarker()
        if self.debug:
            traceback.print_exc(file=sys.stdout)
##smooth the curve to given degree##
def do_smooth(self, line):
    """Smooth each curve in <curve-list> by an optional integer factor."""
    if not line:
        return 0
    try:
        factor = 1
        try:
            # a trailing integer token, if present, is the smoothing factor
            factor = int(line.split()[-1])
            line = line.split()
            line.pop(-1)
            line = ' '.join(line)
        except:
            factor = 1
        if len(line.split(':')) > 1:
            # NOTE(review): assumes getletterargs() returns a string ending
            # in a separator so str(factor) lands as its own token — confirm.
            self.do_smooth(pdvutil.getletterargs(line) + str(factor))
            return 0
        else:
            line = line.split()
            for i in range(len(line)):
                for j in range(len(self.plotlist)):
                    cur = self.plotlist[j]
                    if cur.plotname == line[i].upper(): #operate on each curve found in args
                        pydvif.smooth(cur, factor)
                        cur.edited = True
                        break
            self.plotedit = True
    except:
        print 'error - usage: smooth <curve-list> [<smooth-factor>]'
        if(self.debug): traceback.print_exc(file=sys.stdout)
def help_smooth(self):
    """Print interactive help for the 'smooth' command."""
    print '\n Procedure: Smooth the curve to the given degree.\n Usage: smooth <curve-list> [<smooth-factor>]\n'
##make a new curve - the Fourier Transform of y-values the given curves##
def do_fft(self, line):
if not line:
return 0
if len(line.split(':')) > 1:
self.do_fft(pdvutil.getletterargs(line))
return 0
else:
try:
line = line.split()
for item in line:
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == item.upper():
c1 = self.plotlist[i]
nc1, nc2 = pydvif.fft(c1, norm="ortho")
self.addtoplot(nc1)
self.addtoplot(nc2)
breakc
self.plotedit = True
except:
print 'error - usage: fft <curve-list>'
if self.debug: traceback.print_exc(file=sys.stdout)
def help_fft(self):
    """Print interactive help for the 'fft' command."""
    print '\n Procedure: Compute the one-dimensional discrete Fourier Transform of the y values of the curves.' \
          '\n Return real and imaginary parts.' \
          '\n Usage: fft <curve-list>\n'
## Merge list of curves##
def do_appendcurves(self, line):
    """Merge the given curves over the union of their domains."""
    if not line:
        return 0
    if len(line.split(':')) > 1:
        # expand 'a:c' style ranges into an explicit list of names
        self.do_appendcurves(pdvutil.getletterargs(line))
        return 0
    else:
        try:
            line = line.split()
            if len(line) < 2:
                return
            # curves are gathered in plotlist order, not argument order
            curves = list()
            for i in range(len(self.plotlist)):
                for j in range(len(line)):
                    if self.plotlist[i].plotname == line[j].upper():
                        curves.append(self.plotlist[i])
                        break
            nc = pydvif.appendcurves(curves)
            if nc is not None:
                self.addtoplot(nc)
            self.plotedit = True
        except RuntimeError as rte:
            print 'error: %s' % rte
            if self.debug:
                traceback.print_exc(file=sys.stdout)
        except:
            self.help_appendcurves()
            if self.debug:
                traceback.print_exc(file=sys.stdout)
def help_appendcurves(self):
    """Print interactive help for the 'appendcurves' command."""
    print '\n Procedure: Merge a list of curves over the union of their domains. Where domains overlap, take' \
          '\n the average of the curve\'s y-values.' \
          '\n Usage: appendcurves <curve-list>\n'
##make a new curve - the convolution of two given curves##
def do_convolve(self, line):
if not line:
return 0
try:
line = line.split()
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[0].upper():
c1 = self.plotlist[i]
break
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[1].upper():
c2 = self.plotlist[i]
break
if len(line) == 2:
nc = pydvif.convolve(c1, c2)
elif len(line) == 3:
npts = int(line[2])
nc = pydvif.convolve(c1, c2, npts)
else:
raise RuntimeError("Wrong number of arguments, expecting 2 or 3 but received %d." % len(line))
self.addtoplot(nc)
self.plotedit = True
except RuntimeError as rte:
print 'error: %s' % rte
if self.debug:
traceback.print_exc(file=sys.stdout)
except:
self.help_convolve()
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_convolve(self):
    """Print interactive help for the 'convolve' command."""
    print '\n Procedure: Computes the convolution of the two given curves' \
          '\n (g*h)(x) = Int(-inf, inf, dt*g(t)*h(x-t))' \
          '\n This fast method uses FFT\'s and the interpolations involved may give incorrect' \
          '\n results due to improper padding - use with caution.' \
          '\n Usage: convolve <curve1> <curve2> [# points]\n Shortcuts: convol'
##make a new curve - slower convolution which doesn't use FFT's
def do_convolveb(self, line):
if not line:
return 0
try:
line = line.split()
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[0].upper():
c1 = self.plotlist[i]
break
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[1].upper():
c2 = self.plotlist[i]
break
if len(line) == 2:
nc = pydvif.convolveb(c1, c2)
elif len(line) == 3:
npts = int(line[2])
nc = pydvif.convolveb(c1, c2, npts)
else:
raise RuntimeError("Wrong number of arguments, expecting 2 or 3 but received %d." % len(line))
self.addtoplot(nc)
self.plotedit = True
except RuntimeError as rte:
print 'error: %s' % rte
if self.debug:
traceback.print_exc(file=sys.stdout)
except:
self.help_convolveb()
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_convolveb(self):
    """Print interactive help for the 'convolveb' command."""
    print '\n Procedure: Computes the convolution of the two given curves' \
          '\n (g*h)(x) = Int(-inf, inf, dt*g(t)*h(x-t))/' \
          '\n Int(-inf, inf, dt*h(t))' \
          '\n This slower method uses direct integration and minimal interpolations.' \
          '\n curve2 is normalized to unit area before doing the convolution.' \
          '\n Usage: convolveb <curve1> <curve2> [# points]\n Shortcuts: convolb'
##make a new curve - slower convolution which doesn't use FFT's
def do_convolvec(self, line):
if not line:
return 0
try:
line = line.split()
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[0].upper():
c1 = self.plotlist[i]
break
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[1].upper():
c2 = self.plotlist[i]
break
if len(line) == 2:
nc = pydvif.convolvec(c1, c2)
elif len(line) == 3:
npts = int(line[2])
nc = pydvif.convolvec(c1, c2, npts)
else:
raise RuntimeError("Wrong number of arguments, expecting 2 or 3 but received %d." % len(line))
self.addtoplot(nc)
self.plotedit = True
except RuntimeError as rte:
print 'error: %s' % rte
if self.debug:
traceback.print_exc(file=sys.stdout)
except:
self.help_convolvec()
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_convolvec(self):
    """Print interactive help for the 'convolvec' command."""
    print '\n Procedure: Computes the convolution of the two given curves' \
          '\n (g*h)(x) = Int(-inf, inf, dt*g(t)*h(x-t))/' \
          '\n Int(-inf, inf, dt*h(t))' \
          '\n This slower method uses direct integration and minimal interpolations.' \
          '\n Usage: convolvec <curve1> <curve2> [# points]\n Shortcuts: convolc'
##make two new curves - the diff-measure of two given curves##
def do_diffMeasure(self, line):
    """Plot the fractional difference measure and its integral for two curves."""
    if not line:
        return 0
    try:
        tolerance = 1e-8
        try:
            # a trailing float token, if present, is the tolerance
            tolerance = float(line.split()[-1])
            line = line.split()
            line.pop(-1)
            line = ' '.join(line)
        except:
            tolerance = 1e-8
        line = line.split()
        for i in range(len(self.plotlist)):
            if(self.plotlist[i].plotname == line[0].upper()):
                c1 = self.plotlist[i]
                break
        for i in range(len(self.plotlist)):
            if(self.plotlist[i].plotname == line[1].upper()):
                c2 = self.plotlist[i]
                break
        cdiff, cint = pydvif.diffMeasure(c1, c2, tolerance)
        self.addtoplot(cdiff)
        self.addtoplot(cint)
        self.plotedit = True
        # the last value of the integral curve is the summary measure
        print 'Difference measure for curves ' + c1.plotname + ' and ' + c2.plotname + ': ' + str(cint.y[-1])
    except:
        print 'error: requires exactly two curves as arguments'
        if(self.debug): traceback.print_exc(file=sys.stdout)
def help_diffMeasure(self):
    """Print interactive help for the 'diffMeasure' command."""
    print '\n Procedure: Compare two curves. For the given curves a fractional difference measure and its average is computed\n Usage: diffMeasure <curve1> <curve2> [tolerance]\n'
## Compute the correlation function of the two curves ##
def do_correl(self, line):
if not line:
return 0
try:
line = line.split()
if len(line) != 2:
raise RuntimeError("Wrong number of arguments, expecting 2 but received %d." % len(line))
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[0].upper():
c1 = self.plotlist[i]
break
for i in range(len(self.plotlist)):
if self.plotlist[i].plotname == line[1].upper():
c2 = self.plotlist[i]
break
nc = pydvif.correlate(c1, c2, 'same')
self.addtoplot(nc)
self.plotedit = True
except RuntimeError as rte:
print 'error: %s' % rte
self.help_correl()
if self.debug:
traceback.print_exc(file=sys.stdout)
except:
self.help_correl()
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_correl(self):
    """Print interactive help for the 'correl' command."""
    print '\n Procedure: Compute the correlation function of the two curves.\n Usage: correl <curve1> <curve2>\n'
## Changes background color of the plot, window, or both. ##
def do_bkgcolor(self, line):
    """Change the background color of the plot, the window, or both."""
    try:
        line = line.split()
        color = line[-1]
        if color.lower() == 'reset':
            # 'reset' restores both components to their original colors
            color = None
            component = 'reset'
        else:
            if not mclr.is_color_like(color):
                print 'error: invalid color ' + color
                self.redraw = False
                return 0
            if len(line) > 1:
                component = line[0].lower()
                if component != 'plot' and component != 'window' and component != 'reset':
                    raise ValueError('\'%s\' is an invalid component name' % component)
            else:
                component = 'all'
        if component == 'reset':
            self.figcolor = self.plotter.figcolor
            self.plotcolor = '#dddddd'
        else:
            if component == 'all' or component == 'window':
                self.figcolor = color
            if component == 'all' or component == 'plot':
                self.plotcolor = color
        self.plotedit = True
    except ValueError as ve:
        # NOTE(review): ve.message is Python-2-only; py3 would need str(ve)
        print '\nerror - %s' % ve.message
        print 'usage: bkgcolor <[plot | window] color-name | reset>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
    except:
        print 'usage: bkgcolor <[plot | window] color-name | reset>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_bkgcolor(self):
    """Print interactive help for the 'bkgcolor' command."""
    print "\n Procedure: Change the background color of the given component. If no component name is specified, then both components " \
          "\n will be given the same color. See (https://matplotlib.org/users/colors.html) for all the different ways to specify color-name." \
          "\n \'reset\' will return the plot and window colors to their original values.\n\n Usage: bkgcolor <[plot | window] color-name | reset>\n"
##edits the font color for various text components##
def do_fontcolor(self, line):
try:
line = line.split()
color = line[-1]
if not mclr.is_color_like(color):
print 'error: invalid color ' + color
self.redraw = False
return 0
if len(line) > 1:
com = line[0]
if com != 'xlabel' and com != 'ylabel' and com != 'xaxis' and com != 'yaxis' and com != 'title' and com != 'legend':
raise ValueError('\'%s\' is an invalid component name' % com)
else:
com = 'all'
if com == 'all' or com == 'xlabel':
self.xlabelcolor = color
if com == 'all' or com == 'ylabel':
self.ylabelcolor = color
if com == 'all' or com == 'xaxis':
self.xtickcolor = color
if com == 'all' or com == 'yaxis':
self.ytickcolor = color
if com == 'all' or com == 'title':
self.titlecolor = color
if com == 'all' or com == 'legend':
self.keycolor = color
self.plotedit = True
except ValueError as ve:
print '\nerror - %s' % ve.message
print 'usage: fontcolor [<component: xlabel | ylabel | xaxis | yaxis | legend | title>] <color-name>'
if self.debug:
traceback.print_exc(file=sys.stdout)
except:
print 'error - usage: fontcolor [<component: xlabel | ylabel | xaxis | yaxis | legend | title>] <color-name>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_fontcolor(self):
    """Print interactive help for the 'fontcolor' command."""
    print '\n Procedure: Change the font color of given component\n fontcolor [<component: xlabel | ylabel | xaxis | yaxis | legend | title>] <color-name>\n'
##edits the font size for various text components##
def do_fontsize(self, line):
try:
line = line.split()
size = line[-1]
if(size != 'default' and size != 'de' and size != 'x-small' and size != 'small' and size != 'medium' and size != 'large' and size != 'x-large'):
size = float(size)
if(size > 40):
size = 40
if(len(line) > 1):
com = line[0]
else:
com = 'all'
if(com == 'all' or com == 'title'):
if(size == 'default' or size == 'de'):
self.titlefont = 'large'
else:
self.titlefont = size
if(com == 'all' or com == 'xlabel'):
if(size == 'default' or size == 'de'):
self.xlabelfont = 'medium'
else:
self.xlabelfont = size
if(com == 'all' or com == 'ylabel'):
if(size == 'default' or size == 'de'):
self.ylabelfont = 'medium'
else:
self.ylabelfont = size
if(com == 'all' or com == 'legend'):
if(size == 'default' or size == 'de'):
self.keyfont = 'small'
else:
self.keyfont = size
if(com == 'all' or com == 'tick'):
if(size == 'default' or size == 'de'):
self.axistickfont = 'medium'
else:
self.axistickfont = size
if(com == 'all' or com == 'curve'):
if(size == 'default' or size == 'de'):
self.curvelabelfont = 'medium'
else:
self.curvelabelfont = size
if(com == 'all' or com == 'annotation'):
if(size == 'default' or size == 'de'):
self.annotationfont = 'medium'
else:
self.annotationfont = size
except:
print 'error - usage: fontsize [<component: title | xlabel | ylabel | legend | tick | curve | annotation>] <numerical-size | small | medium | large | default>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_fontsize(self):
    """Print interactive help for the 'fontsize' command."""
    print '\n Procedure: Change the font size of given component, or overall scaling factor\n Usage: fontsize [<component: title | xlabel | ylabel | legend | tick | curve | annotation>] <numerical-size | small | medium | large | default>\n'
##change the window size and location##
def do_geometry(self, line):
try:
line = line.split()
if len(line) != 4:
raise RuntimeError, "Wrong number of arguments, expecting 4 but received %d." % len(line)
self.geometry = int(line[0]), int(line[1]), int(line[2]), int(line[3])
self.plotter.updateGeometry(self.geometry)
except:
self.redraw = False
print 'error - usage: geometry <xsize> <ysize> <xlocation> <ylocation>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_geometry(self):
    """Print interactive help for the 'geometry' command."""
    print '\n Procedure: Change the PDV window size and location in pixels\n Usage: geometry <xsize> <ysize> <xlocation> <ylocation>\n Shortcuts: geom\n'
##re-id command re-identifies all the curves into continuous alphabetical order##
def do_reid(self, line):
try:
for i in range(len(self.plotlist)):
c = self.plotlist[i] # get i'th curve object
if(i < 26):
c.plotname = chr(ord('A')+i) # label by alphabet
else:
c.plotname = '@'+str(i+1) # after first 26 curves, go to @N labels
except:
print 'error - usage: re-id or reid'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_reid(self):
    """Print interactive help for the 're-id' command."""
    print '\n Procedure: relabel all the curves in order.\n Usage: re-id\n'
##change label for a curve##
def do_label(self, line):
try:
line = line.split()
c = line.pop(0)
line = ' '.join(line)
for j in range(len(self.plotlist)):
cur = self.plotlist[j]
if(cur.plotname == c.upper()):
cur.name = line
break
self.plotedit = True
except:
print 'error - usage: label <curve> <new-label>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_label(self):
    """Print interactive help for the 'label' command."""
    print '\n Procedure: Change the key and list label for a curve\n Usage: label <curve> <new-label>\n'
##change label for a curve to the filename##
def do_labelFileNames(self, line):
try:
for j in range(len(self.plotlist)):
cur = self.plotlist[j]
if cur.name.find(cur.filename) == -1:
cur.name += ' - ' + cur.filename
self.plotedit = True
except:
print 'error - usage: labelFileNames'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_labelFileNames(self):
    """Print interactive help for the 'labelFileNames' command."""
    print '\n Procedure: Change the key and list labels for all curves to append the filename\n Usage: labelFileNames\n'
##run a list of commands from a file##
def do_run(self, line):
    """Execute PDV commands read line-by-line from a file."""
    try:
        fname = line.strip()
        if(fname[0] == '~'):
            # expand a leading '~' to the user's home directory
            fname = os.getenv('HOME') + fname[1:]
        f = open(fname, 'r')
        for fline in f:
            try:
                if len(fline.strip()) == 0: continue  # skip blank lines
                if fline.strip()[0] == '#': continue  # skip comment lines
                fline = self.precmd(fline.strip())
                args = fline.split()
                cmd = args.pop(0)
                if(cmd == 'image'):
                    # NOTE(review): bare attribute access — this does NOT call
                    # updateplot(); confirm whether a call was intended here.
                    self.updateplot
                args = ' '.join(args)
                # NOTE(review): command dispatch via eval of file content —
                # only run script files from trusted sources.
                send = 'self.do_' + cmd + '(\'' + args + '\')'
                result = eval(send)
            except SystemExit:
                self.do_quit(line)
            except:
                print 'invalid command: ' + fline.strip()
                if(self.debug): traceback.print_exc(file=sys.stdout)
        self.plotedit = True
        # NOTE(review): bare attribute access again — see above.
        self.updateplot
    except SystemExit:
        self.do_quit(line)
    except:
        print 'error - usage: run <filename>'
        if(self.debug): traceback.print_exc(file=sys.stdout)
def help_run(self):
    """Print interactive help for the 'run' command."""
    print '\n Procedure: Execute a list of commands from a file\n Usage: run <filename>\n'
##move given curves to the front of the plot##
def do_movefront(self, line):
try:
if(len(line.split(':')) > 1):
self.do_movefront(pdvutil.getletterargs(line))
return 0
else:
highest = 0
for c in self.plotlist:
if(c.plotprecedence > highest):
highest = c.plotprecedence
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
cur = self.plotlist[j]
if(cur.plotname == line[i].upper()):
cur.plotprecedence = highest + 1
self.plotedit = True
except:
print 'error - usage: movefront <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_movefront(self):
    """Print interactive help for the 'movefront' command."""
    print '\n Procedure: Move the given curves so they are plotted on top\n Usage: movefront <curve-list>\n'
##read in a file of custom functions##
def do_custom(self, line):
    """Load a file of user-defined do_* functions and bind them as commands.

    SECURITY: the file's contents are exec'd verbatim — only load trusted
    files. Uses the Python-2-only 'new.instancemethod' to attach the
    functions to this Command instance.
    """
    try:
        fname = line.strip()
        try:
            if(fname[0] == '~'):
                # expand a leading '~' to the user's home directory
                fname = os.getenv('HOME') + fname[1:]
            f = open(fname, 'r')
            funcfile = f.read()
            # collect the names of all do_* functions defined in the file
            funcs = re.findall('do_\w+', funcfile)
            exec funcfile
            for func in funcs:
                # bind each exec'd function as a method on this instance
                exec 'self.'+func+' = new.instancemethod('+func+', self, Command)'
        except:
            print 'error - invalid file: '+fname
            if(self.debug): traceback.print_exc(file=sys.stdout)
    except:
        print 'error - usage: custom <file-name>'
        if(self.debug): traceback.print_exc(file=sys.stdout)
    finally:
        self.redraw = False
def help_custom(self):
    """Print interactive help for the 'custom' command."""
    print '\n Procedure: Load a file of custom functions to extend PDV. Functions must be of the form \'def do_commandname(self, line): ...\n Usage: custom <file-name>\n'
##allow user defined command synonyms##
def do_alias(self, line):
    """Define <alias> as a synonym for the existing command <command>.

    Builds a forwarding do_<alias> function at runtime via exec and binds
    it with the Python-2-only 'new.instancemethod'.
    """
    try:
        cmd = line.split()[0]
        alias = line.split()[1]
        function = 'def do_'+alias+'(self, line): self.do_'+cmd+'(line)'
        exec function
        exec 'self.do_'+alias+' = new.instancemethod(do_'+alias+', self, Command)'
    except:
        print 'error - usage: alias <command> <alias>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
    finally:
        # aliasing never requires a replot
        self.redraw = False
def help_alias(self):
    """Print interactive help for the 'alias' command."""
    print '\n Procedure: Define a synonym for an existing command\n Usage: alias <command> <alias>\n'
##plot copies of the given curves##
def do_copy(self, line):
try:
if(len(line.split(':')) > 1):
self.do_copy(pdvutil.getletterargs(line))
return 0
else:
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
name = self.plotlist[j].plotname
if(name == line[i].upper()): #operate on each curve found in args
cur = self.plotlist[j]
curout = cur.copy()
curout.plotname = ''
curout.color = ''
self.addtoplot(curout)
except:
print 'error - usage: copy <curve-list>'
if(self.debug): traceback.print_exc(file=sys.stdout)
def help_copy(self):
    """Print interactive help for the 'copy' command."""
    print '\n Procedure: Copy and plot the given curves\n Usage: copy <curve-list>\n'
##Set the y-values such that y[i] *= (x[i+1] - x[i])
def do_makeextensive(self, line):
try:
if len(line.split(':')) > 1:
self.do_makeextensive(pdvutil.getletterargs(line))
return 0
else:
curves = list()
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
name = self.plotlist[j].plotname
if name == line[i].upper():
curves.append(self.plotlist[j])
if len(curves) > 0:
pydvif.makeextensive(curves)
else:
raise RuntimeError('Need to specify a valid curve or curves')
except:
print 'error - usage: makeextensive <curve-list>\n Shortcut: mkext'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_makeextensive(self):
    """Print interactive help for the 'makeextensive' command."""
    print '\n Procedure: Set the y-values such that y[i] *= (x[i+1] - x[i]) \n Usage: makeextensive <curve-list>\n Shortcut: mkext'
##Set the y-values such that y[i] /= (x[i+1] - x[i])
def do_makeintensive(self, line):
try:
if len(line.split(':')) > 1:
self.do_makeintensive(pdvutil.getletterargs(line))
return 0
else:
curves = list()
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
name = self.plotlist[j].plotname
if name == line[i].upper():
curves.append(self.plotlist[j])
if len(curves) > 0:
pydvif.makeintensive(curves)
else:
raise RuntimeError('Need to specify a valid curve or curves')
except:
print 'error - usage: makeintensive <curve-list>\n Shortcut: mkint'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_makeintensive(self):
    """Print interactive help for the 'makeintensive' command."""
    print '\n Procedure: Set the y-values such that y[i] /= (x[i+1] - x[i]) \n Usage: makeintensive <curve-list>\n Shortcut: mkint'
##Duplicate the x-values such that y = x for each of the given curves##
def do_dupx(self, line):
try:
if len(line.split(':')) > 1:
self.do_dupx(pdvutil.getletterargs(line))
return 0
else:
curves = list()
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
name = self.plotlist[j].plotname
if name == line[i].upper(): #operate on each curve found in args
curves.append(self.plotlist[j])
if len(curves) > 0:
pydvif.dupx(curves)
else:
raise RuntimeError('Need to specify a valid curve or curves')
except:
print 'error - usage: dupx <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_dupx(self):
    """Print interactive help for the 'dupx' command."""
    print '\n Procedure: Duplicate the x-values such that y = x for each of the given curves\n Usage: dupx <curve-list>\n'
##Create curves with y-values vs. integer index values##
def do_xindex(self, line):
try:
if len(line.split(':')) > 1:
self.do_xindex(pdvutil.getletterargs(line))
return 0
else:
curves = list()
line = line.split()
for i in range(len(line)):
for j in range(len(self.plotlist)):
name = self.plotlist[j].plotname
if name == line[i].upper(): #operate on each curve found in args
curves.append(self.plotlist[j])
if len(curves) > 0:
pydvif.xindex(curves)
else:
raise RuntimeError('Need to specify a valid curve or curves')
except:
print 'error - usage: xindex <curve-list>'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_xindex(self):
    """Print interactive help for the 'xindex' command."""
    print '\n Procedure: Create curves with y-values vs. integer index values\n Usage: xindex <curve-list>\n'
##set the number of ticks on the axes##
def do_ticks(self, line):
try:
if(line.strip() == 'de'):
self.numticks = 'de'
else:
self.numticks = int(line.strip())
if(self.numticks > 50):
self.numticks = 50
if(self.numticks < 2):
self.numticks = 2
except:
print 'error - usage: ticks <quantity> or ticks de'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_ticks(self):
    """Print interactive help for the 'ticks' command."""
    print '\n Variable: Set the maximum number of major ticks on the axes\n Usage: ticks <quantity> or\n Usage: ticks de\n'
##set the yticks explicitly##
def do_yticks(self, line):
    """Set y-axis major tick locations: 'de', a count, locations, or
    (locations, labels).

    SECURITY: the argument is eval'd — intended for interactive use only.
    """
    try:
        if line.strip() == 'de':
            self.yticks = 'de'
        elif isinstance(eval(line.strip()), Number):
            self.yticks = eval(line.strip())
        elif isinstance(eval(line.strip()), tuple):
            if isinstance(eval(line.strip())[0], Number):
                # a tuple of numbers is a list of tick locations
                self.yticks = eval(line.strip())
            else:
                # otherwise expect ((locations), (labels))
                locs, labels = eval(line)
                self.yticks = (locs, labels)
    except:
        print 'error - usage: yticks < de | integer | (list of locations) | (list of locations), (list of labels)>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_yticks(self):
    """Print interactive help for the 'yticks' command."""
    print '\n Variable: Set the locations of major ticks on the y axis\n Usage: yticks < de | integer | (list of locations) | (list of locations), (list of labels)>'
## set the xticks explicitly ##
def do_xticks(self, line):
    """Set x-axis major tick locations: 'de', a count, locations, or
    (locations, labels).

    SECURITY: the argument is eval'd — intended for interactive use only.
    """
    try:
        if line.strip() == 'de':
            self.xticks = 'de'
        elif isinstance(eval(line.strip()), Number):
            self.xticks = eval(line.strip())
        elif isinstance(eval(line.strip()), tuple):
            if isinstance(eval(line.strip())[0], Number):
                # a tuple of numbers is a list of tick locations
                self.xticks = eval(line.strip())
            else:
                # otherwise expect ((locations), (labels))
                locs, labels = eval(line)
                self.xticks = (locs, labels)
    except:
        print 'error - usage: xticks < de | integer | (list of locations) | (list of locations), (list of labels)>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_xticks(self):
    """Print interactive help for the 'xticks' command."""
    print '\n Variable: Set the locations of major ticks on the x axis\n Usage: xticks < de | integer | (list of locations) | (list of locations), (list of labels)>'
## set the xticks length explicitly ##
def do_xticklength(self, line):
try:
if line.strip() == 'de':
self.xticklength = 3.2
elif isinstance(eval(line.strip()), Number):
self.xticklength = eval(line.strip())
except:
print 'error - usage: xticklength number'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_xticklength(self):
    """Print interactive help for the 'xticklength' command."""
    print '\n Variable: Set the length (in points) of x ticks on the x axis.\n Usage: xticklength number'
## set the yticks length explicitly ##
def do_yticklength(self, line):
try:
if line.strip() == 'de':
self.yticklength = 3.2
elif isinstance(eval(line.strip()), Number):
self.yticklength = eval(line.strip())
except:
print 'error - usage: yticklength number'
if self.debug:
traceback.print_exc(file=sys.stdout)
def help_yticklength(self):
    """Print usage help for the 'yticklength' command."""
    print '\n Variable: Set the length (in points) of y ticks on the y axis.\n Usage: yticklength number'
## set the xticks width explicitly ##
def do_xtickwidth(self, line):
    """Set the x-axis major tick width in points; 'de' restores 0.5."""
    try:
        if line.strip() == 'de':
            self.xtickwidth = .5
        elif isinstance(eval(line.strip()), Number):
            self.xtickwidth = eval(line.strip())
    except:
        print 'error - usage: xtickwidth number'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_xtickwidth(self):
    """Print usage help for the 'xtickwidth' command."""
    print '\n Variable: Set the width (in points) of x ticks on the x axis.\n Usage: xtickwidth number'
## set the yticks width explicitly ##
def do_ytickwidth(self, line):
    """Set the y-axis major tick width in points; 'de' restores 0.5."""
    try:
        if line.strip() == 'de':
            self.ytickwidth = .5
        elif isinstance(eval(line.strip()), Number):
            self.ytickwidth = eval(line.strip())
    except:
        print 'error - usage: ytickwidth number'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_ytickwidth(self):
    """Print usage help for the 'ytickwidth' command."""
    print '\n Variable: Set the width (in points) of y ticks on the y axis.\n Usage: ytickwidth number'
##set the ytickformat explicitly##
def do_ytickformat(self, line):
    """Set the y-axis tick label format; 'plain' maps to the internal
    default token 'de', anything else is stored verbatim for
    tickFormat() to interpret."""
    try:
        if line.strip() == 'plain':
            self.ytickformat = 'de'
        else:
            self.ytickformat = line.strip()
    except:
        print 'error - usage: ytickformat <plain | sci | exp | 10** | %[width][.precision][type]>.'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_ytickformat(self):
    """Print usage help for the 'ytickformat' command."""
    print '\n Variable: Set the format of major ticks on the y axis\n Usage: ytickformat <plain | sci | exp | 10** | %[width][.precision][type]>. \n Default is plain. ' \
          '%[width][.precision][type] is the C-style (old Python style) format string (e.g., %5.1e).' \
          '\n Note: exp and 10** only apply when ylogscale is set to on. C-style formating only applies when ylogscale is set to off.'
##set the xtickformat explicitly##
def do_xtickformat(self, line):
    """Set the x-axis tick label format; 'plain' maps to the internal
    default token 'de', anything else is stored verbatim for
    tickFormat() to interpret."""
    try:
        if line.strip() == 'plain':
            self.xtickformat = 'de'
        else:
            self.xtickformat = line.strip()
    except:
        print 'error - usage: xtickformat <plain | sci | exp | 10** | %[width][.precision][type]>.'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_xtickformat(self):
    """Print usage help for the 'xtickformat' command."""
    print '\n Variable: Set the format of major ticks on the x axis\n Usage: xtickformat <plain | sci | exp | 10** | %[width][.precision][type]>. \n Default is plain. ' \
          '%[width][.precision][type] is the C-style (old Python style) format string (e.g., %5.1e).' \
          '\n Note: exp and 10** only apply when xlogscale is set to on. C-style formating only applies when xlogscale is set to off.'
##set the font family##
def do_fontstyle(self, line):
    """Set the global matplotlib font family (serif, sans-serif, or
    monospace)."""
    try:
        matplotlib.rc('font', family=line.strip())
    except:
        print 'error - usage: fontstyle <serif | sans-serif | monospace>'
        if self.debug:
            traceback.print_exc(file=sys.stdout)
def help_fontstyle(self):
    """Print usage help for the 'fontstyle' command."""
    print '\n Variable: Set the fontstyle family\n Usage: fontstyle <serif | sans-serif | monospace>\n'
##subsample the curves, i.e., reduce to every nth value.
def do_subsample(self, line):
    """Reduce the named curves to every Nth point.

    Usage: subsample <curve-list> [stride]; stride defaults to 2.
    """
    if not line:
        return 0
    else:
        try:
            # Expand 'a:c'-style letter ranges and re-dispatch.
            if len(line.split(':')) > 1:
                self.do_subsample(pdvutil.getletterargs(line))
                return 0
            else:
                line = line.split()
                subtrahend = 0
                # If the final token parses as an int it is the stride,
                # not a curve name, so exclude it from the name scan.
                try:
                    stride = int(line[-1])
                    subtrahend = -1
                except:
                    stride = 2
                stop = len(line) + subtrahend
                curvelist = list()
                for i in range(stop):
                    for j in range(len(self.plotlist)):
                        cur = self.plotlist[j]
                        if cur.plotname == line[i].upper():
                            curvelist.append(cur)
                if len(curvelist) > 0:
                    print "\nSubsampling the data by stride %i...\n" % stride
                    pydvif.subsample(curvelist, stride, True)
                    self.plotedit = True
        except:
            print 'error - usage: subsample <curve-list> [stride]'
            if self.debug:
                traceback.print_exc(file=sys.stdout)
def help_subsample(self):
    """Print usage help for the 'subsample' command."""
    print '\n subsample <curve-list> [stride]'
    print ' Subsample the curves by the optional stride. If stride is not specified defaults to 2.'
########################################################################################################
#helper functions#
########################################################################################################
##find the proper x-range##
def find_xrange(self):
    """Return (xmin, xmax) spanning the x data of all visible curves.

    With xlogscale on, the lower bound starts at 1e-2 and values at or
    below ~zero are pushed out of contention before searching for the
    smallest plottable x; the bounds are then kept consistent.
    """
    visible = [c for c in sorted(self.plotlist, key=lambda c: c.plotprecedence)
               if not c.hidden]
    xmin, xmax = 1e300, -1e300
    for cur in visible:
        lo, hi = min(cur.x), max(cur.x)
        if lo < xmin:
            xmin = lo
        if hi > xmax:
            xmax = hi
    if self.xlogscale:
        xmin = 1e-2
        for cur in visible:
            vals = numpy.array(cur.x)
            # push non-positive/tiny values out of the running
            vals[vals < 1e-300] = 1e301
            smallest = min(vals)
            if smallest and smallest < xmin:
                xmin = smallest
        if xmax < xmin:
            xmax = xmin * 10000
    return xmin, xmax
##find the proper y-range##
def find_yrange(self):
    """Return (ymin, ymax) for all visible curves, padded for display.

    On a linear y axis an additive 5% pad is applied at both ends; on a
    log axis the pad is multiplicative so both bounds stay positive,
    and values at or below ~zero are masked out of the minimum search.
    """
    visible = [c for c in sorted(self.plotlist, key=lambda c: c.plotprecedence)
               if not c.hidden]
    ymin, ymax = 1e300, -1e300
    for cur in visible:
        lo, hi = min(cur.y), max(cur.y)
        if lo < ymin:
            ymin = lo
        if hi > ymax:
            ymax = hi
    if self.ylogscale:
        ymin = 1e-2
        for cur in visible:
            masked = numpy.ma.masked_where(numpy.ma.array(cur.y) < 1e-300,
                                           numpy.ma.array(cur.y))
            smallest = min(masked)
            if smallest and smallest < ymin:
                ymin = smallest
        if ymax < ymin:
            ymax = ymin * 10000
        ymin *= 0.95
        ymax *= 1.05
    else:
        pad = 0.05 * (ymax - ymin)
        ymin -= pad
        ymax += pad
    return ymin, ymax
##get curve from its label/plot name##
def curvefromlabel(self, label):
    """Map a curve label ('a'-'z', 'A'-'Z', or '@N') to its plotlist entry."""
    letter = label.upper()
    if label[:1] != '@' and ord('A') <= ord(letter) <= ord('Z'):
        return self.plotlist[ord(letter) - ord('A')]
    # '@N' labels are 1-based positions beyond the 26 letters
    return self.plotlist[int(label[1:]) - 1]
##ensure curve is valid and add it to the plotlist##
def addtoplot(self, cur):
    """Validate a curve and insert it into plotlist at its name's slot.

    Assigns a fresh name when the current one is empty or malformed,
    converts x/y to numpy arrays, and raises ValueError for curves
    with fewer than two points or mismatched x/y lengths.
    """
    malformed = cur.plotname == '' or (len(cur.plotname) > 1 and cur.plotname[0] != '@')
    if malformed:
        cur.plotname = self.getcurvename()
    cur.x = numpy.array(cur.x)
    cur.y = numpy.array(cur.y)
    if len(cur.x) < 2 or len(cur.y) < 2:
        raise ValueError('curve must have two or more points')
    if len(cur.x) != len(cur.y):
        raise ValueError('curve must have same number of x and y values')
    # Letter names fill slots 0-25; '@N' names are 1-based thereafter.
    if cur.plotname[:1] != '@' and ord('A') <= ord(cur.plotname) <= ord('Z'):
        slot = ord(cur.plotname) - ord('A')
    else:
        slot = int(cur.plotname[1:]) - 1
    self.plotlist.insert(slot, cur)
##return derivative of curve##
def derivative(self, cur):
    """Return a new curve holding the derivative of `cur` (computed by
    pydvif), named with the next free plot name."""
    nc = pydvif.derivative(cur)
    nc.plotname = self.getcurvename()
    return nc
##find the next available curve name for the plot##
def getcurvename(self):
    """Return the first unused curve name.

    Names are 'A'..'Z' for the first 26 plotlist slots and '@N'
    (1-based) afterwards.  Scans for the first slot whose curve does
    not carry that slot's name (a gap left by a deleted curve); if
    there is no gap, returns the name one past the end of the list.

    Bug fix: the original returned unconditionally on the first index
    >= 26, yielding '' when that slot was already correctly named
    instead of continuing the scan for a later gap.
    """
    for i in range(len(self.plotlist)):
        if i < 26:
            expected = chr(ord('A') + i)
        else:
            expected = '@' + str(i + 1)
        if self.plotlist[i].plotname != expected:
            return expected  # found a gap: reuse this slot's name
    # No gaps: hand out the next name past the end of the list.
    if len(self.plotlist) < 26:
        return chr(ord('A') + len(self.plotlist))
    return '@' + str(len(self.plotlist) + 1)
##find closest value in numpy array##
def getclosest(self, array, value):
    """Return the index of the element of `array` nearest to `value`."""
    deltas = numpy.abs(array - value)
    return deltas.argmin()
##operate on given curves by constant value depending on given operation flag##
def modcurve(self, line, flag, args=[]):
    """Apply the modification named by `flag`, using value args[0], to
    every curve whose letter appears in `line`.

    Flags: my/mx (scale y/x), divy/divx (divide y/x; a zero divisor is
    replaced by 1e-10), dy/dx (shift y/x), scatter/linespoints
    (on/off), lnwidth/lnstyle/drawstyle/dashstyle, hide, getx/gety
    (print matching points; no data change), and xmin/xmax/ymin/ymax
    (clip to a range; curves left with fewer than two points are
    removed from the plotlist).
    NOTE(review): args=[] is a mutable default argument; it is only
    read here, so no state leaks between calls.
    """
    if(not line):
        return 0
    modvalue = args[0]
    # Expand 'a:c'-style letter ranges and re-dispatch.
    if(len(line.split(':')) > 1):
        self.modcurve(pdvutil.getletterargs(line), flag, args)
        return 0
    else:
        line = line.split()
        for i in range(len(line)):
            for j in range(len(self.plotlist)):
                name = self.plotlist[j].plotname
                if(name == line[i].upper()): #operate on each curve found in args
                    cur = self.plotlist[j]
                    # --- arithmetic edits of the data arrays ---
                    if(flag == 'my'):
                        cur.y *= float(modvalue)
                        cur.edited = True
                    elif(flag == 'mx'):
                        cur.x *= float(modvalue)
                        cur.edited = True
                    elif(flag == 'divy'):
                        if(float(modvalue) == 0):
                            modvalue = '1e-10' # guard divide-by-zero
                        cur.y /= float(modvalue)
                        cur.edited = True
                    elif(flag == 'divx'):
                        if(float(modvalue) == 0):
                            modvalue = '1e-10' # guard divide-by-zero
                        cur.x /= float(modvalue)
                        cur.edited = True
                    elif(flag == 'dy'):
                        cur.y += float(modvalue)
                        cur.edited = True
                    elif(flag == 'dx'):
                        cur.x += float(modvalue)
                        cur.edited = True
                    # --- appearance settings ---
                    elif(flag == 'scatter'):
                        if(modvalue == '0' or modvalue.upper() == 'OFF'):
                            cur.scatter = False
                        elif(modvalue == '1' or modvalue.upper() == 'ON'):
                            cur.scatter = True
                    elif(flag == 'linespoints'):
                        if(modvalue == '0' or modvalue.upper() == 'OFF'):
                            cur.linespoints = False
                        elif(modvalue == '1' or modvalue.upper() == 'ON'):
                            cur.linespoints = True
                    elif(flag == 'lnwidth'):
                        cur.linewidth = float(modvalue)
                    elif flag == 'lnstyle':
                        if modvalue == 'solid':
                            cur.linestyle = '-'
                        elif modvalue == 'dot':
                            cur.linestyle = ':'
                        elif modvalue == 'dash':
                            cur.linestyle = '--'
                        elif modvalue == 'dotdash':
                            cur.linestyle = '-.'
                        cur.dashes = None # Restore default dash behaviour
                    elif(flag == 'drawstyle'):
                        # default, steps, steps-pre, steps-post
                        cur.drawstyle = modvalue
                    elif(flag == 'dashstyle'):
                        if modvalue[:2].upper() == 'DE':
                            cur.dashes = None
                        else:
                            # NOTE(review): eval of user input; must be an
                            # even-length list of positive dash lengths.
                            val = eval(modvalue)
                            assert type(val) == list
                            assert len(val) % 2 == 0
                            assert min(val) > 0
                            cur.dashes = val
                    elif(flag == 'hide'):
                        if(modvalue == 'OFF'):
                            cur.hidden = False
                        elif(modvalue == 'ON'):
                            cur.hidden = True
                    # --- point lookups (print only, no data change) ---
                    elif(flag == 'getx'):
                        try:
                            getxvalues = pydvif.getx(cur, float(modvalue))
                            if getxvalues:
                                print '\nCurve ' + cur.plotname
                                for i in xrange(len(getxvalues)):
                                    x, y = getxvalues[i]
                                    print ' x: %.6e y: %.6e\n' % (x, y)
                        except ValueError as detail:
                            print 'Error: %s' % detail
                    elif(flag == 'gety'):
                        try:
                            getyvalues = pydvif.gety(cur, float(modvalue))
                            if getyvalues:
                                print '\nCurve ' + cur.plotname
                                for i in xrange(len(getyvalues)):
                                    x, y = getyvalues[i]
                                    print ' x: %.6e y: %.6e' % (x, y)
                        except ValueError as detail:
                            print 'Error: %s' % detail
                    # --- range clipping; curves shrinking below two points
                    #     are dropped from the plotlist entirely ---
                    # NOTE(review): self.plotlist.pop(j) mutates the list the
                    # j loop is indexing; only the break below makes this
                    # safe -- confirm behaviour for multi-curve arguments.
                    elif(flag == 'xmin'):
                        nx = []
                        ny = []
                        for dex in range(len(cur.x)):
                            if(cur.x[dex] >= float(modvalue)):
                                nx.append(cur.x[dex])
                                ny.append(cur.y[dex])
                        if(len(nx) >= 2):
                            cur.x = numpy.array(nx)
                            cur.y = numpy.array(ny)
                            cur.edited = True
                        else:
                            cur.plotname = ''
                            self.plotlist.pop(j)
                    elif(flag == 'xmax'):
                        nx = []
                        ny = []
                        for dex in range(len(cur.x)):
                            if(cur.x[dex] <= float(modvalue)):
                                nx.append(cur.x[dex])
                                ny.append(cur.y[dex])
                        if(len(nx) >= 2):
                            cur.x = numpy.array(nx)
                            cur.y = numpy.array(ny)
                            cur.edited = True
                        else:
                            cur.plotname = ''
                            self.plotlist.pop(j)
                    elif(flag == 'ymin'):
                        nx = []
                        ny = []
                        for dex in range(len(cur.y)):
                            if(cur.y[dex] >= float(modvalue)):
                                nx.append(cur.x[dex])
                                ny.append(cur.y[dex])
                        if(len(nx) >= 2):
                            cur.x = numpy.array(nx)
                            cur.y = numpy.array(ny)
                            cur.edited = True
                        else:
                            cur.plotname = ''
                            self.plotlist.pop(j)
                    elif(flag == 'ymax'):
                        nx = []
                        ny = []
                        for dex in range(len(cur.y)):
                            if(cur.y[dex] <= float(modvalue)):
                                nx.append(cur.x[dex])
                                ny.append(cur.y[dex])
                        if(len(nx) >= 2):
                            cur.x = numpy.array(nx)
                            cur.y = numpy.array(ny)
                            cur.edited = True
                        else:
                            cur.plotname = ''
                            self.plotlist.pop(j)
                    break
##operate on given curves by a function##
def func_curve(self, line, flag, do_x=0, args=[]):
    """Apply the unary function named by `flag` to each listed curve.

    line : space-separated curve letters (or 'a:c' ranges)
    flag : function name, e.g. 'sin', 'sqrt', 'j0', 'powa', ...
    do_x : 0 -> transform the y data; nonzero -> transform the x data
    args : extra numeric argument for 'jn', 'yn', 'powa', 'powr'

    Replaces 240 lines of identical if/elif branches with a dispatch
    table: every branch performed the same transform/rename/mark-edited
    sequence, differing only in the function applied.
    """
    import scipy.special
    try:
        scipy.special.errprint(1)
    except AttributeError:
        # errprint() was removed from modern SciPy; proceed without it
        # (the original crashed here on SciPy >= 1.2).
        pass
    if not line:
        return 0
    # Expand 'a:c'-style letter ranges and re-dispatch.
    if len(line.split(':')) > 1:
        self.func_curve(pdvutil.getletterargs(line), flag, do_x, args)
        return 0
    # Simple elementwise transforms: flag -> callable.
    unary = {
        'abs': numpy.abs, 'exp': numpy.exp,
        'sin': numpy.sin, 'cos': numpy.cos, 'tan': numpy.tan,
        'asin': numpy.arcsin, 'acos': numpy.arccos, 'atan': numpy.arctan,
        'sinh': numpy.sinh, 'cosh': numpy.cosh, 'tanh': numpy.tanh,
        'asinh': numpy.arcsinh, 'acosh': numpy.arccosh, 'atanh': numpy.arctanh,
        'j0': scipy.special.j0, 'j1': scipy.special.j1,
        'y0': scipy.special.y0, 'y1': scipy.special.y1,
        'recip': numpy.reciprocal, 'sqr': numpy.square, 'sqrt': numpy.sqrt,
    }
    # Transforms that also consume the extra argument in args[0].
    if flag == 'jn':
        func = lambda d: scipy.special.jn(float(args[0]), d)
    elif flag == 'yn':
        func = lambda d: scipy.special.yn(int(args[0]), d)
    elif flag == 'powa':
        func = lambda d: numpy.power(float(args[0]), d)
    elif flag == 'powr':
        func = lambda d: numpy.power(d, float(args[0]))
    else:
        func = unary.get(flag)
    if func is None:
        return  # unknown flag: the original silently did nothing
    for token in line.split():
        for j in range(len(self.plotlist)):
            cur = self.plotlist[j]
            if cur.plotname == token.upper():  # operate on each curve found in args
                if do_x == 0:
                    cur.y = func(cur.y)
                    cur.name = flag + '(' + cur.name + ')'
                else:
                    # x-transforms record an 'x' suffix in the history name
                    cur.x = func(cur.x)
                    cur.name = flag + 'x(' + cur.name + ')'
                cur.edited = True
                break
def apply_uichanges(self):
    """
    Applies the changes made by the user from the GUI.

    Copies the current matplotlib axes state (scales, title, limits,
    axis labels) back onto this object, then copies per-line style
    settings from the drawn artists back onto the curve objects.
    """
    self.plotter.plotChanged = False
    cur_axes = plt.gca() # Get current axes
    #Save Logscale
    if cur_axes.get_xscale() == "linear":
        self.xlogscale = False
    else:
        self.xlogscale = True
    if cur_axes.get_yscale() == "linear":
        self.ylogscale = False
    else:
        self.ylogscale = True
    #Save plot title
    self.title = cur_axes.get_title()
    #Save x and y limits (only when the GUI owns the limits)
    if self.guilims:
        self.xlim = cur_axes.get_xlim()
        self.ylim = cur_axes.get_ylim()
    #Save x and y labels
    self.xlabel = cur_axes.get_xlabel()
    self.ylabel = cur_axes.get_ylabel()
    #Update Curves
    # NOTE(review): this assumes orderlist[i] corresponds to
    # get_lines()[i]; hidden curves are not drawn, so the two sequences
    # can fall out of step when any curve is hidden -- confirm.
    orderlist = sorted(self.plotlist, key= lambda x: x.plotprecedence)
    plotcurvelist = cur_axes.get_lines()
    for i in range(len(plotcurvelist)):
        if not orderlist[i].hidden:
            c = orderlist[i]
            c.name = plotcurvelist[i].get_label()
            c.linestyle = plotcurvelist[i].get_linestyle()
            c.drawstyle = plotcurvelist[i].get_drawstyle()
            c.linewidth = plotcurvelist[i].get_linewidth()
            c.color = plotcurvelist[i].get_color()
            # Marker properties
            c.markerstyle = plotcurvelist[i].get_marker()
            c.markersize = plotcurvelist[i].get_markersize()
            c.markerfacecolor = plotcurvelist[i].get_markerfacecolor()
            c.markeredgecolor = plotcurvelist[i].get_markeredgecolor()
##iterates through plotlist and displays curves on graph##
@property
def updateplot(self):
    """Redraw the whole plot from plotlist: style sheet, axes, ticks,
    grid, curves, curve-letter labels, legend, and user annotations.

    NOTE(review): decorating a redraw routine with @property is
    unusual -- merely accessing self.updateplot triggers a full
    redraw; confirm callers rely on attribute access, not a call.
    """
    try:
        # Re-apply the requested matplotlib style sheet, if any.
        if stylesLoaded:
            if self.updatestyle:
                styles = pydvif.get_styles()
                try:
                    idx = styles.index(self.plotter.style)
                    style.use(styles[idx])
                except:
                    if len(styles) > 0:
                        print "\nStyle Error: %s doesn't exist, defaulting to %s\n" % (self.plotter.style, styles[0])
                        self.plotter.style = styles[0]
                        style.use(styles[0])
                    else:
                        print "\nStyle Error: no styles available\n"
                self.updatestyle = False
        plt.clf()
        plt.cla()
        cur_axes = plt.gca()
        if self.plotcolor is not None:
            cur_axes.patch.set_facecolor(self.plotcolor)
        if self.figcolor is not None:
            self.plotter.fig.set_facecolor(self.figcolor)
        # Setup Plot Attributes
        xlabeltext = plt.xlabel(self.xlabel, fontsize = self.xlabelfont)
        if self.xlabelcolor is not None:
            xlabeltext.set_color(self.xlabelcolor)
        ylabeltext = plt.ylabel(self.ylabel, fontsize = self.ylabelfont)
        if self.ylabelcolor is not None:
            ylabeltext.set_color(self.ylabelcolor)
        title = plt.title(self.title, fontsize = self.titlefont)
        if self.titlecolor is not None:
            title.set_color(self.titlecolor)
        if self.xtickcolor is not None:
            for label in cur_axes.get_xticklabels():
                label.set_color(self.xtickcolor)
        if self.ytickcolor is not None:
            for label in cur_axes.get_yticklabels():
                label.set_color(self.ytickcolor)
        plt.xticks(size=self.axistickfont)
        plt.yticks(size=self.axistickfont)
        for tlabel in plt.axes().get_xticklabels(minor=True):
            plt.setp(tlabel, size=self.axistickfont)
        for tlabel in plt.axes().get_yticklabels(minor=True):
            plt.setp(tlabel, size=self.axistickfont)
        # Nothing to draw: push the empty canvas and bail out early.
        if len(self.plotlist) < 1:
            plt.draw()
            self.plotter.canvas.update()
            self.plotter.canvas.draw()
            return 0
        #set scaling and tick locations
        #
        # Notes on matplotlib that I found very helpful:
        #
        # plt.gca() is "get current axes instance"
        # ScalarFormatter works with linear scales, MaxNLocator
        # LogFormatter is needed to work with LogLocator, produces 1,10,100,...
        # LogFormatterExponent produces 0,1,2,...
        # LogFormatterMathtext produces 10**0,10**1,10**2,...
        xls = self.xlogscale
        yls = self.ylogscale
        if(xls):
            cur_axes.set_xscale('log', nonposx='clip')
        if(yls):
            cur_axes.set_yscale('log', nonposy='clip')
        # thinking about what we want here
        # xticks de
        # or a number
        # or a list of locations
        # or a tuple of (locations, labels)
        # xls on or off
        # xtickformat = 'sci', 'plain', 'exp', '10**'
        # set x,y tick sizes and tick label format
        cur_axes.tick_params(axis='x', length=self.xticklength, width=self.xtickwidth)
        cur_axes.tick_params(axis='y', length=self.yticklength, width=self.ytickwidth)
        yaxis = cur_axes.yaxis
        xaxis = cur_axes.xaxis
        self.tickFormat(yaxis, self.ylogscale, self.yticks, self.ytickformat)
        self.tickFormat(xaxis, self.xlogscale, self.xticks, self.xtickformat)
        # plot the grid, if grid turned on
        if self.showgrid:
            if plt.xlim is not None and plt.ylim is not None:
                # spans of 100x or more on a log axis also get minor grid lines
                if((plt.xlim()[0]*100 > plt.xlim()[1] and xls) or (plt.ylim()[0]*100 > plt.ylim()[1] and yls)):
                    plt.grid(True, which='majorminor')
                else:
                    plt.grid(True)
            else:
                plt.grid(True)
        else:
            plt.grid(False)
        # order list in which curves should be plotted
        orderlist = sorted(self.plotlist, key= lambda x: x.plotprecedence)
        #plot the curves
        for cur in orderlist:
            if not cur.hidden:
                xdat = numpy.array(cur.x)
                ydat = numpy.array(cur.y)
                if yls:
                    for i in range(len(ydat)):
                        if(ydat[i] < 0):
                            ydat[i] = 1e-301 #custom ydata clipping
                if xls:
                    for i in range(len(xdat)):
                        if xdat[i] < 0:
                            xdat[i] = 1e-301 #custom ydata clipping
                if cur.ebar is not None:
                    plt.errorbar(xdat,
                                 ydat,
                                 yerr=[cur.ebar[0], cur.ebar[1]],
                                 xerr=[cur.ebar[2], cur.ebar[3]],
                                 fmt='-')
                    c = plt.plot(xdat, ydat)
                elif cur.erange is not None:
                    c = plt.plot(xdat, ydat)
                    plt.fill_between(xdat,
                                     ydat - cur.erange[0],
                                     ydat + cur.erange[1],
                                     alpha=0.4,
                                     color=c[0].get_color())
                    c = plt.plot(xdat, ydat)
                else:
                    c = plt.plot(xdat, ydat)
                if cur.color != '':
                    plt.setp(c, color=cur.color)
                else:
                    # remember the auto-assigned color for later redraws
                    cur.color = c[0].get_color()
                if cur.linespoints:
                    plt.setp(c, marker=cur.marker, markersize=cur.markersize, linestyle=cur.linestyle)
                elif cur.scatter:
                    plt.setp(c, marker=cur.marker, markersize=cur.markersize, linestyle=' ')
                else:
                    if cur.markeredgecolor is None:
                        cur.markeredgecolor = cur.color
                    if cur.markerfacecolor is None:
                        cur.markerfacecolor = cur.color
                    # plt.setp(c, marker=cur.markerstyle, markeredgecolor=cur.markeredgecolor, markerfacecolor=cur.markerfacecolor, linestyle=cur.linestyle)
                    plt.setp(c, marker=cur.markerstyle, markersize=cur.markersize, markeredgecolor=cur.markeredgecolor,
                             markerfacecolor=cur.markerfacecolor, linestyle=cur.linestyle)
                c[0].set_drawstyle(cur.drawstyle)
                c[0]._invalidx = True # Work-around for set_drawstyle bug (https://github.com/matplotlib/matplotlib/issues/10338)
                if cur.linewidth:
                    plt.setp(c, lw=cur.linewidth)
                    plt.setp(c, mew=cur.linewidth)
                elif self.linewidth:
                    plt.setp(c, lw=self.linewidth)
                    plt.setp(c, mew=self.linewidth)
                plt.setp(c, label=cur.name)
                if cur.dashes is not None:
                    c[0].set_dashes(cur.dashes)
        #ensure proper view limits
        #plt.axis('tight')
        if self.xlim is not None:
            plt.xlim(self.xlim[0], self.xlim[1])
        if self.ylim is not None:
            plt.ylim(self.ylim[0], self.ylim[1])
        #plot the curve labels
        if self.showletters:
            #get range and domain of plot
            xmin = plt.axis()[0]
            xmax = plt.axis()[1]
            ymin = plt.axis()[2]
            ymax = plt.axis()[3]
            spacing = (xmax-xmin)/6
            offset = 0
            for cur in orderlist:
                if not cur.hidden:
                    plt.text(cur.x[0], cur.y[0], cur.plotname, color=cur.color, fontsize=self.curvelabelfont)
                    curxmax = max(cur.x)
                    curxmin = min(cur.x)
                    if self.xlim is not None:
                        if self.xlim[1] < curxmax:
                            curxmax = self.xlim[1]
                        if self.xlim[0] > curxmin:
                            curxmin = self.xlim[0]
                    spacing = (curxmax-curxmin)/6
                    # stagger each curve's labels so they do not overlap
                    labelx = curxmin + offset*spacing/len(self.plotlist)
                    while labelx < curxmax: #print letter labels along curves
                        close = self.getclosest(cur.x, labelx)
                        if(cur.y[close] <= ymax and cur.y[close] >= ymin):
                            plt.text(cur.x[close], cur.y[close], cur.plotname, color=cur.color, fontsize=self.curvelabelfont)
                        labelx += spacing
                    plt.text(cur.x[-1], cur.y[-1], cur.plotname, color=cur.color, fontsize=self.curvelabelfont)
                    offset += 1
        #fonts/labels/legend
        if self.showkey:
            leg = plt.legend(fancybox=True, numpoints=1, loc=self.key_loc, ncol=self.key_ncol, handlelength=self.handlelength)
            if leg is not None:
                leg.get_frame().set_alpha(0.9)
                #leg = plt.gca().get_legend()
                leg.draggable(state=True)
                ltext = leg.get_texts()
                plt.setp(ltext, fontsize=self.keyfont)
                plt.setp(ltext, color=self.keycolor)
        for text in self.usertexts:
            plt.text(text[0], text[1], text[2], fontsize = self.annotationfont)
        plt.draw()
        self.plotter.canvas.update()
        self.plotter.canvas.draw()
    except RuntimeError as detail:
        # NOTE(review): indexing the exception (detail[-1]) is
        # Python-2-only behaviour used to spot LaTeX render failures.
        if(detail[-1].split()[0] == 'LaTeX'):
            print 'error: invalid LaTeX syntax'
        else:
            print 'error: draw may not have completed properly: %s' % detail
        if(self.debug): traceback.print_exc(file=sys.stdout)
    except OverflowError:
        print 'Caught overflow error attempting to plot. Try using "subsample" to reduce the data.'
    except:
        print 'error: draw may not have completed properly'
        if(self.debug):
            traceback.print_exc(file=sys.stdout)
    finally:
        # keep any open GUI dialogs in sync with the new plot state
        self.plotter.updateDialogs()
##load an ultra file and add parsed curves to the curvelist##
def load(self, fname, gnu=False, pattern=None, matches=None):
    """Read curves from an ULTRA file via pydvif and append them to
    curvelist, recording (filename, curve count) in filelist."""
    found = pydvif.read(fname, gnu, self.xCol, self.debug, pattern, matches)
    if found:
        self.curvelist.extend(found)
        self.filelist.append((fname, len(found)))
##load a csv (commas separated values) text data file, add parsed curves to the curvelist##
def load_csv(self, fname):
    """Read curves from a comma-separated-values file via pydvif and
    append them to curvelist, recording (filename, curve count)."""
    found = pydvif.readcsv(fname, self.xCol, self.debug)
    if found:
        self.curvelist.extend(found)
        self.filelist.append((fname, len(found)))
##read in a resource definition file##
def loadrc(self):
    """Read ~/.pdvrc ('var = value' per line) and apply recognized
    settings.  Malformed lines are skipped; a missing or unreadable
    file simply returns 0 and leaves the defaults in place."""
    try:
        f = open(os.getenv('HOME') + '/.pdvrc', 'r')
        for line in f:
            try:
                line = line.split('=')
                var = line[0].strip().lower()
                val = line[1].strip()
                if(var == 'xlabel'):
                    self.xlabel = val
                elif(var == 'ylabel'):
                    self.ylabel = val
                elif(var == 'title'):
                    self.title = val
                elif(var == 'namewidth'):
                    self.namewidth = int(val)
                elif(var == 'key'):
                    if(val.upper() == 'ON' or val == str(1)):
                        self.showkey = True
                    else:
                        self.showkey = False
                elif var == 'grid':
                    if val.upper() == 'ON' or val == str(1):
                        self.showgrid = True
                    else:
                        self.showgrid = False
                elif(var == 'letters'):
                    if(val.upper() == 'ON' or val == str(1)):
                        self.showletters = True
                    else:
                        self.showletters = False
                elif(var == 'geometry'):
                    vals = val.split()
                    self.geometry = vals[0], vals[1], vals[2], vals[3]
                elif(var == 'initcommands'):
                    self.initrun = ''.join(val)
                elif var == 'fontsize':
                    # one size applied to every text element
                    self.titlefont = val
                    self.xlabelfont = val
                    self.ylabelfont = val
                    self.keyfont = val
                    self.axistickfont = val
                    self.curvelabelfont = val
                    self.annotationfont = val
                elif var == 'lnwidth':
                    self.linewidth = val
            except:
                continue # skip malformed lines
        f.close()
    except:
        return 0 # no ~/.pdvrc: keep defaults
# set tick format and locations
# set tick format and locations
def tickFormat(self, axis, logscale, ticks, tickformat):
    """Configure the major tick formatter and locator on `axis`.

    logscale   : whether the axis is logarithmic
    ticks      : 'de', an int tick count, a tuple of locations, or a
                 (locations, labels) pair
    tickformat : 'de', 'sci', 'exp', '10**', or a C-style '%' format
    Raises RuntimeError when `ticks` has an unrecognized type.
    """
    if logscale:
        if tickformat == 'de' or tickformat == 'sci':
            axis.set_major_formatter(matplotlib.ticker.LogFormatter())
        elif tickformat == 'exp':
            axis.set_major_formatter(matplotlib.ticker.LogFormatterExponent())
        elif tickformat == '10**':
            axis.set_major_formatter(matplotlib.ticker.LogFormatterMathtext())
        else:
            # unsupported format on a log axis: warn and fall back
            if tickformat[0] == '%':
                print '\nWarning: C-style formating can not be applied when logscale is on'
            else:
                print '\nError: Unknown xtick format (%s)' % tickformat
            axis.set_major_formatter(matplotlib.ticker.LogFormatter())
    else:
        if tickformat == 'de' or tickformat == 'exp' or tickformat == '10**':
            if tickformat == 'exp' or tickformat == '10**':
                print '\nWarning: logscale is off. exp and 10** only apply when logscale is on'
            axis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
        elif tickformat == 'sci':
            axis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%5.2e'))
        else:
            if tickformat[0] == '%':
                if tickformat[-1] in ('d', 'e', 'E', 'f', 'F'):
                    axis.set_major_formatter(matplotlib.ticker.FormatStrFormatter(tickformat))
                else:
                    print '\nError: %s is an unsupported xtickformat type' % tickformat[-1]
                    axis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%d'))
            else:
                print "\nError: Unknown xtick format. Try adding '%' to the beginning of your format string"
                axis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%d'))
    # if ticks is set, figure out what user wants for ticks
    if ticks != 'de':
        if isinstance(ticks, int):
            #print 'setting ticks to number ', ticks
            axis.set_major_locator(matplotlib.ticker.MaxNLocator(ticks))
        elif isinstance(ticks, tuple): # could be locations, could be (locations, labels)
            if isinstance(ticks[0], Number): # it's a tuple of locations
                #print 'setting ticks to tuple ', ticks
                # NOTE(review): the numeric locations are also handed to
                # FixedFormatter as the label strings -- confirm intended.
                axis.set_major_locator(matplotlib.ticker.FixedLocator(ticks))
                axis.set_major_formatter(matplotlib.ticker.FixedFormatter(ticks))
            if isinstance(ticks[0], tuple) and len(ticks) == 2: # it's (locations, labels)
                #print 'setting ticks to loc,label ', ticks
                axis.set_major_locator(matplotlib.ticker.FixedLocator(ticks[0]))
                axis.set_major_formatter(matplotlib.ticker.FixedFormatter(ticks[1]))
        else: # I can't figure this out, throw an exception
            print "CAN'T SET TICKS!!!"
            raise RuntimeError, 'ticks set to bad value'  # Python-2 raise syntax
def console_run(self):
    """Run the PyDV command loop forever; when the user exits it, drop
    into a raw Python console, then loop back into PyDV when that
    console closes (Ctrl-D)."""
    while True:
        self.cmdloop('\n\tPython Data Visualizer 2.4.2 - 04.24.2019\n\tType "help" for more information.\n\n')
        print '\n Starting Python Console...\n Ctrl-D to return to PyDV\n'
        console = code.InteractiveConsole(locals())
        console.interact()
################################################################################################
##### private functions
################################################################################################
def __qtMsgHandler(self, type, msg):
    """Route Qt framework log messages to stdout, but only when debug
    mode is enabled; otherwise Qt messages are dropped silently."""
    if self.debug:
        if type == QtDebugMsg:
            print "\nQt Debug: %s\n" % msg
        elif type == QtWarningMsg:
            print "\nQt Warning: %s\n" % msg
        elif type == QtCriticalMsg:
            print "\nQt Critical: %s\n" % msg
        elif type == QtFatalMsg:
            print "\nQt Fatal: %s\n" % msg
        elif type == QtSystemMsg:
            print "\nQt System: %s\n" % msg
################################################################################################
##### main function
################################################################################################
def main(self):
    """Program entry point: configure matplotlib and Qt, load data
    files named on the command line (-gnu/-csv select column mode,
    -i/--init names a command file), then start the GUI event loop
    with an interactive console on a background thread."""
    matplotlib.rc('text', usetex=False)
    matplotlib.rc('font', family='sans-serif')
    self.loadrc()
    qInstallMsgHandler(self.__qtMsgHandler)
    self.app = QApplication(sys.argv)
    self.plotter = pdvplot.Plotter(self)
    self.plotter.updateGeometry(self.geometry)
    # Load readline history; create an empty history file if missing.
    try:
        readline.read_history_file(os.getenv('HOME') + '/.pdvhistory')
    except:
        f = open(os.getenv('HOME') + '/.pdvhistory', 'w')
        f.close()
    # throw into column format mode if there is a -gnu or -csv arg
    # NOTE(review): the loop bound is len(sys.argv[1:]) but it indexes
    # sys.argv[i], so i=0 is the script name and the final argument is
    # never examined -- a flag in last position is missed; confirm.
    gnu = False
    csv = False
    for i in xrange(len(sys.argv[1:])):
        if sys.argv[i] == '-gnu':
            gnu = True
            # optional numeric argument after the flag selects the x column
            try:
                self.xCol = int(sys.argv[i+1])
                sys.argv.pop(i+1)
            except ValueError:
                self.xCol = 0
            sys.argv.remove('-gnu')
            break
        if sys.argv[i] == '-csv':
            csv = True
            try:
                self.xCol = int(sys.argv[i+1])
                sys.argv.pop(i+1)
            except ValueError:
                self.xCol = 0
            sys.argv.remove('-csv')
            break
    if gnu or csv:
        print 'Going to column format, using ', self.xCol, ' for x-axis data'
    initarg = False
    for i in range(len(sys.argv)): # look for command line args:
        if(i != 0): # '-i commandfile', and/or 'datafile1 datafile2 ...'
            if(sys.argv[i] == '-i' or sys.argv[i] == '--init'):
                initarg = True
            elif(initarg == True):
                # the token after -i/--init is the command-file name
                initarg = sys.argv[i]
            else:
                if not csv:
                    self.load(sys.argv[i], gnu)
                else:
                    self.load_csv(sys.argv[i])
    if(self.initrun != None): # does the .pdvrc specify a file to run of initial commands?
        self.do_run(self.initrun) # yes? then run the file.
    if(isinstance(initarg, str)): # If there was a '-i file' specified, run that file
        self.do_run(initarg)
    self.postcmd(0, 'run')
    try:
        # Start interactive console in separate thread
        thread = Thread(target = self.console_run)
        thread.start()
        # Start PyDV Application
        self.plotter.show()
        self.app.exec_()
    except SystemExit:
        self.app.quit()
        sys.exit()
    except KeyboardInterrupt:
        self.app.quit()
        sys.exit()
    except:
        if self.debug:
            traceback.print_exc(file=sys.stdout)
        else:
            pass
def main():
    """Module-level entry point: build a Command instance and run it."""
    Command().main()
# Standard script guard: importing this module must not launch the GUI.
if __name__ == '__main__':
    main()
|
web_crawler.py | import os
import time #used to delay processing
from functools import wraps #used to create timing wrapper
import json
import threading #required for multi-threading
import shelve
import sys
import io
import fileinput
import string
import urllib #required to open html documents
#import urllib2 #required in python 2.7
import re #required to remove html tags vie regex
import codecs #required to open html files
import nltk #requires python 3.5 or python 2.7 to install
from nltk.stem.porter import *
from nltk.corpus import stopwords #must download stopwords at least once. This is done by
#open the python shell and type:
#import nltk
#nltk.download("stopwords")
from collections import defaultdict #necessary for the proximity data structure
#
# Python EECS 767 Niche Web Crawler
# Author: Blake Bryant
# KU Student ID: 2732226
# Date started: 3/27/2018
# Date finished: xx/xx/xxxx
#
# This program requires a seed url to crawl for additional urls
# The seed URL web page will be stored as well as all other urls
# within the same domain
# ----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#variables
#-----------------------------------------------------------------------------
# the URL seed is the root of all pages to be downloaded
#url_seed = 'http://www.oldbaileyonline.org/'
#url_seed='http://www.espn.com'
#url_seed='https://www.nist.gov/'
# Crawl scope: only links containing this seed string are followed/queued.
url_seed='https://www.iso.org'
#url is the first URL that should be downloaded. This may be different from the
#url seed
#url= 'http://www.oldbaileyonline.org/browse.jsp?id=t17800628-33&div=t17800628-33'
#url='https://www.nist.gov/'
#url= 'http://www.oldbaileyonline.org/'
#url='http://www.espn.com/'
#https://www.rfc-editor.org/rfc-index.html
#https://standards.gov/sibr/query/index.cfm?fuseaction=rsibr.regulatory_sibr_all
# First page fetched; may differ from url_seed (deeper entry point).
url='https://www.iso.org/standards.html'
#https://www.loc.gov/
#https://www.ncaa.com/march-madness
#https://www.nasa.gov/
#https://www.uspto.gov/
#https://patentscope.wipo.int/search/en/sequences.jsf
#http://patentsgazette.uspto.gov/week06/OG/patenteeByName.html
#http://patentsgazette.uspto.gov/week06/OG/patenteeByType.html
#http://patentsgazette.uspto.gov/week06/OG/Cpc-a.html
#https://www.epo.org/index.html
#https://www.ama-assn.org/
#http://www.dcc.ac.uk/resources/metadata-standards/list
#https://www.nist.gov/
#https://www.ncdc.noaa.gov/cdo-web/
#cached_doc_path ='C:\Users\b589b426\Documents\_student\EECS_767\Project\cached_docs'
#this is the relative path to the directory where pages will be downloaded to
# Relative directory where fetched pages are cached as .htm files.
cached_doc_path = 'cached_docs/'#python 2 version
#--------End --Variables ---------------------------------------------------
#---------------------------------------------------------------------------
# ----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Global Functions
#-----------------------------------------------------------------------------
# This function adds timing data to program execution
def timing(f):
    """Decorator that prints the wall-clock execution time of *f* after each call."""
    @wraps(f)
    def timed(*args, **kwargs):
        started = time.time()
        result = f(*args, **kwargs)
        elapsed = time.time() - started
        print ("\t%s Execution Time (sec): %s" %
               (f.__name__, str(elapsed)))
        return result
    return timed
#This function is used to integrate with threading and
# call the webcrawler.func_download_page() function
#
def download_page_thread(url_to_download, crawler):
    """Thread worker: download one URL via *crawler* and harvest its links.

    :param url_to_download: URL string to fetch.
    :param crawler: WebCrawler instance whose queues/manifest are updated.
    """
    print ('downloading:')
    print (url_to_download)
    # BUG FIX: initialize before the try — if func_download_page raised,
    # ``downloaded`` was unbound and the second try died on a NameError.
    downloaded = None
    try:
        downloaded = crawler.func_download_page(url_to_download)
    except Exception:
        print('Error calling func_download_page in loop', sys.exc_info()[0], sys.exc_info()[1])
    try:
        if downloaded:  # None/empty when the fetch failed
            crawler.func_find_urls_on_page(downloaded)
    except Exception:
        print('Error calling func_find_urls_on_page in loop', sys.exc_info()[0], sys.exc_info()[1])
    time.sleep(1)  # politeness delay between requests
#--------End --Global Functions ---------------------------------------------------
#---------------------------------------------------------------------------
# ----------------------------------------------------------------
# Class used to store values and functions used by main
# ----------------------------------------------------------------
class WebCrawler(object):
    """Same-domain crawler: fetches pages, caches them locally, and harvests
    new links from each page.

    Uses the module globals ``url_seed`` (crawl scope/domain) and
    ``cached_doc_path`` (cache directory).
    """

    def __init__(self, url_downloaded_queue, need_to_download_queue, download_manifest):
        # URLs already fetched (shared with worker threads)
        self.url_downloaded_queue = url_downloaded_queue
        # URLs discovered but not yet fetched
        self.need_to_download_queue = need_to_download_queue
        # maps cached file name -> source URL
        self.download_manifest = download_manifest

    #@timing #comment out to remove timing
    def func_download_page(self, passed_url):
        """Fetch ``passed_url``, cache it under ``cached_doc_path`` and update
        the queues and manifest.

        :returns: the decoded page text, or None when the URL could not be read.
        """
        try:
            page = urllib.request.urlopen(passed_url).read().decode('utf-8')
        except Exception:
            print('Error using urllib to read webpage at url', sys.exc_info()[0], sys.exc_info()[1])
            return None
        # Derive a filesystem-safe cache name: strip the seed prefix, then all
        # punctuation (Windows rejects many URL characters in file names).
        stripped_url = None
        try:
            stripped_url = re.compile(url_seed, re.DOTALL).sub('', str(passed_url))
            stripped_url = stripped_url.translate(str.maketrans('', '', string.punctuation))
        except Exception:
            print('Error stripping prefix with regex', sys.exc_info()[0], sys.exc_info()[1])
        if stripped_url is not None:
            try:
                filename = str(cached_doc_path + stripped_url + '.htm')
                # BUG FIX: the original evaluated ``f.close`` without calling it,
                # leaking the handle; ``with`` guarantees the close.
                with open(filename, "w", encoding="utf-8") as cache_file:
                    cache_file.write(page)
            except Exception:
                print('Error writing page to local file', sys.exc_info()[0], sys.exc_info()[1])
        try:
            self.url_downloaded_queue.append(passed_url)
        except Exception:
            print('Error updating url downloaded queue', sys.exc_info()[0], sys.exc_info()[1])
        try:
            if self.need_to_download_queue:  # skip remove() on an empty queue
                self.need_to_download_queue.remove(passed_url)
        except Exception:
            print('Error removing url from need_to_download_queue', sys.exc_info()[0], sys.exc_info()[1])
        if stripped_url is not None:
            # Map the cached file name back to its URL and persist the mapping.
            try:
                self.download_manifest[str(stripped_url + '.htm')] = str(passed_url)
                self.func_export_download_manifest_with_shelve()
            except Exception:
                print('Error adding url to download_manifest', sys.exc_info()[0], sys.exc_info()[1])
        return page

    #@timing #comment out to remove timing
    def func_export_download_manifest_with_shelve(self):
        """Persist the filename->URL manifest to a shelve .db file in the cache dir."""
        print('Exporting manifest to shelf .db file')
        try:
            d = shelve.open(cached_doc_path + '/download_manifest')
            d['manifest'] = self.download_manifest
            d.close()
        except Exception:
            print('Error exporting data via shelve', sys.exc_info()[0], sys.exc_info()[1])

    def func_get_robots_txt(self):
        """Fetch and tokenise <url_seed>/robots.txt.

        :returns: list of tokens (bytes, since read() is not decoded), or None
            on failure.  Unused by main(): the exclusions are hard-coded in
            func_find_urls_on_page's ignore regex.
        """
        try:
            robo_url = str(url_seed + 'robots.txt')
            robots = urllib.request.urlopen(robo_url).read()
            robots = robots.split()  # split words into array
            print('printing robots')
            print(robots)
            return robots
        except Exception:
            print('Error parsing robots.txt', sys.exc_info()[0], sys.exc_info()[1])
            return None

    def func_find_urls_on_page(self, page):
        """Scan ``page`` text for href links inside the crawl domain and queue
        any not yet seen onto ``need_to_download_queue``."""
        try:
            hyperlink_match = re.compile('(href=".*?")', re.DOTALL)  # how to tell if a link
            # links to ignore: search/asset pages, stylesheets, scripts, media, documents
            hyperlink_ignore = re.compile('(/files/live/sites/isoorg/files/_noindex.*?)|(.*?/fr/search/.*?)|(.*?/ru/search/.*?)|(.*?/search/.*?)|(.*?/advanced-search/.*?)|(.*?/fr/advanced-search/.*?)|(.*?/ru/advanced-search/.*?)|(.*?/em.*?)|(.*?.css.*?)|(.*?javascript.*?)|(.*?#.*?)|(.*?mailto.*?)|(.*?.pps)|(.*?.xps)|(.*?.pub)|(.*?.docb)|(.*?.dotm)|(.*?.dotx)|(.*?.docm)|(.*?.xlsx)|(.*?.xls)|(.*?.xlm)|(.*?.xlt)|(.*?.xltx)|(.*?.doc)|(.*?.docx)|(.*?.ppt)|(.*?.pptx)|(.*?.jpg)|(.*?.png)|(.*?.amp)|(.*?.bmp)|(.*?.mp3)|(.*?.mp4)|(.*?.avi)|(.*?.gif)|(.*?.rss)|(.*?.pdf)', re.IGNORECASE)
            hyperlink_clean = re.compile('(href=")|(".*?$)', re.DOTALL)  # sanitize href and root links
            for word in page.split():
                try:
                    if not hyperlink_match.match(word):
                        continue
                    if hyperlink_ignore.match(word):
                        continue
                    link = hyperlink_clean.sub('', str(word))
                    # Normalise relative links (including '../' traversals)
                    # against the seed URL; absolute links pass through.
                    if 'http://' not in link and 'https://' not in link:
                        link = str(url_seed + link)
                    # BUG FIX: the original returned None when an already-queued
                    # absolute link was met, aborting the scan of the rest of
                    # the page; now we simply move on to the next word.
                    self._maybe_enqueue(link)
                except Exception:
                    print('Error differentiating between remaining url types', sys.exc_info()[0], sys.exc_info()[1])
        except Exception:
            print('Error finding urls', sys.exc_info()[0], sys.exc_info()[1])

    def _maybe_enqueue(self, link):
        # Queue ``link`` exactly once, only if it stays within the seed domain.
        if url_seed not in link:
            return
        if link in self.url_downloaded_queue:
            return
        if link not in self.need_to_download_queue:
            self.need_to_download_queue.append(str(link))
# Begin program
# This section of code will provide a brief introductory message and instructions of using the program
#print ("Welcome to the EECS767 web crawling")
#func_download_page(url)
#url_downloaded_queue=[]
#need_to_download_queue=[]
#-------------------------------------------------------------
# The Main Program
#
#----------------------------------------------------------
@timing #comment out to remove timing
def main():
    """Crawl from the module-level seed ``url`` until the queue drains.

    Seeds the queue from the first page, then fetches each queued URL in a
    worker thread until ``need_to_download_queue`` is empty, finally
    re-exporting the manifest.
    """
    print ("Welcome to the EECS767 web crawling program!")
    crawler = WebCrawler([], [], {})
    #crawler.func_get_robots_txt() ## robots.txt is hard coded into the ignore_urls within the func_find_urls_on_page function
    crawler.func_find_urls_on_page(crawler.func_download_page(url))
    print ('queue of urls to download:')
    print (crawler.need_to_download_queue)
    print ('Urls that have been downloaded:')
    print (crawler.url_downloaded_queue)
    # Process urls in the download queue.
    while crawler.need_to_download_queue:
        # Iterate over a snapshot: the worker mutates the queue while we loop.
        for url_to_download in list(crawler.need_to_download_queue):
            # BUG FIX: the original passed target=download_page_thread(...),
            # which called the function inline and handed Thread target=None.
            thread = threading.Thread(target=download_page_thread,
                                      args=(url_to_download, crawler))
            thread.start()
            # Join to keep the one-at-a-time behavior the original code
            # effectively had (its "threads" ran synchronously).
            thread.join()
    crawler.func_export_download_manifest_with_shelve()
    print ('Program complete!')

if __name__ == "__main__":
    main()
|
env_stock_papertrading_erl.py | import datetime
import threading
from neo_finrl.data_processors.processor_alpaca import AlpacaProcessor
import alpaca_trade_api as tradeapi
import time
import pandas as pd
import numpy as np
import torch
import sys
import os
class AlpacaPaperTrading():
    """Paper-trading driver: runs an ElegantRL PPO agent against the Alpaca
    paper-trading REST API on a fixed time interval.

    NOTE(review): throughout this class ``threading.Thread(target=self.f(...))``
    CALLS ``f`` immediately and hands its return value (None) to Thread, so
    these "threads" run synchronously in the calling thread; presumably
    ``target=self.f, args=(...)`` was intended — confirm before relying on
    any concurrency here.  Left unchanged.
    """
    def __init__(self,ticker_list, time_interval, agent, cwd, net_dim,
                 state_dim, action_dim, API_KEY, API_SECRET,
                 APCA_API_BASE_URL, tech_indicator_list, turbulence_thresh=30,
                 max_stock=1e2, latency = None):
        #load agent: only the ElegantRL PPO implementation is supported
        if agent =='ppo':
            try:
                from elegantrl.agent import AgentPPO
                agent = AgentPPO()
                agent.init(net_dim, state_dim, action_dim)
                agent.save_load_model(cwd=cwd, if_save=False)  # load trained weights from cwd
                self.act = agent.act        # policy network used in trade()
                self.device = agent.device
            except:
                raise ValueError('Fail to load the agent! Please check path, state dimension and action_dimension.')
        else:
            raise ValueError('Agent input is NOT supported yet.')

        #connect to Alpaca trading API
        try:
            self.alpaca = tradeapi.REST(API_KEY,API_SECRET,APCA_API_BASE_URL, 'v2')
        except:
            raise ValueError('Fail to connect Alpaca. Please check account info and internet connection.')

        #read trading time interval (stored in seconds)
        if time_interval == '1s':
            self.time_interval = 1
        elif time_interval == '5s':
            self.time_interval = 5
        elif time_interval == '1Min':
            self.time_interval = 60
        elif time_interval == '5Min':
            self.time_interval = 60 * 5
        elif time_interval == '15Min':
            self.time_interval = 60 * 15
        else:
            raise ValueError('Time interval input is NOT supported yet.')

        #read trading settings
        self.tech_indicator_list = tech_indicator_list
        self.turbulence_thresh = turbulence_thresh  # above this, liquidate everything
        self.max_stock = max_stock  # scales raw actions to share counts

        #initialize account
        self.stocks = np.asarray([0] * len(ticker_list)) #stocks holding
        self.stocks_cd = np.zeros_like(self.stocks)  # per-ticker cooldown counters
        self.cash = None #cash record
        self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index = ticker_list)
        self.asset_list = []
        self.price = np.asarray([0] * len(ticker_list))  # latest prices, refreshed by get_state()
        self.stockUniverse = ticker_list
        self.turbulence_bool = 0
        self.equities = []  # [timestamp, last_equity] samples appended by run()

    def test_latency(self, test_times = 10):
        """Average get_state() round-trip time over ``test_times`` calls (seconds)."""
        total_time = 0
        for i in range(0, test_times):
            time0 = time.time()
            self.get_state()
            time1 = time.time()
            temp_time = time1 - time0
            total_time += temp_time
        latency = total_time/test_times
        print('latency for data processing: ', latency)
        return latency

    def run(self):
        """Main loop: cancel open orders, wait for market open, then trade each
        interval until shortly before the close."""
        orders = self.alpaca.list_orders(status="open")
        for order in orders:
            self.alpaca.cancel_order(order.id)

        # Wait for market to open.
        print("Waiting for market to open...")
        tAMO = threading.Thread(target=self.awaitMarketOpen)
        tAMO.start()
        tAMO.join()
        print("Market opened.")

        while True:
            # Figure out when the market will close so we can prepare to sell beforehand.
            clock = self.alpaca.get_clock()
            closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()
            currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
            self.timeToClose = closingTime - currTime

            if(self.timeToClose < (60)):
                # Close all positions when 1 minutes til market close.
                print("Market closing soon. Stop trading.")
                break

                # NOTE(review): the string below is unreachable (after break) —
                # it is a disabled position-liquidation variant kept for reference.
                '''# Close all positions when 1 minutes til market close.
                print("Market closing soon. Closing positions.")

                positions = self.alpaca.list_positions()
                for position in positions:
                    if(position.side == 'long'):
                        orderSide = 'sell'
                    else:
                        orderSide = 'buy'
                    qty = abs(int(float(position.qty)))
                    respSO = []
                    tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))
                    tSubmitOrder.start()
                    tSubmitOrder.join()

                # Run script again after market close for next trading day.
                print("Sleeping until market close (15 minutes).")
                time.sleep(60 * 15)'''
            else:
                trade = threading.Thread(target=self.trade)
                trade.start()
                trade.join()
                # Record an equity sample after each trading step.
                last_equity = float(self.alpaca.get_account().last_equity)
                cur_time = time.time()
                self.equities.append([cur_time,last_equity])
                time.sleep(self.time_interval)

    def awaitMarketOpen(self):
        """Block until Alpaca's clock reports the market open, polling each minute."""
        isOpen = self.alpaca.get_clock().is_open
        while(not isOpen):
            clock = self.alpaca.get_clock()
            openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()
            currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
            timeToOpen = int((openingTime - currTime) / 60)
            print(str(timeToOpen) + " minutes til market open.")
            time.sleep(60)
            isOpen = self.alpaca.get_clock().is_open

    def trade(self):
        """One trading step: query state, run the policy, submit buy/sell orders.

        If turbulence is flagged, liquidates all positions instead.
        """
        state = self.get_state()

        with torch.no_grad():
            s_tensor = torch.as_tensor((state,), device=self.device)
            a_tensor = self.act(s_tensor)
            action = a_tensor.detach().cpu().numpy()[0]
        # Scale the raw policy output to integer share counts.
        action = (action * self.max_stock).astype(int)

        self.stocks_cd += 1
        if self.turbulence_bool == 0:
            min_action = 10  # stock_cd: ignore actions smaller than this many shares
            for index in np.where(action < -min_action)[0]:  # sell_index:
                sell_num_shares = min(self.stocks[index], -action[index])
                qty = abs(int(sell_num_shares))
                respSO = []
                # NOTE(review): runs synchronously — see class docstring.
                tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'sell', respSO))
                tSubmitOrder.start()
                tSubmitOrder.join()
                self.cash = float(self.alpaca.get_account().cash)
                self.stocks_cd[index] = 0
            for index in np.where(action > min_action)[0]:  # buy_index:
                if self.cash < 0:
                    tmp_cash = 0
                else:
                    tmp_cash = self.cash
                # Cap the buy by available cash at the latest price.
                buy_num_shares = min(tmp_cash // self.price[index], abs(int(action[index])))
                qty = abs(int(buy_num_shares))
                respSO = []
                # NOTE(review): runs synchronously — see class docstring.
                tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'buy', respSO))
                tSubmitOrder.start()
                tSubmitOrder.join()
                self.cash = float(self.alpaca.get_account().cash)
                self.stocks_cd[index] = 0
        else:  # sell all when turbulence
            positions = self.alpaca.list_positions()
            for position in positions:
                if(position.side == 'long'):
                    orderSide = 'sell'
                else:
                    orderSide = 'buy'
                qty = abs(int(float(position.qty)))
                respSO = []
                # NOTE(review): runs synchronously — see class docstring.
                tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))
                tSubmitOrder.start()
                tSubmitOrder.join()

            self.stocks_cd[:] = 0

    def get_state(self):
        """Build the observation vector for the agent.

        Layout: [scaled cash, turbulence, turbulence flag, scaled prices,
        scaled holdings, cooldowns, scaled tech indicators], as float32.
        Also refreshes self.cash / self.stocks / self.turbulence_bool / self.price.
        """
        alpaca = AlpacaProcessor(api=self.alpaca)
        price, tech, turbulence = alpaca.fetch_latest_data(ticker_list = self.stockUniverse, time_interval='1Min',
                                                     tech_indicator_list=self.tech_indicator_list)
        turbulence_bool = 1 if turbulence >= self.turbulence_thresh else 0

        # Squash turbulence and shrink tech features by fixed powers of two.
        turbulence = (self.sigmoid_sign(turbulence, self.turbulence_thresh) * 2 ** -5).astype(np.float32)

        tech = tech * 2 ** -7
        positions = self.alpaca.list_positions()
        stocks = [0] * len(self.stockUniverse)
        for position in positions:
            ind = self.stockUniverse.index(position.symbol)
            stocks[ind] = ( abs(int(float(position.qty))))

        stocks = np.asarray(stocks, dtype = float)
        cash = float(self.alpaca.get_account().cash)
        self.cash = cash
        self.stocks = stocks
        self.turbulence_bool = turbulence_bool
        self.price = price

        amount = np.array(max(self.cash, 1e4) * (2 ** -12), dtype=np.float32)
        scale = np.array(2 ** -6, dtype=np.float32)
        state = np.hstack((amount,
                    turbulence,
                    self.turbulence_bool,
                    price * scale,
                    self.stocks * scale,
                    self.stocks_cd,
                    tech,
                    )).astype(np.float32)
        print(len(self.stockUniverse))
        return state

    def submitOrder(self, qty, stock, side, resp):
        """Submit a market day order; append True/False to ``resp`` for success/failure.

        Zero-quantity orders are treated as trivially successful.
        """
        if(qty > 0):
            try:
                self.alpaca.submit_order(stock, qty, side, "market", "day")
                print("Market order of | " + str(qty) + " " + stock + " " + side + " | completed.")
                resp.append(True)
            except:
                print("Order of | " + str(qty) + " " + stock + " " + side + " | did not go through.")
                resp.append(False)
        else:
            print("Quantity is 0, order of | " + str(qty) + " " + stock + " " + side + " | not completed.")
            resp.append(True)

    @staticmethod
    def sigmoid_sign(ary, thresh):
        # Smooth, sign-preserving squash of ary into roughly (-thresh, thresh).
        def sigmoid(x):
            return 1 / (1 + np.exp(-x * np.e)) - 0.5

        return sigmoid(ary / thresh) * thresh
|
testplotlyview.py | import vdomr as vd
import time
import multiprocessing
import sys
from .stdoutsender import StdoutSender
import mtlogging
import numpy as np
class TestPlotlyView(vd.Component):
    """View that shows an 'Initializing...' log until a background process
    finishes, then swaps in a TestPlotlyWidget."""
    def __init__(self, context):
        vd.Component.__init__(self)
        self._context = context
        self._size = (100, 100)
        self._test_plotly_widget = None  # created by _on_init_completed
        # Parent keeps one pipe end; the worker process gets the other.
        self._connection_to_init, connection_to_parent = multiprocessing.Pipe()
        self._init_process = multiprocessing.Process(target=_initialize, args=(context, connection_to_parent))
        self._init_process.start()
        self._init_log_text = ''
        vd.set_timeout(self._check_init, 0.5)  # start polling for worker messages
    def _on_init_completed(self, init):
        # ``init`` is the worker's result payload (currently unused).
        self._test_plotly_widget = TestPlotlyWidget()
        self._test_plotly_widget.setSize(self._size)
        self.refresh()
    def setSize(self, size):
        # Remember the size; forward it if the widget already exists.
        self._size = size
        if self._test_plotly_widget:
            self._test_plotly_widget.setSize(size)
    def size(self):
        return self._size
    def tabLabel(self):
        # Label shown on this view's tab.
        return 'Test plotly'
    def render(self):
        # Show the widget once ready, otherwise the init log.
        if self._test_plotly_widget:
            return vd.div(
                self._test_plotly_widget
            )
        else:
            return vd.div(
                vd.h3('Initializing...'),
                vd.pre(self._init_log_text)
            )
    def _check_init(self):
        # Poll the pipe: 'log' messages append to the status text;
        # 'result' completes initialization and stops the polling loop.
        if not self._test_plotly_widget:
            if self._connection_to_init.poll():
                msg = self._connection_to_init.recv()
                if msg['name'] == 'log':
                    self._init_log_text = self._init_log_text + msg['text']
                    self.refresh()
                elif msg['name'] == 'result':
                    self._on_init_completed(msg['result'])
                    return
            vd.set_timeout(self._check_init, 1)  # re-arm the poll timer
class TestPlotlyWidget(vd.Component):
    """Widget that renders a sample cosine-chirp curve as a Plotly plot."""
    def __init__(self):
        vd.Component.__init__(self)
        self._size = (100, 100)
        self._plot = None
        self._update_plot()
    def setSize(self, size):
        # Rebuild the plot whenever the size changes.
        self._size = size
        self._update_plot()
    def _update_plot(self):
        # Sample y = cos((10x)^2) on 10 points in [0, 1].
        sample_x = np.linspace(0, 1, 10)
        sample_y = np.cos((10 * sample_x)**2)
        self._plot = vd.components.PlotlyPlot(
            data=dict(x=sample_x, y=sample_y),
            layout=dict(margin=dict(t=5)),
            config=dict(),
            size=self._size
        )
        self.refresh()
    def render(self):
        return self._plot if self._plot else vd.div('no plot.')
# Initialization in a worker thread
# NOTE(review): this looks like it was meant to be a decorator
# (``@mtlogging.log(root=True)``); as written it is a bare call whose
# return value is discarded — confirm against mtlogging's API.
mtlogging.log(root=True)
def _initialize(context, connection_to_parent):
    # Worker-process entry point: (currently no-op) init, then report back.
    # NOTE(review): the StdoutSender context wraps only ``pass``, so the
    # final send() below is not captured by it — confirm whether the send
    # was meant to happen inside the ``with`` block.
    with StdoutSender(connection=connection_to_parent):
        pass
    # Tell the parent's _check_init that initialization is complete.
    connection_to_parent.send(dict(
        name='result',
        result=dict()
    ))
|
test_util.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
  """Returns the name of a GPU device if available or the empty string."""
  for device in device_lib.list_local_devices():
    if device.device_type in ("GPU", "SYCL"):
      return compat.as_str(device.name)
  return ""
def assert_ops_in_graph(expected_ops, graph):
  """Assert all expected operations are found.

  Args:
    expected_ops: `dict<string, string>` of op name to op type.
    graph: Graph to check.

  Returns:
    `dict<string, node>` of node name to node.

  Raises:
    ValueError: If the expected ops are not present in the graph.
  """
  graph_def = graph.as_graph_def()
  found = {}
  for node in graph_def.node:
    expected_type = expected_ops.get(node.name)
    if expected_type is None:
      continue
    if expected_type != node.op:
      raise ValueError("Expected op for node %s is different. %s vs %s" %
                       (node.name, expected_type, node.op))
    found[node.name] = node
  if set(expected_ops.keys()) != set(found.keys()):
    raise ValueError("Not all expected ops are present. Expected %s, found %s" %
                     (expected_ops.keys(), found.keys()))
  return found
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(actual, expected):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering of
  nodes, attrs, and control inputs. Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent. This function
  ignores randomized attribute values that may appear in V2 checkpoints.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  # The V2 API always strips randomized V2-checkpoint attribute values.
  assert_equal_graph_def(actual, expected, checkpoint_v2=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering of
  nodes, attrs, and control inputs. Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.
    checkpoint_v2: boolean determining whether to ignore randomized attribute
      values that appear in V2 checkpoints.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  # Thin V1 wrapper: forwards to the shared implementation.
  assert_equal_graph_def(actual, expected, checkpoint_v2)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
  """Shared implementation behind the v1/v2 `assert_equal_graph_def` wrappers.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.
    checkpoint_v2: whether to strip randomized V2-checkpoint attribute values
      before comparing.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  for label, value in (("actual", actual), ("expected", expected)):
    if not isinstance(value, graph_pb2.GraphDef):
      raise TypeError(
          "Expected tf.GraphDef for %s, got %s" % (label, type(value).__name__))
  if checkpoint_v2:
    _strip_checkpoint_v2_randomized(actual)
    _strip_checkpoint_v2_randomized(expected)
  # The actual comparison is done in C++; a non-empty diff string means failure.
  diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
                                                expected.SerializeToString())
  if diff:
    raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
  """Compares MetaGraphDefs `a` and `b` in unit test class `tester`.

  Collection entries are compared as parsed protos when a proto type is
  registered for the collection key, otherwise by raw value.  The
  collection_def and graph_def fields are checked separately and cleared
  before the final whole-proto comparison.  Note: mutates `a` and `b`.
  """
  # Carefully check the collection_defs
  tester.assertEqual(set(a.collection_def), set(b.collection_def))
  collection_keys = a.collection_def.keys()
  for k in collection_keys:
    a_value = a.collection_def[k]
    b_value = b.collection_def[k]
    proto_type = ops.get_collection_proto_type(k)
    if proto_type:
      a_proto = proto_type()
      b_proto = proto_type()
      # Number of entries in the collections is the same
      tester.assertEqual(
          len(a_value.bytes_list.value), len(b_value.bytes_list.value))
      for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
                                              b_value.bytes_list.value):
        a_proto.ParseFromString(a_value_item)
        b_proto.ParseFromString(b_value_item)
        tester.assertProtoEquals(a_proto, b_proto)
    else:
      # FIX: assertEquals is a deprecated unittest alias; use assertEqual,
      # consistent with the rest of this function.
      tester.assertEqual(a_value, b_value)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("collection_def")
  b.ClearField("collection_def")

  # Check the graph_defs.
  assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
  # Check graph_def versions (ignored by assert_equal_graph_def).
  tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("graph_def")
  b.ClearField("graph_def")

  tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
  # Thin wrapper: delegates the CUDA-build query to the C++ layer.
  return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
  # Thin wrapper: delegates the half-precision capability query to the C++ layer.
  return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
  # Thin wrapper: delegates the MKL-build query to the C++ layer.
  return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
  # Thin wrapper; note the capitalization difference in the C++ symbol name.
  pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
  """Converts the input from the NHWC format to NCHW.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor dim -> new axis order
  axis_orders = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
  if isinstance(input_tensor, ops.Tensor):
    return array_ops.transpose(input_tensor,
                               axis_orders[input_tensor.shape.ndims])
  order = axis_orders[len(input_tensor)]
  return [input_tensor[axis] for axis in order]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
  """Transforms the input from the NHWC layout to NCHW_VECT_C layout.

  Note: Does not include quantization or type conversion steps, which should
  be applied afterwards.

  Args:
    input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NCHW_VECT_C

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not evenly
      divisible by 4.
  """
  permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  shape = (input_shape_or_tensor.shape.as_list()
           if is_tensor else input_shape_or_tensor)
  if shape[-1] % 4 != 0:
    raise ValueError(
        "Last dimension of input must be evenly divisible by 4 to convert to "
        "NCHW_VECT_C.")
  # Split the channel dimension into (C/4, 4) before permuting.
  shape[-1] //= 4
  shape.append(4)
  perm = permutations[len(shape)]
  if not is_tensor:
    return [shape[axis] for axis in perm]
  reshaped = array_ops.reshape(input_shape_or_tensor, shape)
  return array_ops.transpose(reshaped, perm)
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
  """Transforms the input from the NCHW_VECT_C layout to NHWC layout.

  Note: Does not include de-quantization or type conversion steps, which should
  be applied beforehand.

  Args:
    input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NHWC

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not 4.
  """
  rank_to_perm = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  shape = (input_shape_or_tensor.shape.as_list()
           if is_tensor else input_shape_or_tensor)
  if shape[-1] != 4:
    raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
  perm = rank_to_perm[len(shape)]
  # Drop the trailing vector axis and fold its factor of 4 back into channels.
  nhwc_shape = [shape[axis] for axis in perm[:-1]]
  nhwc_shape[-1] *= shape[-1]
  if not is_tensor:
    return nhwc_shape
  transposed = array_ops.transpose(input_shape_or_tensor, perm)
  return array_ops.reshape(transposed, nhwc_shape)
def NCHWToNHWC(input_tensor):
  """Converts the input from the NCHW format to NHWC.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # Maps tensor rank -> permutation that moves channels to the last axis.
  rank_to_perm = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
  if not isinstance(input_tensor, ops.Tensor):
    # Plain shape array: permute the entries directly.
    return [input_tensor[axis] for axis in rank_to_perm[len(input_tensor)]]
  perm = rank_to_perm[input_tensor.shape.ndims]
  return array_ops.transpose(input_tensor, perm)
def skip_if(condition):
  """Skips the decorated function if condition is or evaluates to True.

  Args:
    condition: Either an expression that can be used in "if not condition"
      statement, or a callable whose result should be a boolean.

  Returns:
    The wrapped function
  """

  def real_skip_if(fn):

    def wrapper(*args, **kwargs):
      # Callables are evaluated lazily, at call time.
      should_skip = condition() if callable(condition) else condition
      if not should_skip:
        fn(*args, **kwargs)

    return wrapper

  return real_skip_if
def enable_c_shapes(fn):
  """No-op decorator kept for API compatibility. TODO(b/74620627): Remove this."""
  return fn
def with_c_shapes(cls):
  """No-op class decorator kept for API compatibility. TODO(b/74620627): Remove this."""
  return cls
def enable_control_flow_v2(fn):
  """Decorator for enabling CondV2 and WhileV2 on a test.

  Note this enables using CondV2 and WhileV2 after running the test class's
  setup/teardown methods.

  In addition to this, callers must import the while_v2 module in order to set
  the _while_v2 module in control_flow_ops.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """

  def wrapper(*args, **kwargs):
    # Snapshot the three global flags so they can be restored afterwards.
    saved_flags = (control_flow_ops.ENABLE_COND_V2,
                   control_flow_ops.ENABLE_WHILE_V2,
                   tensor_array_ops.ENABLE_TENSOR_ARRAY_V2)
    control_flow_ops.ENABLE_COND_V2 = True
    control_flow_ops.ENABLE_WHILE_V2 = True
    tensor_array_ops.ENABLE_TENSOR_ARRAY_V2 = True
    try:
      fn(*args, **kwargs)
    finally:
      # Restore the flags even if the wrapped test raises.
      (control_flow_ops.ENABLE_COND_V2,
       control_flow_ops.ENABLE_WHILE_V2,
       tensor_array_ops.ENABLE_TENSOR_ARRAY_V2) = saved_flags

  return wrapper
def enable_tensor_array_v2(fn):
  """Decorator for enabling _GraphTensorArrayV2 on a test.

  Note this enables _GraphTensorArrayV2 after running the test class's
  setup/teardown methods.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """

  def wrapper(*args, **kwargs):
    # Save the flag so it can be restored even if the wrapped test raises.
    saved_flag = tensor_array_ops.ENABLE_TENSOR_ARRAY_V2
    tensor_array_ops.ENABLE_TENSOR_ARRAY_V2 = True
    try:
      fn(*args, **kwargs)
    finally:
      tensor_array_ops.ENABLE_TENSOR_ARRAY_V2 = saved_flag

  return wrapper
def with_control_flow_v2(cls):
  """Adds methods that call original methods with WhileV2 and CondV2 enabled.

  Note this enables CondV2 and WhileV2 in new methods after running the test
  class's setup method.

  In addition to this, callers must import the while_v2 module in order to set
  the _while_v2 module in control_flow_ops.

  If a test function has _disable_control_flow_v2 attr set to True (using the
  @disable_control_flow_v2 decorator), the v2 function is not generated for it.

  Example:

  @test_util.with_control_flow_v2
  class ControlFlowTest(test.TestCase):

    def testEnabledForV2(self):
      ...

    @test_util.disable_control_flow_v2("b/xyzabc")
    def testDisabledForV2(self):
      ...

  Generated class:
  class ControlFlowTest(test.TestCase):

    def testEnabledForV2(self):
      ...

    def testEnabledForV2WithControlFlowV2(self):
      // Enable V2 flags.
      testEnabledForV2(self)
      // Restore V2 flags.

    def testDisabledForV2(self):
      ...

  Args:
    cls: class to decorate

  Returns:
    cls with new test methods added
  """
  # Nothing to generate when v2 control flow is already globally enabled.
  if control_flow_ops.ENABLE_WHILE_V2 and control_flow_ops.ENABLE_COND_V2:
    return cls

  for name, value in cls.__dict__.copy().items():
    is_eligible_test = (
        callable(value) and name.startswith("test") and
        not getattr(value, "_disable_control_flow_v2", False))
    if is_eligible_test:
      setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
  return cls
def disable_control_flow_v2(unused_msg):
  """Decorator for a function in a with_control_flow_v2 enabled test class.

  Blocks the function from being run with v2 control flow ops.

  Args:
    unused_msg: Reason for disabling.

  Returns:
    The wrapped function with _disable_control_flow_v2 attr set to True.
  """

  def mark_disabled(func):
    # with_control_flow_v2 checks this attribute before generating a v2 copy.
    setattr(func, "_disable_control_flow_v2", True)
    return func

  return mark_disabled
def assert_no_new_pyobjects_executing_eagerly(f):
  """Decorator for asserting that no new Python objects persist after a test.
  Runs the test multiple times executing eagerly, first as a warmup and then to
  let objects accumulate. The warmup helps ignore caches which do not grow as
  the test is run repeatedly.
  Useful for checking that there are no missing Py_DECREFs in the C exercised by
  a bit of Python.
  """
  def decorator(self, **kwargs):
    """Warms up, gets an object count, runs the test, checks for new objects."""
    with context.eager_mode():
      # Disable GC so a collection cannot run between the two
      # len(gc.get_objects()) measurements; collection points are made
      # explicit with gc.collect() below.
      gc.disable()
      # Run the test 2 times as warmup, in an attempt to fill up caches, which
      # should not grow as the test is run repeatedly below.
      #
      # TODO(b/117156879): Running warmup twice is black magic; we have seen
      # tests that fail with 1 warmup run, and pass with 2, on various versions
      # of python2.7.x.
      for _ in range(2):
        f(self, **kwargs)
      gc.collect()
      previous_count = len(gc.get_objects())
      if ops.has_default_graph():
        collection_sizes_before = {
            collection: len(ops.get_collection(collection))
            for collection in ops.get_default_graph().collections
        }
      for _ in range(3):
        f(self, **kwargs)
      # Note that gc.get_objects misses anything that isn't subject to garbage
      # collection (C types). Collections are a common source of leaks, so we
      # test for collection sizes explicitly.
      if ops.has_default_graph():
        for collection_key in ops.get_default_graph().collections:
          collection = ops.get_collection(collection_key)
          size_before = collection_sizes_before.get(collection_key, 0)
          if len(collection) > size_before:
            raise AssertionError(
                ("Collection %s increased in size from "
                 "%d to %d (current items %s).") %
                (collection_key, size_before, len(collection), collection))
          # Make sure our collection checks don't show up as leaked memory by
          # removing references to temporary variables.
          del collection
          del collection_key
          del size_before
        del collection_sizes_before
      gc.collect()
      # There should be no new Python objects hanging around.
      new_count = len(gc.get_objects())
      # In some cases (specifically on MacOS), new_count is somehow
      # smaller than previous_count.
      # Using plain assert because not all classes using this decorator
      # have assertLessEqual
      assert new_count <= previous_count, (
          "new_count(%d) is not less than or equal to previous_count(%d)" %
          (new_count, previous_count))
      gc.enable()
  return decorator
def assert_no_new_tensors(f):
  """Decorator for asserting that no new Tensors persist after a test.
  Mainly useful for checking that code using the Python C API has correctly
  manipulated reference counts.
  Clears the caches that it knows about, runs the garbage collector, then checks
  that there are no Tensor or Tensor-like objects still around. This includes
  Tensors to which something still has a reference (e.g. from missing
  Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
  of the objects has __del__ defined).
  Args:
    f: The test case to run.
  Returns:
    The decorated test case.
  """
  def decorator(self, **kwargs):
    """Finds existing Tensors, runs the test, checks for new Tensors."""
    def _is_tensorflow_object(obj):
      try:
        return isinstance(obj,
                          (ops.Tensor, variables.Variable,
                           tensor_shape.Dimension, tensor_shape.TensorShape))
      except ReferenceError:
        # If the object no longer exists, we don't care about it.
        return False
    # Snapshot ids of the TF objects alive now, so only newly created ones
    # are reported after the test runs.
    tensors_before = set(
        id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
    outside_executed_eagerly = context.executing_eagerly()
    # Run the test in a new graph so that collections get cleared when it's
    # done, but inherit the graph key so optimizers behave.
    outside_graph_key = ops.get_default_graph()._graph_key
    with ops.Graph().as_default():
      ops.get_default_graph()._graph_key = outside_graph_key
      if outside_executed_eagerly:
        with context.eager_mode():
          f(self, **kwargs)
      else:
        f(self, **kwargs)
    # Make an effort to clear caches, which would otherwise look like leaked
    # Tensors.
    context.context()._clear_caches()  # pylint: disable=protected-access
    gc.collect()
    tensors_after = [
        obj for obj in gc.get_objects()
        if _is_tensorflow_object(obj) and id(obj) not in tensors_before
    ]
    if tensors_after:
      raise AssertionError(("%d Tensors not deallocated after test: %s" % (
          len(tensors_after),
          str(tensors_after),
      )))
  return decorator
def _find_reference_cycle(objects, idx):
  """Searches for a reference cycle reachable from `objects[idx]`.

  Builds a referrer graph (via gc.get_referrers) rooted at the object and
  walks it looking for a cycle; when one is found, a human-readable sample
  of the cycle is emitted via logging.error.

  Args:
    objects: list of candidate objects (e.g. gc.garbage).
    idx: index into `objects` of the object to start from.

  Returns:
    True if a cycle was found and logged, False otherwise.
  """
  def get_ignore_reason(obj, blacklist):
    """Tests whether an object should be omitted from the dependency graph."""
    # The blacklist grows by one entry per recursion level in build_ref_graph,
    # so its length doubles as a recursion-depth guard.
    if len(blacklist) > 100:
      return "<depth limit>"
    if tf_inspect.isframe(obj):
      if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
        return "<test code>"
    for b in blacklist:
      if b is obj:
        return "<test code>"
    if obj is blacklist:
      return "<test code>"
    return None
  # Note: this function is meant to help with diagnostics. Its output is purely
  # a human readable representation, so you may freely modify it to suit your
  # needs.
  def describe(obj, blacklist, leaves_only=False):
    """Returns a custom human-readable summary of obj.
    Args:
      obj: the value to describe.
      blacklist: same as blacklist in get_ignore_reason.
      leaves_only: boolean flag used when calling describe recursively. Useful
        for summarizing collections.
    """
    if get_ignore_reason(obj, blacklist):
      return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
    if tf_inspect.isframe(obj):
      return "frame: {}".format(tf_inspect.getframeinfo(obj))
    elif tf_inspect.ismodule(obj):
      return "module: {}".format(obj.__name__)
    else:
      if leaves_only:
        return "{}, {}".format(type(obj), id(obj))
      elif isinstance(obj, list):
        return "list({}): {}".format(
            id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
      elif isinstance(obj, tuple):
        return "tuple({}): {}".format(
            id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
      elif isinstance(obj, dict):
        return "dict({}): {} keys".format(id(obj), len(obj.keys()))
      elif tf_inspect.isfunction(obj):
        return "function({}) {}; globals ID: {}".format(
            id(obj), obj.__name__, id(obj.__globals__))
      else:
        return "{}, {}".format(type(obj), id(obj))
  def build_ref_graph(obj, graph, reprs, blacklist):
    """Builds a reference graph as <referrer> -> <list of referents>.
    Args:
      obj: The object to start from. The graph will be built by recursively
        adding its referrers.
      graph: Dict holding the graph to be built. To avoid creating extra
        references, the graph holds object IDs rather than actual objects.
      reprs: Auxiliary structure that maps object IDs to their human-readable
        description.
      blacklist: List of objects to ignore.
    """
    referrers = gc.get_referrers(obj)
    # Exclude the referrers list itself from the graph, since it only exists
    # for this traversal.
    blacklist = blacklist + (referrers,)
    obj_id = id(obj)
    for r in referrers:
      if get_ignore_reason(r, blacklist) is None:
        r_id = id(r)
        if r_id not in graph:
          graph[r_id] = []
        if obj_id not in graph[r_id]:
          # Only recurse on first discovery of this edge, so shared referrers
          # are expanded once.
          graph[r_id].append(obj_id)
          build_ref_graph(r, graph, reprs, blacklist)
          reprs[r_id] = describe(r, blacklist)
  def find_cycle(el, graph, reprs, path):
    """Finds and prints a single cycle in the dependency graph."""
    if el not in graph:
      return
    for r in graph[el]:
      if r in path:
        logging.error("Reference cycle sample:")
        for p in path + (r,):
          logging.error(reprs.get(p, "unknown object " + str(p)))
        return True
      else:
        if find_cycle(r, graph, reprs, path + (r,)):
          return True
    return False
  obj = objects[idx]
  graph = {}  # referrer ID -> object ID
  reprs = {}  # object ID -> description
  # The helper functions and bookkeeping structures are blacklisted so the
  # traversal does not report its own machinery.
  build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
                                      describe, build_ref_graph, find_cycle))
  for k in graph:
    if find_cycle(k, graph, reprs, ()):
      return True
  return False
def assert_no_garbage_created(f):
  """Test method decorator to assert that no garbage has been created.
  Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
  cannot be un-set (i.e. will disable garbage collection for any other unit
  tests in the same file/shard).
  Args:
    f: The function to decorate.
  Returns:
    The decorated function.
  """
  def decorator(self, **kwargs):
    """Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
    # Force-load `distribution_strategy_context` to prevent GC at
    # test time when using eager. Remove once b/117329403 is resolved.
    tape.distribution_strategy_context.get_distribution_strategy()
    gc.disable()
    previous_debug_flags = gc.get_debug()
    # DEBUG_SAVEALL makes the collector append every unreachable object to
    # gc.garbage instead of freeing it, so leaks can be inspected below.
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    previous_garbage = len(gc.garbage)
    f(self, **kwargs)
    gc.collect()
    new_garbage = len(gc.garbage)
    if new_garbage > previous_garbage:
      logging.error(
          "The decorated test created work for Python's garbage collector, "
          "likely due to a reference cycle. New objects in cycle(s):")
      for i, obj in enumerate(gc.garbage[previous_garbage:]):
        try:
          logging.error("Object %d of %d", i,
                        len(gc.garbage) - previous_garbage)
          def _safe_object_str(obj):
            return "<%s %d>" % (obj.__class__.__name__, id(obj))
          logging.error(" Object type: %s", _safe_object_str(obj))
          logging.error(
              " Referrer types: %s", ", ".join(
                  [_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
          logging.error(
              " Referent types: %s", ", ".join(
                  [_safe_object_str(ref) for ref in gc.get_referents(obj)]))
          logging.error(" Object attribute names: %s", dir(obj))
          logging.error(" Object __str__:")
          logging.error(obj)
          logging.error(" Object __repr__:")
          logging.error(repr(obj))
        except Exception:  # pylint: disable=broad-except
          # Printing a leaked object must never crash the diagnostics loop.
          logging.error("(Exception while printing object)")
    # When garbage is created, this call can help identify reference cycles,
    # which are typically the cause of such garbage.
    if new_garbage > previous_garbage:
      for i in range(previous_garbage, new_garbage):
        if _find_reference_cycle(gc.garbage, i):
          break
    # This will fail if any garbage has been created, typically because of a
    # reference cycle.
    self.assertEqual(previous_garbage, new_garbage)
    # TODO(allenl): Figure out why this debug flag reset doesn't work. It would
    # be nice to be able to decorate arbitrary tests in a large test suite and
    # not hold on to every object in other tests.
    gc.set_debug(previous_debug_flags)
    gc.enable()
  return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
def generate_combinations_with_testcase_name(**kwargs):
  """Generate combinations based on its keyword arguments using combine().

  This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is a required for named
  parameterized tests.

  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]`
      or `option=the_only_possibility`.

  Returns:
    a list of dictionaries for each combination. Keys in the dictionaries are
    the keyword argument names. Each key has one value - one of the
    corresponding keyword argument values.
  """

  def _suffix_for(combination):
    # Build "_<key>_<value>" fragments with non-alphanumerics stripped.
    return "".join(
        "_{}_{}".format("".join(filter(str.isalnum, key)),
                        "".join(filter(str.isalnum, str(value))))
        for key, value in combination.items())

  named_combinations = []
  for combination in _combine_named_parameters(**kwargs):
    assert isinstance(combination, OrderedDict)
    named = OrderedDict(
        list(combination.items()) +
        [("testcase_name", "_test{}".format(_suffix_for(combination)))])
    named_combinations.append(named)
  return named_combinations
def run_all_in_graph_and_eager_modes(cls):
  """Execute all test methods in the given class with and without eager."""
  for name, value in cls.__dict__.copy().items():
    # Only wrap test methods; "testSkipEager*" methods are left untouched.
    is_wrappable = callable(value) and name.startswith("test")
    if is_wrappable and not name.startswith("testSkipEager"):
      setattr(cls, name, run_in_graph_and_eager_modes(value))
  return cls
def run_in_graph_and_eager_modes(func=None,
                                 config=None,
                                 use_gpu=True,
                                 reset_test=True,
                                 assert_no_eager_garbage=False):
  """Execute the decorated test with and without enabling eager execution.
  This function returns a decorator intended to be applied to test methods in
  a `tf.test.TestCase` class. Doing so will cause the contents of the test
  method to be executed twice - once normally, and once with eager execution
  enabled. This allows unittests to confirm the equivalence between eager
  and graph execution (see `tf.enable_eager_execution`).
  For example, consider the following unittest:
  ```python
  class MyTests(tf.test.TestCase):
    @run_in_graph_and_eager_modes
    def test_foo(self):
      x = tf.constant([1, 2])
      y = tf.constant([3, 4])
      z = tf.add(x, y)
      self.assertAllEqual([4, 6], self.evaluate(z))
  if __name__ == "__main__":
    tf.test.main()
  ```
  This test validates that `tf.add()` has the same behavior when computed with
  eager execution enabled as it does when constructing a TensorFlow graph and
  executing the `z` tensor in a session.
  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.
    config: An optional config_pb2.ConfigProto to use to configure the
      session when executing graphs.
    use_gpu: If True, attempt to run as many operations as possible on GPU.
    reset_test: If True, tearDown and SetUp the test case between the two
      executions of the test (once with and once without eager execution).
    assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
      collector and asserts that no extra garbage has been created when running
      the test with eager execution enabled. This will fail if there are
      reference cycles (e.g. a = []; a.append(a)). Off by default because some
      tests may create garbage for legitimate reasons (e.g. they define a class
      which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
      Python interpreters (meaning that tests which rely on objects being
      collected elsewhere in the unit test file will not work). Additionally,
      checks that nothing still has a reference to Tensors that the test
      allocated.
  Returns:
    Returns a decorator that will run the decorated test method twice:
    once by constructing and executing a graph in a session and once with
    eager execution enabled.
  """
  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError(
          "`run_test_in_graph_and_eager_modes` only supports test methods. "
          "Did you mean to use `run_all_in_graph_and_eager_modes`?")
    def decorated(self, *args, **kwargs):
      # First pass: run the test in graph mode inside a test session.
      try:
        with context.graph_mode():
          with self.test_session(use_gpu=use_gpu, config=config):
            f(self, *args, **kwargs)
      except unittest.case.SkipTest:
        # A skip in graph mode must not prevent the eager run below.
        pass
      def run_eagerly(self, **kwargs):
        # NOTE: `args` is closed over from `decorated`, so positional
        # arguments of the original call are forwarded here as well.
        if not use_gpu:
          with ops.device("/device:CPU:0"):
            f(self, *args, **kwargs)
        else:
          f(self, *args, **kwargs)
      if assert_no_eager_garbage:
        ops.reset_default_graph()
        run_eagerly = assert_no_new_tensors(
            assert_no_garbage_created(run_eagerly))
      if reset_test:
        # This decorator runs the wrapped test twice.
        # Reset the test environment between runs.
        self.tearDown()
        self._tempdir = None
      # Create a new graph for the eagerly executed version of this test for
      # better isolation.
      graph_for_eager_test = ops.Graph()
      with graph_for_eager_test.as_default(), context.eager_mode():
        if reset_test:
          self.setUp()
        run_eagerly(self, **kwargs)
      ops.dismantle_graph(graph_for_eager_test)
    return decorated
  # Support both bare @decorator usage and @decorator(...) with arguments.
  if func is not None:
    return decorator(func)
  return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
  """Returns whether TensorFlow can access a GPU.

  Args:
    cuda_only: limit the search to CUDA gpus.
    min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
      CUDA compute capability required, or None if no requirement.

  Returns:
    True iff a gpu device of the requested kind is available.
  """

  def compute_capability_from_device_desc(device_desc):
    # TODO(jingyue): The device description generator has to be in sync with
    # this file. Another option is to put compute capability in
    # DeviceAttributes, but I avoided that to keep DeviceAttributes
    # target-independent. Reconsider this option when we have more things like
    # this to keep in sync.
    # LINT.IfChange
    match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
    # LINT.ThenChange(//tensorflow/core/\
    # common_runtime/gpu/gpu_device.cc)
    if match:
      return int(match.group(1)), int(match.group(2))
    return 0, 0

  try:
    for local_device in device_lib.list_local_devices():
      if local_device.device_type == "GPU":
        if min_cuda_compute_capability is None:
          return True
        # (major, minor) tuples compare lexicographically, which matches
        # CUDA compute-capability ordering.
        capability = compute_capability_from_device_desc(
            local_device.physical_device_desc)
        if capability >= min_cuda_compute_capability:
          return True
      if local_device.device_type == "SYCL" and not cuda_only:
        return True
    return False
  except errors_impl.NotFoundError as e:
    # Only swallow the "CUDA not found" flavor of NotFoundError.
    if not all([x in str(e) for x in ["CUDA", "not find"]]):
      raise e
    logging.error(str(e))
    return False
@contextlib.contextmanager
def device(use_gpu):
  """Uses gpu when requested and available."""
  # Fall back to CPU when a GPU is not requested or not present.
  dev = "/device:GPU:0" if use_gpu and is_gpu_available() else "/device:CPU:0"
  with ops.device(dev):
    yield
@contextlib.contextmanager
def use_gpu():
  """Uses gpu when requested and available."""
  # Delegates to device(), which falls back to CPU when no GPU is available.
  with device(use_gpu=True):
    yield
@contextlib.contextmanager
def force_gpu():
  """Force the gpu to be used."""
  # Unconditionally pins ops to GPU:0 (no availability check).
  with ops.device("/device:GPU:0"):
    yield
@contextlib.contextmanager
def force_cpu():
  """Force the cpu to be used."""
  # Unconditionally pins ops to CPU:0.
  with ops.device("/device:CPU:0"):
    yield
class CapturedWrites(object):
  """A utility class to load the captured writes made to a stream."""

  def __init__(self, capture_location):
    # Path of the file the captured stream was redirected into.
    self.capture_location = capture_location

  def contents(self):
    """Get the captured writes as a single string."""
    with open(self.capture_location) as captured_file:
      return captured_file.read()
class ErrorLoggingSession(session.Session):
  """Wrapper around a Session that logs errors in run().
  """
  def run(self, *args, **kwargs):
    """Runs the session, logging any non-OutOfRange exception before re-raising."""
    try:
      return super(ErrorLoggingSession, self).run(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
      # Note: disable the logging for OutOfRangeError, which makes the output
      # of tf.data tests hard to read, because OutOfRangeError is used as the
      # signal completion
      if not isinstance(e, errors.OutOfRangeError):
        logging.error(str(e))
      raise
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
    super(TensorFlowTestCase, self).__init__(methodName)
    # Threads whose termination is checked in tearDown().
    self._threads = []
    # Lazily created temp dir; see get_temp_dir().
    self._tempdir = None
    # Session reused by cached_session(); closed via _ClearCachedSession().
    self._cached_session = None
  def setUp(self):
    """Resets the default graph and seeds RNGs for per-test determinism."""
    self._ClearCachedSession()
    random.seed(random_seed.DEFAULT_GRAPH_SEED)
    np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
    # Note: The following line is necessary because some test methods may error
    # out from within nested graph contexts (e.g., via assertRaises and
    # assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
    # under certain versions of Python. That would cause
    # ops.reset_default_graph() to throw an exception if the stack were not
    # cleared first.
    ops._default_graph_stack.reset()  # pylint: disable=protected-access
    ops.reset_default_graph()
    random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
  def tearDown(self):
    """Checks registered threads for termination and closes the cached session."""
    for thread in self._threads:
      thread.check_termination()
    self._ClearCachedSession()
  def _ClearCachedSession(self):
    # Close and drop the session created by cached_session(), if one exists.
    if self._cached_session is not None:
      self._cached_session.close()
      self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during in a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each others environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
  @contextlib.contextmanager
  def captureWritesToStream(self, stream):
    """A context manager that captures the writes to a given stream.
    This context manager captures all writes to a given stream inside of a
    `CapturedWrites` object. When this context manager is created, it yields
    the `CapturedWrites` object. The captured contents can be accessed by
    calling `.contents()` on the `CapturedWrites`.
    For this function to work, the stream must have a file descriptor that
    can be modified using `os.dup` and `os.dup2`, and the stream must support
    a `.flush()` method. The default python sys.stdout and sys.stderr are
    examples of this. Note that this does not work in Colab or Jupyter
    notebooks, because those use alternate stdout streams.
    Example:
    ```python
    class MyOperatorTest(test_util.TensorFlowTestCase):
      def testMyOperator(self):
        input = [1.0, 2.0, 3.0, 4.0, 5.0]
        with self.captureWritesToStream(sys.stdout) as captured:
          result = MyOperator(input).eval()
        self.assertStartsWith(captured.contents(), "This was printed.")
    ```
    Args:
      stream: The stream whose writes should be captured. This
        stream must have a file descriptor, support writing via using that
        file descriptor, and must have a `.flush()` method.
    Yields:
      A `CapturedWrites` object that contains all writes to the specified stream
      made during this context.
    """
    # Flush pending output so it is not mixed into the capture.
    stream.flush()
    fd = stream.fileno()
    tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
    tmp_file = open(tmp_file_path, "w")
    # Duplicate the original descriptor so it can be restored afterwards.
    orig_fd = os.dup(fd)
    # Redirect all writes on `fd` into the temporary file.
    os.dup2(tmp_file.fileno(), fd)
    try:
      yield CapturedWrites(tmp_file_path)
    finally:
      tmp_file.close()
      # Point the stream back at its original descriptor.
      os.dup2(orig_fd, fd)
  def _AssertProtoEquals(self, a, b, msg=None):
    """Asserts that a and b are the same proto.
    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then use assertProtoEqual()
    in case of failure as it provides good error messages.
    Args:
      a: a proto.
      b: another proto.
      msg: Optional message to report on failure.
    """
    if not compare.ProtoEq(a, b):
      # Only invoked on mismatch, purely to produce a readable failure message.
      compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices, tensor.values,
tensor.dense_shape)
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
  def _eval_helper(self, tensors):
    # Applies _eval_tensor across an arbitrarily nested structure of tensors.
    if tensors is None:
      return None
    return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield None
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
                   graph=None,
                   config=None,
                   use_gpu=False,
                   force_gpu=False):
  """Returns a TensorFlow Session for use in executing tests.

  This method behaves differently than self.session(): for performance reasons
  `cached_session` will by default reuse the same session within the same
  test. The session returned by this function will only be closed at the end
  of the test (in the TearDown function).

  Use the `use_gpu` and `force_gpu` options to control where ops are run. If
  `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
  `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
  possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned
  to the CPU.

  Example:
  ```python
  class MyOperatorTest(test_util.TensorFlowTestCase):
    def testMyOperator(self):
      with self.cached_session(use_gpu=True) as sess:
        valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
        result = MyOperator(valid_input).eval()
        self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
        invalid_input = [-1.0, 2.0, 7.0]
        with self.assertRaisesOpError("negative input not supported"):
          MyOperator(invalid_input).eval()
  ```

  Args:
    graph: Optional graph to use during the returned session.
    config: An optional config_pb2.ConfigProto to use to configure the
      session.
    use_gpu: If True, attempt to run as many ops as possible on GPU.
    force_gpu: If True, pin all ops to `/device:GPU:0`.

  Yields:
    A Session object that should be used as a context manager to surround
    the graph building and execution code in a test case.
  """
  if context.executing_eagerly():
    # Sessions are meaningless in eager mode; yield a placeholder.
    yield None
  else:
    # Reuse (or lazily create) the per-test session; it is closed in
    # tearDown, not here. Mismatched arguments on reuse raise a ValueError.
    sess = self._get_cached_session(
        graph, config, force_gpu, crash_if_inconsistent_args=True)
    with self._constrain_devices_and_set_default(sess, use_gpu,
                                                 force_gpu) as cached:
      yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
                        "`self.cached_session()` instead.")
def test_session(self,
                 graph=None,
                 config=None,
                 use_gpu=False,
                 force_gpu=False):
  """Use cached_session instead."""
  if self.id().endswith(".test_session"):
    # The "test_" prefix makes test runners pick this method up as a test
    # case; skip it explicitly so it never executes as one.
    self.skipTest("Not a test.")
  if context.executing_eagerly():
    yield None
  else:
    if graph is None:
      # Legacy behavior: reuse the cached session and tolerate mismatched
      # arguments instead of raising (crash_if_inconsistent_args=False).
      sess = self._get_cached_session(
          graph, config, force_gpu, crash_if_inconsistent_args=False)
      with self._constrain_devices_and_set_default(sess, use_gpu,
                                                   force_gpu) as cached:
        yield cached
    else:
      # An explicit graph always gets a fresh (non-cached) session.
      with self.session(graph, config, use_gpu, force_gpu) as sess:
        yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
  """A wrapper class for Thread that asserts successful completion.

  This class should be created using the TensorFlowTestCase.checkedThread()
  method.
  """

  def __init__(self, testcase, target, args=None, kwargs=None):
    """Constructs a new instance of _CheckedThread.

    Args:
      testcase: The TensorFlowTestCase for which this thread is being created.
      target: A callable object representing the code to be executed in the
        thread.
      args: A tuple of positional arguments that will be passed to target.
      kwargs: A dictionary of keyword arguments that will be passed to target.
    """
    self._testcase = testcase
    self._target = target
    self._args = () if args is None else args
    self._kwargs = {} if kwargs is None else kwargs
    # Run through _protected_run so exceptions raised in the thread are
    # captured instead of being lost when the thread dies.
    self._thread = threading.Thread(target=self._protected_run)
    self._exception = None
    # Tracks whether join() was called, so check_termination() can flag
    # threads that were started but never joined (a source of flakiness).
    self._is_thread_joined = False

  def _protected_run(self):
    """Target for the wrapper thread. Sets self._exception on failure."""
    try:
      self._target(*self._args, **self._kwargs)
    except Exception as e:  # pylint: disable=broad-except
      # Stash the exception; join() re-reports it as a test failure on the
      # main thread, where the test framework can see it.
      self._exception = e

  def start(self):
    """Starts the thread's activity.

    This must be called at most once per _CheckedThread object. It arranges
    for the object's target to be invoked in a separate thread of control.
    """
    self._thread.start()

  def join(self):
    """Blocks until the thread terminates.

    Raises:
      self._testcase.failureException: If the thread terminated due to
        an exception.
    """
    self._is_thread_joined = True
    self._thread.join()
    if self._exception is not None:
      self._testcase.fail("Error in checkedThread: %s" % str(self._exception))

  def is_alive(self):
    """Returns whether the thread is alive.

    This method returns True just before the run() method starts
    until just after the run() method terminates.

    Returns:
      True if the thread is alive, otherwise False.
    """
    return self._thread.is_alive()

  def check_termination(self):
    """Returns whether the checked thread was properly used and did terminate.

    Every checked thread should be "join"ed after starting, and before the
    test tears down. If it is not joined, it is possible the thread will hang
    and cause flaky failures in tests.

    Raises:
      self._testcase.failureException: If check_termination was called before
        thread was joined.
      RuntimeError: If the thread is not terminated. This means thread was not
        joined with the main thread.
    """
    if self._is_thread_joined:
      if self.is_alive():
        raise RuntimeError(
            "Thread was not joined with main thread, and is still running "
            "when the test finished.")
    else:
      self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
  """Returns a Thread wrapper that asserts 'target' completes successfully.

  This method should be used to create all threads in test cases, as
  otherwise there is a risk that a thread will silently fail, and/or
  assertions made in the thread will not be respected.

  Args:
    target: A callable object to be executed in the thread.
    args: The argument tuple for the target invocation. Defaults to ().
    kwargs: A dictionary of keyword arguments for the target invocation.
      Defaults to {}.

  Returns:
    A wrapper for threading.Thread that supports start() and join() methods.
  """
  # Register the wrapper so tearDown can verify every thread was joined.
  wrapper = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
  self._threads.append(wrapper)
  return wrapper
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
  """Asserts that two floats are near each other.

  Checks that |f1 - f2| < err and asserts a test failure
  if not.

  Args:
    f1: A float value.
    f2: A float value.
    err: A float value.
    msg: An optional string message to append to the failure message.
  """
  # Check direct equality first so f1 == f2 == inf passes, even though
  # inf - inf is nan and would fail the magnitude test.
  suffix = " (%s)" % msg if msg is not None else ""
  self.assertTrue(
      f1 == f2 or math.fabs(f1 - f2) <= err,
      "%f != %f +/- %f%s" % (f1, f2, err, suffix))
def assertArrayNear(self, farray1, farray2, err, msg=None):
  """Asserts that two float arrays are near each other.

  Checks that for all elements of farray1 and farray2
  |f1 - f2| < err. Asserts a test failure if not.

  Args:
    farray1: a list of float values.
    farray2: a list of float values.
    err: a float value.
    msg: Optional message to report on failure.
  """
  # Equal lengths first, then element-wise closeness.
  self.assertEqual(len(farray1), len(farray2), msg=msg)
  for lhs, rhs in zip(farray1, farray2):
    self.assertNear(float(lhs), float(rhs), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
  """Asserts that two numpy arrays have near values.

  "Near" means the L2 norm of their difference is below `err`.

  Args:
    ndarray1: a numpy ndarray.
    ndarray2: a numpy ndarray.
    err: a float. The maximum absolute difference allowed.
    msg: Optional message to report on failure.
  """
  self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
  """Converts `a` to a numpy ndarray, evaluating Tensors as needed."""
  # If a is a tensor then convert it to ndarray
  if isinstance(a, ops.Tensor):
    if isinstance(a, ops._EagerTensorBase):
      # Eager tensors expose their value directly.
      return a.numpy()
    else:
      # Graph tensors must be evaluated (see self.evaluate).
      a = self.evaluate(a)
  if not isinstance(a, np.ndarray):
    # Covers plain Python scalars/lists and anything numpy can convert.
    return np.array(a)
  return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
  """Asserts `a` and `b` (array-likes) have the same shape and close values.

  Args:
    a: the expected value; anything convertible by _GetNdArray.
    b: the actual value; anything convertible by _GetNdArray.
    rtol: relative tolerance.
    atol: absolute tolerance.
    msg: Optional message to prepend to the failure details.
  """
  a = self._GetNdArray(a)
  b = self._GetNdArray(b)
  # When the array rank is small, print its contents. Numpy array printing is
  # implemented using inefficient recursion so prints can cause tests to
  # time out.
  if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
    shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
                          "%s.") % (a.shape, b.shape, b)
  else:
    shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
                                                                   b.shape)
  self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
  msgs = [msg]
  if not np.allclose(a, b, rtol=rtol, atol=atol):
    # Adds more details to np.testing.assert_allclose.
    #
    # NOTE: numpy.allclose (and numpy.testing.assert_allclose)
    # checks whether two arrays are element-wise equal within a
    # tolerance. The relative difference (rtol * abs(b)) and the
    # absolute difference atol are added together to compare against
    # the absolute difference between a and b. Here, we want to
    # tell user which elements violate such conditions.
    cond = np.logical_or(
        np.abs(a - b) > atol + rtol * np.abs(b),
        np.isnan(a) != np.isnan(b))
    if a.ndim:
      x = a[np.where(cond)]
      y = b[np.where(cond)]
      msgs.append("not close where = {}".format(np.where(cond)))
    else:
      # np.where is broken for scalars
      x, y = a, b
    msgs.append("not close lhs = {}".format(x))
    msgs.append("not close rhs = {}".format(y))
    msgs.append("not close dif = {}".format(np.abs(x - y)))
    msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
    msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
    # TODO(xpan): There seems to be a bug:
    # tensorflow/compiler/tests:binary_ops_test pass with float32
    # nan even though the equal_nan is False by default internally.
    np.testing.assert_allclose(
        a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
                             a,
                             b,
                             rtol=1e-6,
                             atol=1e-6,
                             path=None,
                             msg=None):
  """Recursively compares nested structures (dicts/lists/tuples/arrays).

  `path` accumulates the keys/indices traversed so far, so failure
  messages can point at the offending leaf, e.g. a[0]['d'].

  Args:
    a: expected structure (dict, namedtuple, list/tuple, or array-like).
    b: actual structure of the same shape as `a`.
    rtol: relative tolerance.
    atol: absolute tolerance.
    path: internal; list of path components reached so far.
    msg: Optional message to report on failure.
  """
  path = path or []
  path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
  msg = msg if msg else ""

  # Check if a and/or b are namedtuples.
  if hasattr(a, "_asdict"):
    a = a._asdict()
  if hasattr(b, "_asdict"):
    b = b._asdict()
  # NOTE(review): `collections.Mapping` is a deprecated alias removed in
  # Python 3.10; modern code should use `collections.abc.Mapping` — kept
  # as-is here for the Python 2/3 (six) compatibility this file targets.
  a_is_dict = isinstance(a, collections.Mapping)
  if a_is_dict != isinstance(b, collections.Mapping):
    raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
                     (path_str, path_str, msg))
  if a_is_dict:
    self.assertItemsEqual(
        a.keys(),
        b.keys(),
        msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
        (path_str, a.keys(), path_str, b.keys(), msg))
    for k in a:
      path.append(k)
      self._assertAllCloseRecursive(
          a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
      del path[-1]
  elif isinstance(a, (list, tuple)):
    # Try to directly compare a, b as ndarrays; if not work, then traverse
    # through the sequence, which is more expensive.
    try:
      a_as_ndarray = self._GetNdArray(a)
      b_as_ndarray = self._GetNdArray(b)
      self._assertArrayLikeAllClose(
          a_as_ndarray,
          b_as_ndarray,
          rtol=rtol,
          atol=atol,
          msg="Mismatched value: a%s is different from b%s. %s" %
          (path_str, path_str, msg))
    except (ValueError, TypeError) as e:
      # Ragged or heterogeneous sequences: compare element by element.
      if len(a) != len(b):
        raise ValueError(
            "Mismatched length: a%s has %d items, but b%s has %d items. %s" %
            (path_str, len(a), path_str, len(b), msg))
      for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
        path.append(str(idx))
        self._assertAllCloseRecursive(
            a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
        del path[-1]
  # a and b are ndarray like objects
  else:
    try:
      self._assertArrayLikeAllClose(
          a,
          b,
          rtol=rtol,
          atol=atol,
          msg=("Mismatched value: a%s is different from b%s. %s" %
               (path_str, path_str, msg)))
    except TypeError as e:
      # Surface the mismatched types alongside the original error.
      msg = ("Error: a%s has %s, but b%s has %s. %s" %
             (path_str, type(a), path_str, type(b), msg))
      e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
      raise
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
  """Asserts that two structures of numpy arrays or Tensors, have near values.

  `a` and `b` can be arbitrarily nested structures. A layer of a nested
  structure can be a `dict`, `namedtuple`, `tuple` or `list`.

  Args:
    a: The expected numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor), or any arbitrarily nested of
      structure of these.
    b: The actual numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor), or any arbitrarily nested of
      structure of these.
    rtol: relative tolerance.
    atol: absolute tolerance.
    msg: Optional message to report on failure.

  Raises:
    ValueError: if only one of `a[p]` and `b[p]` is a dict or
      `a[p]` and `b[p]` have different length, where `[p]` denotes a path
      to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
      `[p] = [1]['d']`, then `a[p] = (6, 7)`.
  """
  # All traversal logic lives in the recursive helper.
  self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllCloseAccordingToType(self,
                                  a,
                                  b,
                                  rtol=1e-6,
                                  atol=1e-6,
                                  float_rtol=1e-6,
                                  float_atol=1e-6,
                                  half_rtol=1e-3,
                                  half_atol=1e-3,
                                  bfloat16_rtol=1e-2,
                                  bfloat16_atol=1e-2,
                                  msg=None):
  """Like assertAllClose, but also suitable for comparing fp16 arrays.

  In particular, the tolerance is reduced to 1e-3 if at least
  one of the arguments is of type float16.

  Args:
    a: the expected numpy ndarray or anything can be converted to one.
    b: the actual numpy ndarray or anything can be converted to one.
    rtol: relative tolerance.
    atol: absolute tolerance.
    float_rtol: relative tolerance for float32.
    float_atol: absolute tolerance for float32.
    half_rtol: relative tolerance for float16.
    half_atol: absolute tolerance for float16.
    bfloat16_rtol: relative tolerance for bfloat16.
    bfloat16_atol: absolute tolerance for bfloat16.
    msg: Optional message to report on failure.
  """
  a = self._GetNdArray(a)
  b = self._GetNdArray(b)
  # types with lower tol are put later to overwrite previous ones.
  # Each branch widens the tolerance via max(), so the loosest tolerance
  # implied by either operand's dtype wins.
  if (a.dtype == np.float32 or b.dtype == np.float32 or
      a.dtype == np.complex64 or b.dtype == np.complex64):
    rtol = max(rtol, float_rtol)
    atol = max(atol, float_atol)
  if a.dtype == np.float16 or b.dtype == np.float16:
    rtol = max(rtol, half_rtol)
    atol = max(atol, half_atol)
  if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
      b.dtype == dtypes.bfloat16.as_numpy_dtype):
    rtol = max(rtol, bfloat16_rtol)
    atol = max(atol, bfloat16_atol)
  self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
def assertNotAllClose(self, a, b, **kwargs):
  """Assert that two numpy arrays, or Tensors, do not have near values.

  Args:
    a: the first value to compare.
    b: the second value to compare.
    **kwargs: additional keyword arguments to be passed to the underlying
      `assertAllClose` call.

  Raises:
    AssertionError: If `a` and `b` are unexpectedly close at all elements.
  """
  # Invert assertAllClose: success here means that check failed somewhere.
  all_close = True
  try:
    self.assertAllClose(a, b, **kwargs)
  except AssertionError:
    all_close = False
  if all_close:
    raise AssertionError("The two values are close at all elements")
def assertAllEqual(self, a, b, msg=None):
  """Asserts that two numpy arrays or Tensors have the same values.

  NaN values in matching positions compare equal for floating-point
  dtypes (unlike plain `==`).

  Args:
    a: the expected numpy ndarray or anything can be converted to one.
    b: the actual numpy ndarray or anything can be converted to one.
    msg: Optional message to report on failure.
  """
  msg = msg if msg else ""
  a = self._GetNdArray(a)
  b = self._GetNdArray(b)
  # Arbitrary bounds so that we don't print giant tensors.
  if (b.ndim <= 3 or b.size < 500):
    self.assertEqual(
        a.shape, b.shape, "Shape mismatch: expected %s, got %s."
        " Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
  else:
    self.assertEqual(
        a.shape, b.shape, "Shape mismatch: expected %s, got %s."
        " %s" % (a.shape, b.shape, msg))
  same = (a == b)

  # Treat NaN == NaN as equal for float dtypes, since `==` never does.
  if (a.dtype in [
      np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
  ]):
    same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
  msgs = [msg]
  if not np.all(same):
    # Adds more details to np.testing.assert_array_equal.
    diff = np.logical_not(same)
    if a.ndim:
      x = a[np.where(diff)]
      y = b[np.where(diff)]
      msgs.append("not equal where = {}".format(np.where(diff)))
    else:
      # np.where is broken for scalars
      x, y = a, b
    msgs.append("not equal lhs = {}".format(x))
    msgs.append("not equal rhs = {}".format(y))
    np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
def assertAllGreater(self, a, comparison_target):
  """Assert element values are all greater than a target value.

  Args:
    a: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    comparison_target: The target value of comparison.
  """
  # If the minimum exceeds the target, every element does.
  arr = self._GetNdArray(a)
  self.assertGreater(np.min(arr), comparison_target)
def assertAllLess(self, a, comparison_target):
  """Assert element values are all less than a target value.

  Args:
    a: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    comparison_target: The target value of comparison.
  """
  # If the maximum is below the target, every element is.
  arr = self._GetNdArray(a)
  self.assertLess(np.max(arr), comparison_target)
def assertAllGreaterEqual(self, a, comparison_target):
  """Assert element values are all greater than or equal to a target value.

  Args:
    a: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    comparison_target: The target value of comparison.
  """
  # Checking the minimum covers all elements at once.
  arr = self._GetNdArray(a)
  self.assertGreaterEqual(np.min(arr), comparison_target)
def assertAllLessEqual(self, a, comparison_target):
  """Assert element values are all less than or equal to a target value.

  Args:
    a: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    comparison_target: The target value of comparison.
  """
  # Checking the maximum covers all elements at once.
  arr = self._GetNdArray(a)
  self.assertLessEqual(np.max(arr), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate
lines. A line of ellipses (...) will be appended at the end if the number of
subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
def assertAllInRange(self,
                     target,
                     lower_bound,
                     upper_bound,
                     open_lower_bound=False,
                     open_upper_bound=False):
  """Assert that elements in a Tensor are all in a given range.

  Args:
    target: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    lower_bound: lower bound of the range
    upper_bound: upper bound of the range
    open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
      than the default >=)
    open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
      than the default <=)

  Raises:
    AssertionError:
      if the value tensor does not have an ordered numeric type (float* or
      int*), or
      if there are nan values, or
      if any of the elements do not fall in the specified range.
  """
  target = self._GetNdArray(target)
  # Ranges are only meaningful for ordered numeric dtypes.
  if not (np.issubdtype(target.dtype, np.floating) or
          np.issubdtype(target.dtype, np.integer)):
    raise AssertionError(
        "The value of %s does not have an ordered numeric type, instead it "
        "has type: %s" % (target, target.dtype))

  # NaNs compare false against any bound, so reject them explicitly with a
  # dedicated message before the range check.
  nan_subscripts = np.where(np.isnan(target))
  if np.size(nan_subscripts):
    raise AssertionError(
        "%d of the %d element(s) are NaN. "
        "Subscripts(s) and value(s) of the NaN element(s):\n" %
        (len(nan_subscripts[0]), np.size(target)) +
        "\n".join(self._format_subscripts(nan_subscripts, target)))

  range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
               str(upper_bound) + (")" if open_upper_bound else "]"))
  # A violation of an open bound includes equality with the bound itself.
  violations = (
      np.less_equal(target, lower_bound)
      if open_lower_bound else np.less(target, lower_bound))
  violations = np.logical_or(
      violations,
      np.greater_equal(target, upper_bound)
      if open_upper_bound else np.greater(target, upper_bound))
  violation_subscripts = np.where(violations)
  if np.size(violation_subscripts):
    raise AssertionError(
        "%d of the %d element(s) are outside the range %s. " %
        (len(violation_subscripts[0]), np.size(target), range_str) +
        "Subscript(s) and value(s) of the offending elements:\n" +
        "\n".join(self._format_subscripts(violation_subscripts, target)))
def assertAllInSet(self, target, expected_set):
  """Assert that elements of a Tensor are all in a given closed set.

  Args:
    target: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    expected_set: (`list`, `tuple` or `set`) The closed set that the elements
      of the value of `target` are expected to fall into.

  Raises:
    AssertionError:
      if any of the elements do not fall into `expected_set`.
  """
  arr = self._GetNdArray(target)
  # setdiff1d yields the unique elements of arr absent from expected_set.
  unexpected = np.setdiff1d(arr.flatten(), list(expected_set))
  if np.size(unexpected):
    raise AssertionError("%d unique element(s) are not in the set %s: %s" %
                         (np.size(unexpected), expected_set, unexpected))
def assertDTypeEqual(self, target, expected_dtype):
  """Assert ndarray data type is equal to expected.

  Args:
    target: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor), or a list of such values.
    expected_dtype: Expected data type.
  """
  target = self._GetNdArray(target)
  # Bug fix: the original only assigned `arrays` in the non-list branch,
  # leaving it unbound (NameError) if `target` were a list. Handle both
  # cases explicitly.
  arrays = target if isinstance(target, list) else [target]
  for arr in arrays:
    self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
                                   expected_err_re_or_predicate):
  """Returns a context manager to enclose code expected to raise an exception.

  If the exception is an OpError, the op stack is also included in the message
  predicate search.

  Args:
    exception_type: The expected type of exception that should be raised.
    expected_err_re_or_predicate: If this is callable, it should be a function
      of one argument that inspects the passed-in exception and
      returns True (success) or False (please fail the test). Otherwise, the
      error message is expected to match this regular expression partially.

  Returns:
    A context manager to surround code that is expected to raise an
    exception.
  """
  if callable(expected_err_re_or_predicate):
    predicate = expected_err_re_or_predicate
  else:

    def predicate(e):
      # For OpErrors, append the originating-op chain so the regex can
      # match against op names as well as the message text.
      err_str = e.message if isinstance(e, errors.OpError) else str(e)
      op = e.op if isinstance(e, errors.OpError) else None
      while op is not None:
        err_str += "\nCaused by: " + op.name
        op = op._original_op  # pylint: disable=protected-access
      logging.info("Searching within error strings: '%s' within '%s'",
                   expected_err_re_or_predicate, err_str)
      return re.search(expected_err_re_or_predicate, err_str)

  try:
    yield
    # Reaching here means the guarded block did not raise at all.
    self.fail(exception_type.__name__ + " not raised")
  except Exception as e:  # pylint: disable=broad-except
    if not isinstance(e, exception_type) or not predicate(e):
      raise AssertionError(
          "Exception of type %s: %s" % (str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
  """Context manager asserting the enclosed code raises an `errors.OpError`.

  Shorthand for assertRaisesWithPredicateMatch with `errors.OpError`.
  """
  return self.assertRaisesWithPredicateMatch(errors.OpError,
                                             expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
  """Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.

  Args:
    np_array: A Numpy ndarray or Numpy scalar.
    tf_tensor: A Tensor.
    msg: Optional message to report on failure.

  Raises:
    TypeError: If the arguments have the wrong type.
  """
  if not isinstance(np_array, (np.ndarray, np.generic)):
    raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
  if not isinstance(tf_tensor, ops.Tensor):
    raise TypeError("tf_tensor must be a Tensor")
  # Compares the *static* shape of the tensor (get_shape), not a runtime one.
  self.assertAllEqual(
      np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
  """Asserts that the two given devices are the same.

  Both arguments are canonicalized before comparison, so spellings that
  refer to the same device compare equal.

  Args:
    device1: A string device name or TensorFlow `DeviceSpec` object.
    device2: A string device name or TensorFlow `DeviceSpec` object.
    msg: Optional message to report on failure.
  """
  device1 = pydev.canonical_name(device1)
  device2 = pydev.canonical_name(device2)
  # NOTE(review): when msg is None the failure text renders "None" at the
  # end; callers wanting a clean message should always pass msg.
  self.assertEqual(
      device1, device2,
      "Devices %s and %s are not equal. %s" % (device1, device2, msg))
# Fix Python 3 compatibility issues
# Class-level aliases: map the Python 2 assertion names used throughout the
# codebase onto their Python 3 equivalents.
if six.PY3:
  # pylint: disable=invalid-name

  # Silence a deprecation warning
  assertRaisesRegexp = googletest.TestCase.assertRaisesRegex

  # assertItemsEqual is assertCountEqual as of 3.2.
  assertItemsEqual = googletest.TestCase.assertCountEqual

  # pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
  """Set the session and its graph to global default and constrain devices.

  Args:
    sess: The session to install as default.
    use_gpu: If True, leave device placement unconstrained.
    force_gpu: If True, pin all ops to a GPU device (takes precedence over
      use_gpu).

  Yields:
    `sess` (or None when executing eagerly).
  """
  if context.executing_eagerly():
    yield None
  else:
    with sess.graph.as_default(), sess.as_default():
      if force_gpu:
        # Use the name of an actual device if one is detected, or
        # '/device:GPU:0' otherwise
        gpu_name = gpu_device_name()
        if not gpu_name:
          gpu_name = "/device:GPU:0"
        with sess.graph.device(gpu_name):
          yield sess
      elif use_gpu:
        # No constraint: the placer decides where each op runs.
        yield sess
      else:
        with sess.graph.device("/device:CPU:0"):
          yield sess
def _create_session(self, graph, config, force_gpu):
  """See session() for details.

  Returns an ErrorLoggingSession configured for deterministic tests.
  """

  def prepare_config(config):
    """Returns a config for sessions.

    Args:
      config: An optional config_pb2.ConfigProto to use to configure the
        session.

    Returns:
      A config_pb2.ConfigProto object.
    """
    # TODO(b/114333779): Enforce allow_soft_placement=False when
    # use_gpu=False. Currently many tests rely on the fact that any device
    # will be used even when a specific device is supposed to be used.
    allow_soft_placement = not force_gpu
    if config is None:
      config = config_pb2.ConfigProto()
      config.allow_soft_placement = allow_soft_placement
      # Limit GPU memory so multiple test processes can share a device.
      config.gpu_options.per_process_gpu_memory_fraction = 0.3
    elif not allow_soft_placement and config.allow_soft_placement:
      # Copy before mutating: the caller's proto must not be modified.
      config_copy = config_pb2.ConfigProto()
      config_copy.CopyFrom(config)
      config = config_copy
      config.allow_soft_placement = False
    # Don't perform optimizations for tests so we don't inadvertently run
    # gpu ops on cpu
    config.graph_options.optimizer_options.opt_level = -1
    # Disable Grappler constant folding since some tests & benchmarks
    # use constant input and become meaningless after constant folding.
    # DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
    # GRAPPLER TEAM.
    config.graph_options.rewrite_options.constant_folding = (
        rewriter_config_pb2.RewriterConfig.OFF)
    config.graph_options.rewrite_options.pin_to_host_optimization = (
        rewriter_config_pb2.RewriterConfig.OFF)
    return config

  return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
                        graph=None,
                        config=None,
                        force_gpu=False,
                        crash_if_inconsistent_args=True):
  """See cached_session() for documentation."""
  if self._cached_session is None:
    # First use: create the session and remember the arguments that built
    # it so later calls can detect inconsistent reuse.
    session = self._create_session(
        graph=graph, config=config, force_gpu=force_gpu)
    self._cached_session = session
    self._cached_graph = graph
    self._cached_config = config
    self._cached_force_gpu = force_gpu
    return session
  # Subsequent use: optionally verify the caller is asking for the same
  # session that was originally created (identity comparison, as before).
  if crash_if_inconsistent_args:
    if self._cached_graph is not graph:
      raise ValueError("The graph used to get the cached session is "
                       "different than the one that was used to create the "
                       "session. Maybe create a new session with "
                       "self.session()")
    if self._cached_config is not config:
      raise ValueError("The config used to get the cached session is "
                       "different than the one that was used to create the "
                       "session. Maybe create a new session with "
                       "self.session()")
    if self._cached_force_gpu is not force_gpu:
      raise ValueError(
          "The force_gpu value used to get the cached session is "
          "different than the one that was used to create the "
          "session. Maybe create a new session with "
          "self.session()")
  return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
                         num_ps,
                         protocol="grpc",
                         worker_config=None,
                         ps_config=None):
  """Create and start local servers and return the associated `Server` objects.

  Example:
  ```python
  workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)

  worker_sessions = [tf.Session(w.target) for w in workers]

  with tf.device("/job:ps/task:0"):
    ...
  with tf.device("/job:ps/task:1"):
    ...
  with tf.device("/job:worker/task:0"):
    ...
  with tf.device("/job:worker/task:1"):
    ...

  worker_sessions[0].run(...)
  ```

  Args:
    num_workers: Number of worker servers to start.
    num_ps: Number of PS servers to start.
    protocol: Communication protocol. Allowed values are documented in
      the documentation of `tf.train.Server`.
    worker_config: (optional) ConfigProto to initialize workers. Can be used
      to instantiate multiple devices etc.
    ps_config: (optional) ConfigProto to initialize PS servers.

  Returns:
    A tuple `(worker_servers, ps_servers)`.  `worker_servers` is a list
    of `num_workers` objects of type `tf.train.Server` (all running locally);
    and `ps_servers` is a list of `num_ps` objects of similar type.

  Raises:
    ImportError: if portpicker module was not found at load time
  """
  # portpicker is an optional dependency; the import error captured at module
  # load time is re-raised here so only callers of this function pay for it.
  if _portpicker_import_error:
    raise _portpicker_import_error  # pylint: disable=raising-bad-type
  worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
  ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
  cluster_dict = {
      "worker": ["localhost:%s" % port for port in worker_ports],
      "ps": ["localhost:%s" % port for port in ps_ports]
  }
  cs = server_lib.ClusterSpec(cluster_dict)
  # start=True: every server is already running when returned.
  workers = [
      server_lib.Server(
          cs,
          job_name="worker",
          protocol=protocol,
          task_index=ix,
          config=worker_config,
          start=True) for ix in range(num_workers)
  ]
  ps_servers = [
      server_lib.Server(
          cs,
          job_name="ps",
          protocol=protocol,
          task_index=ix,
          config=ps_config,
          start=True) for ix in range(num_ps)
  ]
  return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
  """Returns the `NodeDef` instance for given node name in the graph def.

  This method explores only the NodeDefs in `graph_def.node`.

  Args:
    node_name: Name of the NodeDef to search for.
    graph_def: An instance of `GraphDef` proto.

  Returns:
    the `NodeDef` instance whose name field matches the given node_name or
    None.
  """
  # Lazily scan the node list and stop at the first name match.
  matches = (node_def for node_def in graph_def.node
             if node_def.name == node_name)
  return next(matches, None)
def set_producer_version(graph, producer_version):
  """Sets graph.graph_def_versions.producer to `producer_version`.

  Args:
    graph: The `tf.Graph` whose producer version is updated.
    producer_version: The integer producer version to set.
  """
  # The C API doesn't expose altering GraphDefVersions. We can indirectly set
  # it via import_graph_def though.
  graph_def = graph_pb2.GraphDef()
  graph_def.versions.producer = producer_version
  with graph.as_default():
    importer.import_graph_def(graph_def)
  # Bug fix: the original `assert x, y` form used `producer_version` as the
  # assertion *message*, so any truthy producer passed regardless of value.
  # Compare explicitly so a wrong version actually fails.
  assert graph.graph_def_versions.producer == producer_version
def dismantle_func_graph(func_graph):
  """Removes reference cycles in `func_graph` FuncGraph.

  Helpful for making sure the garbage collector doesn't need to run when
  the FuncGraph goes out of scope, e.g. in tests using defun with
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).

  Args:
    func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
      after this function.
  """
  # TODO(b/115366440): Delete this method when a custom OrderedDict is added.
  # Clearing captures using clear() leaves some cycles around.
  while func_graph.captures:
    func_graph.captures.popitem()
  memory.dismantle_ordered_dict(func_graph.captures)
  ops.dismantle_graph(func_graph)
def dismantle_polymorphic_function(func):
  """Removes reference cycles in PolymorphicFunction `func`.

  Helpful for making sure the garbage collector doesn't need to run when
  PolymorphicFunction goes out of scope, e.g. in tests using defun with
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).

  Args:
    func: A `PolymorphicFunction` object to destroy. `func` is unusable
      after this function.
  """
  # TODO(b/115366440): Delete this method when a custom OrderedDict is added
  cache = func._function_cache  # pylint: disable=protected-access
  # Dismantle every concrete function's graph before emptying the cache.
  for concrete_func in cache.values():
    dismantle_func_graph(concrete_func.graph)
  while cache:
    cache.popitem()
  memory.dismantle_ordered_dict(cache)
|
recorder.py | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Recorder subscribes to ROS messages and writes them to a bag file.
"""
from __future__ import print_function
try:
from queue import Queue
except ImportError:
from Queue import Queue
import re
import threading
import time
import rosbag
import rosgraph
import roslib
import rospy
import sys
class Recorder(object):
    """
    Subscribes to ROS topics and records incoming messages to a bag file.

    Topic discovery runs on a master-polling thread; all bag writes are
    serialized through a queue drained by a dedicated writer thread.
    """
    def __init__(self, filename, bag_lock=None, all=True, topics=None, regex=False, limit=0, master_check_interval=1.0):
        """
        Subscribe to ROS messages and record them to a bag file.
        @param filename: filename of bag to write to
        @type filename: str
        @param bag_lock: lock guarding bag writes; a new Lock is created if None
        @param all: all topics are to be recorded [default: True]
        @type all: bool
        @param topics: topics (or regexes if regex is True) to record [default: empty list]
        @type topics: list of str
        @param regex: topics should be considered as regular expressions [default: False]
        @type regex: bool
        @param limit: record only this number of messages on each topic (if non-positive, then unlimited) [default: 0]
        @type limit: int
        @param master_check_interval: period (in seconds) to check master for new topic publications [default: 1]
        @type master_check_interval: float
        """
        self._all = all
        # topics used to default to a mutable list ([]), shared across calls;
        # None is now the sentinel for "no explicit topic list".
        self._topics = topics if topics is not None else []
        self._regex = regex
        self._limit = limit
        self._master_check_interval = master_check_interval
        self._bag = rosbag.Bag(filename, 'w')
        self._bag_lock = bag_lock if bag_lock else threading.Lock()
        self._listeners = []
        self._subscriber_helpers = {}   # topic -> _SubscriberHelper
        self._limited_topics = set()    # topics that reached the message limit
        self._failed_topics = set()     # topics we could not subscribe to
        self._last_update = time.time()
        self._write_queue = Queue()
        self._paused = False
        self._stop_condition = threading.Condition()
        self._stop_flag = False
        # Compile regular expressions
        if self._regex:
            self._regexes = [re.compile(t) for t in self._topics]
        else:
            self._regexes = None
        self._message_count = {}  # topic -> int (track number of messages recorded on each topic)
        self._master_check_thread = threading.Thread(target=self._run_master_check)
        self._write_thread = threading.Thread(target=self._run_write)

    @property
    def bag(self):
        return self._bag

    def add_listener(self, listener):
        """
        Add a listener which gets called whenever a message is recorded.
        @param listener: function to call
        @type listener: function taking (topic, message, time)
        """
        self._listeners.append(listener)

    def start(self):
        """
        Start subscribing and recording messages to bag.
        """
        self._master_check_thread.start()
        self._write_thread.start()

    @property
    def paused(self):
        return self._paused

    def pause(self):
        self._paused = True

    def unpause(self):
        self._paused = False

    def toggle_paused(self):
        self._paused = not self._paused

    def stop(self):
        """
        Stop recording.
        """
        with self._stop_condition:
            self._stop_flag = True
            self._stop_condition.notify_all()
        # self acts as a sentinel so the writer thread wakes up and re-checks
        # the stop flag (see _run_write).
        self._write_queue.put(self)

    ## Implementation

    def _run_master_check(self):
        """Poll the ROS master for new publications and subscribe to matches."""
        master = rosgraph.Master('rqt_bag_recorder')
        try:
            while not self._stop_flag:
                # Check for new topics
                for topic, datatype in master.getPublishedTopics(''):
                    # Check if:
                    #    the topic is already subscribed to, or
                    #    we've failed to subscribe to it already, or
                    #    we've already reached the message limit, or
                    #    we don't want to subscribe
                    if topic in self._subscriber_helpers or topic in self._failed_topics or topic in self._limited_topics or not self._should_subscribe_to(topic):
                        continue
                    try:
                        pytype = roslib.message.get_message_class(datatype)
                        self._message_count[topic] = 0
                        self._subscriber_helpers[topic] = _SubscriberHelper(self, topic, pytype)
                    except Exception as ex:
                        print('Error subscribing to %s (ignoring): %s' % (topic, str(ex)), file=sys.stderr)
                        self._failed_topics.add(topic)
                # Wait a while. The previous code acquired the condition here
                # without ever releasing it, so the lock stayed held across
                # the whole scan loop; the with-block scopes it to the wait.
                with self._stop_condition:
                    self._stop_condition.wait(self._master_check_interval)
        except Exception as ex:
            print('Error recording to bag: %s' % str(ex), file=sys.stderr)
        # Unsubscribe from all topics
        for topic in list(self._subscriber_helpers.keys()):
            self._unsubscribe(topic)
        # Close the bag file so that the index gets written
        try:
            self._bag.close()
        except Exception as ex:
            # report on stderr for consistency with the other error paths
            print('Error closing bag [%s]: %s' % (self._bag.filename, str(ex)), file=sys.stderr)

    def _should_subscribe_to(self, topic):
        """Return True if this topic matches the recording configuration."""
        if self._all:
            return True
        if not self._regex:
            return topic in self._topics
        for regex in self._regexes:
            if regex.match(topic):
                return True
        return False

    def _unsubscribe(self, topic):
        """Unregister the subscriber for topic and drop its helper."""
        try:
            self._subscriber_helpers[topic].subscriber.unregister()
        except Exception:
            return
        del self._subscriber_helpers[topic]

    def _record(self, topic, m):
        """Queue one incoming message for writing (called from subscriber callbacks)."""
        if self._paused:
            return
        if self._limit and self._message_count[topic] >= self._limit:
            self._limited_topics.add(topic)
            self._unsubscribe(topic)
            return
        self._write_queue.put((topic, m, rospy.get_rostime()))
        self._message_count[topic] += 1

    def _run_write(self):
        """Drain the write queue into the bag and notify listeners."""
        try:
            while not self._stop_flag:
                # Wait for a message
                item = self._write_queue.get()
                if item == self:
                    # stop() sentinel; loop back to re-check the stop flag
                    continue
                topic, m, t = item
                # Write to the bag
                with self._bag_lock:
                    self._bag.write(topic, m, t)
                # Notify listeners that a message has been recorded
                for listener in self._listeners:
                    listener(topic, m, t)
        except Exception as ex:
            print('Error write to bag: %s' % str(ex), file=sys.stderr)
class _SubscriberHelper(object):
    """Bridges a single rospy subscription to an owning Recorder."""
    def __init__(self, recorder, topic, pytype):
        self.recorder = recorder
        self.topic = topic
        # Subscribe immediately; each incoming message is forwarded to the
        # recorder through callback().
        self.subscriber = rospy.Subscriber(self.topic, pytype, self.callback)

    def callback(self, msg):
        # Hand the message to the owning recorder for queueing and writing.
        self.recorder._record(self.topic, msg)
|
Misc.py | ## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import sys
import string
import threading
import time
import re
import pickle
import array
import shutil
from struct import pack
from collections import UserDict as IterableUserDict
from collections import OrderedDict
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from .DataType import *
from .BuildToolError import *
from CommonDataClass.DataClass import *
from .Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import uuid
from CommonDataClass.Exceptions import BadExpression
from Common.caching import cached_property
import subprocess
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
## regular expressions for map file processing
startPatternGeneral = re.compile("^Start[' ']+Length[' ']+Name[' ']+Class")
addressPatternGeneral = re.compile("^Address[' ']+Publics by Value[' ']+Rva\+Base")
valuePatternGcc = re.compile('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$')
pcdPatternGcc = re.compile('^([\da-fA-Fx]+) +([\da-fA-Fx]+)')
secReGeneral = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
## Dictionary used to store file time stamp for quick re-access
gFileTimeStampCache = {} # {file path : file time stamp}
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
#
# If a module is built more than once with different PCDs or library classes
# a temporary INF file with same content is created, the temporary file is removed
# when build exits.
#
_TempInfs = []
def GetVariableOffset(mapfilepath, efifilepath, varnames):
    """ Parse map file to get variable offset in current EFI file
    @param mapfilepath    Map file absolute path
    @param efifilepath:   EFI binary file full path
    @param varnames       iterable container whose elements are variable names to be searched
    @return List whose elements are tuples of (variable name, raw offset);
            None if the map file cannot be read or is empty
    """
    lines = []
    try:
        f = open(mapfilepath, 'r')
        lines = f.readlines()
        f.close()
    except:
        return None
    if len(lines) == 0: return None
    firstline = lines[0].strip()
    # Dispatch on map-file flavor, recognized from its first line:
    # GCC ld, Xcode ld, or the general (MSVC-style) format.
    if (firstline.startswith("Archive member included ") and
        firstline.endswith(" file (symbol)")):
        return _parseForGCC(lines, efifilepath, varnames)
    if firstline.startswith("# Path:"):
        return _parseForXcode(lines, efifilepath, varnames)
    return _parseGeneral(lines, efifilepath, varnames)
def _parseForXcode(lines, efifilepath, varnames):
status = 0
ret = []
for line in lines:
line = line.strip()
if status == 0 and line == "# Symbols:":
status = 1
continue
if status == 1 and len(line) != 0:
for varname in varnames:
if varname in line:
# cannot pregenerate this RegEx since it uses varname from varnames.
m = re.match('^([\da-fA-FxX]+)([\s\S]*)([_]*%s)$' % varname, line)
if m is not None:
ret.append((varname, m.group(1)))
return ret
def _parseForGCC(lines, efifilepath, varnames):
    """ Parse map file generated by GCC linker

    Walks the map with a small state machine (Memory Configuration ->
    Linker script and memory map -> START GROUP), collects .data entries for
    the requested variable names, then converts their link-time addresses to
    file offsets using the EFI image's section headers.
    @return list of (name, hex-offset string) tuples; [] when nothing found
    """
    status = 0
    sections = []
    varoffset = []
    for index, line in enumerate(lines):
        line = line.strip()
        # status machine transition
        if status == 0 and line == "Memory Configuration":
            status = 1
            continue
        elif status == 1 and line == 'Linker script and memory map':
            status = 2
            continue
        elif status ==2 and line == 'START GROUP':
            status = 3
            continue
        # status handler
        if status == 3:
            m = valuePatternGcc.match(line)
            if m is not None:
                sections.append(m.groups(0))
            for varname in varnames:
                Str = ''
                # NOTE(review): '.' is unescaped here, so "^.data." matches any
                # character in those positions — presumably a literal '.' was
                # intended; verify against real GCC map files before changing.
                m = re.match("^.data.(%s)" % varname, line)
                if m is not None:
                    # Second, anchored match distinguishes ".data.<var>" alone
                    # (value on the next line) from ".data.<var> <value>".
                    m = re.match(".data.(%s)$" % varname, line)
                    if m is not None:
                        Str = lines[index + 1]
                    else:
                        Str = line[len(".data.%s" % varname):]
                if Str:
                    m = pcdPatternGcc.match(Str.strip())
                    if m is not None:
                        # (name, symbol addr, section base addr, section name)
                        varoffset.append((varname, int(m.groups(0)[0], 16), int(sections[-1][1], 16), sections[-1][0]))
    if not varoffset:
        return []
    # get section information from efi file
    efisecs = PeImageClass(efifilepath).SectionHeaderList
    if efisecs is None or len(efisecs) == 0:
        return []
    #redirection: delta between link-time .text base and the image's .text VA
    redirection = 0
    for efisec in efisecs:
        for section in sections:
            if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
                redirection = int(section[1], 16) - efisec[1]
    ret = []
    for var in varoffset:
        for efisec in efisecs:
            # translate symbol VA into a raw file offset within its section
            if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
                ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
    return ret
def _parseGeneral(lines, efifilepath, varnames):
    """Parse a map file in the general (MSVC-style) format.

    @return list of (variable name, hex offset string) tuples; [] when no
            requested variable is found or the EFI sections can't be read
    """
    status = 0    #0 - beginning of file; 1 - PE section definition; 2 - symbol table
    secs = []    # key = section name
    varoffset = []
    symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
    for line in lines:
        line = line.strip()
        # State transitions are driven by the headers in the map file.
        if startPatternGeneral.match(line):
            status = 1
            continue
        if addressPatternGeneral.match(line):
            status = 2
            continue
        if line.startswith("entry point at"):
            status = 3
            continue
        if status == 1 and len(line) != 0:
            m = secReGeneral.match(line)
            assert m is not None, "Fail to parse the section in map file , line is %s" % line
            sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
            secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
        if status == 2 and len(line) != 0:
            for varname in varnames:
                m = symRe.match(line)
                assert m is not None, "Fail to parse the symbol in map file, line is %s" % line
                sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
                sec_no = int(sec_no, 16)
                sym_offset = int(sym_offset, 16)
                vir_addr = int(vir_addr, 16)
                # cannot pregenerate this RegEx since it uses varname from varnames.
                m2 = re.match('^[_]*(%s)' % varname, sym_name)
                if m2 is not None:
                    # found a binary pcd entry in map file; remember the symbol
                    # only if its offset lies inside the matching section
                    for sec in secs:
                        if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
                            varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
    if not varoffset: return []
    # get section information from efi file
    efisecs = PeImageClass(efifilepath).SectionHeaderList
    if efisecs is None or len(efisecs) == 0:
        return []
    ret = []
    for var in varoffset:
        index = 0
        for efisec in efisecs:
            index = index + 1
            # match by section name first, fall back to the section ordinal
            if var[1].strip() == efisec[0].strip():
                ret.append((var[0], hex(efisec[2] + var[2])))
            elif var[4] == index:
                ret.append((var[0], hex(efisec[2] + var[2])))
    return ret
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
    """Create (or reuse) a FILE_GUID-prefixed copy of an INF in the database
    directory and return a PathClass pointing at it.

    @param Path       Original PathClass object
    @param BaseName   New file base name (the overriding FILE_GUID)
    @param Workspace  Workspace root used to build the returned PathClass

    @retval PathClass whose .Path is the temporary INF copy
    """
    Filename = os.path.split(Path.File)[1]
    if '.' in Filename:
        # keep the original extension: <GUID><base>.<ext>
        Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
    else:
        Filename = BaseName + Path.BaseName
    #
    # If -N is specified on command line, cache is disabled
    # The directory has to be created
    #
    DbDir = os.path.split(GlobalData.gDatabasePath)[0]
    if not os.path.exists(DbDir):
        os.makedirs(DbDir)
    #
    # A temporary INF is copied to database path which must have write permission
    # The temporary will be removed at the end of build
    # In case of name conflict, the file name is
    # FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
    #
    TempFullPath = os.path.join(DbDir,
                                Filename)
    RtPath = PathClass(Path.File, Workspace)
    #
    # Modify the full path to temporary path, keep other unchanged
    #
    # To build same module more than once, the module path with FILE_GUID overridden has
    # the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
    # in DSC which is used as relative path by C files and other files in INF.
    # A trick was used: all module paths are PathClass instances, after the initialization
    # of PathClass, the PathClass.Path is overridden by the temporary INF path.
    #
    # The reason for creating a temporary INF is:
    # Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
    # the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
    # A different key for the same module is needed to create different output directory,
    # retrieve overridden PCDs, library instances.
    #
    # The BaseName is the FILE_GUID which is also the output directory name.
    #
    #
    RtPath.Path = TempFullPath
    RtPath.BaseName = BaseName
    #
    # If file exists, compare contents
    #
    if os.path.exists(TempFullPath):
        with open(str(Path), 'rb') as f1, open(TempFullPath, 'rb') as f2:
            # identical copy already cached — reuse it without re-copying
            if f1.read() == f2.read():
                return RtPath
    _TempInfs.append(TempFullPath)
    shutil.copy2(str(Path), TempFullPath)
    return RtPath
## Remove temporary created INFs whose paths were saved in _TempInfs
#
def ClearDuplicatedInf():
    """Delete every temporary INF recorded in _TempInfs, emptying the list."""
    while _TempInfs:
        TempPath = _TempInfs.pop()
        if os.path.exists(TempPath):
            os.remove(TempPath)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
    """Convert a registry-format GUID (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)
    into its C structure initializer form.

    @param Guid    The GUID string
    @retval string The GUID string in C structure style
    """
    Fields = Guid.split('-')
    # First three fields are plain integers.
    Head = ''.join('0x' + Fields[Index] + ', ' for Index in range(3))
    # Remaining bytes form the embedded 8-byte array.
    Tail = '0x' + Fields[3][0:2] + ', 0x' + Fields[3][2:4]
    for Index in range(0, 12, 2):
        Tail += ', 0x' + Fields[4][Index:Index + 2]
    return '{' + Head + '{' + Tail + '}}'
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
    """Convert a 16-byte C array GUID to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.

    @param GuidValue  The GUID value as a byte-array initializer string
    @retval string    The registry-format GUID, or '' on malformed input
    """
    Cleaned = GuidValue.lower()
    for Char in ('{', '}', ' ', ';'):
        Cleaned = Cleaned.replace(Char, '')
    ByteList = Cleaned.split(',')
    if len(ByteList) != 16:
        return ''
    try:
        Bytes = [int(Item, 16) for Item in ByteList]
        # The first three GUID fields are stored little-endian in the array.
        return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
            Bytes[3], Bytes[2], Bytes[1], Bytes[0],
            Bytes[5], Bytes[4],
            Bytes[7], Bytes[6],
            Bytes[8], Bytes[9],
            Bytes[10], Bytes[11], Bytes[12], Bytes[13], Bytes[14], Bytes[15]
        )
    except:
        return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
    """Convert a C-structure GUID string to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.

    @param GuidValue  The GUID value in C structure format
    @retval string    The registry-format GUID, or '' on malformed input
    """
    # Reject anything that doesn't look like a C GUID initializer up front.
    if not GlobalData.gGuidCFormatPattern.match(GuidValue):
        return ''
    Cleaned = GuidValue.lower()
    for Char in ('{', '}', ' ', ';'):
        Cleaned = Cleaned.replace(Char, '')
    FieldList = Cleaned.split(',')
    if len(FieldList) != 11:
        return ''
    try:
        return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % tuple(
            int(Field, 16) for Field in FieldList
        )
    except:
        return ''
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
    """Convert a C-structure GUID string to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx.

    @param GuidValue  The GUID value in C structure format
    @retval string    The GUID value in underscore-separated format
    """
    Cleaned = GuidValue.lower()
    for Char in ('{', '}', ' '):
        Cleaned = Cleaned.replace(Char, '')
    FieldList = Cleaned.split(',')
    if len(FieldList) != 11:
        # malformed input is a build error, reported through EdkLogger
        EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
    return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % tuple(
        int(Field, 16) for Field in FieldList
    )
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
    """Create Directory (including intermediate directories) if missing.

    @param Directory  directory path to create; None or blank is treated as
                      "nothing to do" and reported as success
    @retval True   the directory exists (already existed or was created)
    @retval False  creation failed (permissions, path exists as a file, ...)
    """
    if Directory is None or Directory.strip() == "":
        return True
    try:
        # exist_ok avoids the check-then-create race of the previous
        # os.access() + os.makedirs() sequence, and correctly fails when the
        # path exists but is not a directory.
        os.makedirs(Directory, exist_ok=True)
    except OSError:
        return False
    return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
    """Remove a directory, optionally deleting its contents first.

    @param Directory    directory to remove; None/blank/nonexistent is a no-op
    @param Recursively  if True, delete contained files and sub-directories
    """
    if Directory is None or Directory.strip() == "" or not os.path.exists(Directory):
        return
    if Recursively:
        # shutil.rmtree replaces the old os.chdir()-based walk, which mutated
        # the process-wide current directory (not thread-safe) and leaked the
        # changed cwd if a remove failed mid-way.
        shutil.rmtree(Directory)
    else:
        # non-recursive removal still requires the directory to be empty
        os.rmdir(Directory)
## Store content in file
#
# This method is used to save file only when its content is changed. This is
# quite useful for "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
    """Write Content to File only when it differs from what is on disk.

    Useful for make-style builds: an untouched timestamp means no rebuild.
    Binary vs. text mode is chosen from Content's type (bytes vs. str);
    IsBinaryFile is accepted for interface compatibility.

    @param File           The path of file
    @param Content        The new content of the file
    @param IsBinaryFile   The flag indicating if the file is binary file or not
    @retval True   The file content is changed and the file is renewed
    @retval False  The file content is the same
    """
    ModeSuffix = "b" if isinstance(Content, bytes) else ""
    if os.path.exists(File):
        try:
            with open(File, "r" + ModeSuffix) as Existing:
                if Existing.read() == Content:
                    return False
        except:
            EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
    DirName = os.path.dirname(File)
    if not CreateDirectory(DirName):
        EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
    else:
        if DirName == '':
            DirName = os.getcwd()
        if not os.access(DirName, os.W_OK):
            EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
    try:
        with open(File, "w" + ModeSuffix) as Fd:
            Fd.write(Content)
    except IOError as X:
        EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
    return True
## Make a Python object persistent on file system
#
# @param Data The object to be stored in file
# @param File The path of file to store the object
#
def DataDump(Data, File):
    """Persist a Python object to File with pickle (highest protocol).

    Failures are reported through EdkLogger without raising.

    @param Data  The object to be stored in file
    @param File  The path of file to store the object
    """
    try:
        with open(File, 'wb') as Output:
            pickle.dump(Data, Output, pickle.HIGHEST_PROTOCOL)
    except:
        EdkLogger.error("", FILE_OPEN_FAILURE, ExtraData=File, RaiseError=False)
## Restore a Python object from a file
#
# @param File The path of file stored the object
#
# @retval object A python object
# @retval None If failure in file operation
#
def DataRestore(File):
    """Load a pickled Python object from File.

    @param File  The path of file that stored the object
    @retval object  The unpickled data
    @retval None    If the file cannot be read or unpickled
    """
    try:
        with open(File, 'rb') as Input:
            return pickle.load(Input)
    except Exception as e:
        # best-effort restore: log the reason and signal failure with None
        EdkLogger.verbose("Failed to load [%s]\n\t%s" % (File, str(e)))
        return None
## Retrieve and cache the real path name in file system
#
# @param Root The root directory of path relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache:
    """Case-correcting path lookup with caching.

    Maps a requested path (possibly with wrong case) under Root to the real
    path on disk, caching directory listings as it descends.

    NOTE(review): _CACHE_ and _UPPER_CACHE_ are CLASS attributes, so they are
    shared by all DirCache instances regardless of Root — presumably only one
    instance per process is ever created; confirm before instantiating more.
    """
    _CACHE_ = set()        # real names seen so far (paths relative to Root)
    _UPPER_CACHE_ = {}     # upper-cased relative path -> real relative path
    def __init__(self, Root):
        self._Root = Root
        # pre-populate with the top-level entries of Root
        for F in os.listdir(Root):
            self._CACHE_.add(F)
            self._UPPER_CACHE_[F.upper()] = F
    # =[] operator
    def __getitem__(self, Path):
        # work with the part of Path below Root
        Path = Path[len(os.path.commonprefix([Path, self._Root])):]
        if not Path:
            return self._Root
        if Path and Path[0] == os.path.sep:
            Path = Path[1:]
        # fast path: exact or case-folded hit in the cache
        if Path in self._CACHE_:
            return os.path.join(self._Root, Path)
        UpperPath = Path.upper()
        if UpperPath in self._UPPER_CACHE_:
            return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
        # find the deepest ancestor directory already known to the cache
        IndexList = []
        LastSepIndex = -1
        SepIndex = Path.find(os.path.sep)
        while SepIndex > -1:
            Parent = UpperPath[:SepIndex]
            if Parent not in self._UPPER_CACHE_:
                break
            LastSepIndex = SepIndex
            SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
        if LastSepIndex == -1:
            return None
        # descend from that ancestor, caching each directory level visited
        Cwd = os.getcwd()
        os.chdir(self._Root)
        SepIndex = LastSepIndex
        while SepIndex > -1:
            Parent = Path[:SepIndex]
            ParentKey = UpperPath[:SepIndex]
            if ParentKey not in self._UPPER_CACHE_:
                os.chdir(Cwd)
                return None
            if Parent in self._CACHE_:
                ParentDir = Parent
            else:
                ParentDir = self._UPPER_CACHE_[ParentKey]
            for F in os.listdir(ParentDir):
                Dir = os.path.join(ParentDir, F)
                self._CACHE_.add(Dir)
                self._UPPER_CACHE_[Dir.upper()] = Dir
            SepIndex = Path.find(os.path.sep, SepIndex + 1)
        os.chdir(Cwd)
        # retry the lookup with the freshly populated cache
        if Path in self._CACHE_:
            return os.path.join(self._Root, Path)
        elif UpperPath in self._UPPER_CACHE_:
            return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
        return None
def RealPath(File, Dir='', OverrideDir=''):
    """Look up the real (case-correct) path of File via the global file table.

    Tries File under Dir first; when that misses and OverrideDir is given,
    retries under OverrideDir.
    """
    Candidate = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
    if not Candidate and OverrideDir:
        Candidate = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
    return Candidate
def RealPath2(File, Dir='', OverrideDir=''):
    """Resolve File against OverrideDir first, then Dir, preferring the
    global file table (GlobalData.gAllFiles) over the file system.

    @retval tuple  (relative file path, containing directory) on success,
                   (None, None) when the file cannot be found
    """
    NewFile = None
    if OverrideDir:
        NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
        if NewFile:
            # split the cached absolute path back into (relative, base);
            # the +1 skips the path separator when OverrideDir lacks one
            if OverrideDir[-1] == os.path.sep:
                return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
            else:
                return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
    if GlobalData.gAllFiles:
        NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
    if not NewFile:
        # fall back to a direct file-system check when the table misses
        NewFile = os.path.normpath(os.path.join(Dir, File))
        if not os.path.exists(NewFile):
            return None, None
    if NewFile:
        if Dir:
            if Dir[-1] == os.path.sep:
                return NewFile[len(Dir):], NewFile[0:len(Dir)]
            else:
                return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
        else:
            return NewFile, ''
    return None, None
## Get GUID value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def GuidValue(CName, PackageList, Inffile = None):
    """Search the given packages for the GUID named CName.

    Private GUIDs are visible only to INFs located under the declaring
    package's directory.

    @param CName        The CName of the GUID
    @param PackageList  List of packages to look in
    @param Inffile      The driver file (enables private-GUID filtering)
    @retval GUID value if found in any given package, else None
    """
    for Package in PackageList:
        Visible = list(Package.Guids.keys())
        if Inffile and Package._PrivateGuids:
            # hide private GUIDs from INFs outside the package
            if not Inffile.startswith(Package.MetaFile.Dir):
                Visible = [Key for Key in Package.Guids if Key not in Package._PrivateGuids]
        if CName in Visible:
            return Package.Guids[CName]
    return None
## Get Protocol value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def ProtocolValue(CName, PackageList, Inffile = None):
    """Search the given packages for the Protocol named CName.

    Private protocols are visible only to INFs located under the declaring
    package's directory.

    @param CName        The CName of the protocol
    @param PackageList  List of packages to look in
    @param Inffile      The driver file (enables private-protocol filtering)
    @retval protocol value if found in any given package, else None
    """
    for Package in PackageList:
        Visible = list(Package.Protocols.keys())
        if Inffile and Package._PrivateProtocols:
            # hide private protocols from INFs outside the package
            if not Inffile.startswith(Package.MetaFile.Dir):
                Visible = [Key for Key in Package.Protocols if Key not in Package._PrivateProtocols]
        if CName in Visible:
            return Package.Protocols[CName]
    return None
## Get PPI value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def PpiValue(CName, PackageList, Inffile = None):
    """Search the given packages for the PPI named CName.

    Private PPIs are visible only to INFs located under the declaring
    package's directory.

    @param CName        The CName of the PPI
    @param PackageList  List of packages to look in
    @param Inffile      The driver file (enables private-PPI filtering)
    @retval PPI value if found in any given package, else None
    """
    for Package in PackageList:
        Visible = list(Package.Ppis.keys())
        if Inffile and Package._PrivatePpis:
            # hide private PPIs from INFs outside the package
            if not Inffile.startswith(Package.MetaFile.Dir):
                Visible = [Key for Key in Package.Ppis if Key not in Package._PrivatePpis]
        if CName in Visible:
            return Package.Ppis[CName]
    return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like following
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
class TemplateString(object):
    """String template with ${name} placeholders and ${BEGIN}...${END}
    repeated sections; a repeated section is emitted once per element of the
    list values bound to its placeholders (all lists must have equal length).
    """
    _REPEAT_START_FLAG = "BEGIN"
    _REPEAT_END_FLAG = "END"
    class Section(object):
        """One fragment of the template plus the placeholders found inside it."""
        # value types that trigger section repetition
        _LIST_TYPES = [type([]), type(set()), type((0,))]
        def __init__(self, TemplateSection, PlaceHolderList):
            self._Template = TemplateSection
            self._PlaceHolderList = []
            # Split the section into sub-sections according to the position of placeholders
            if PlaceHolderList:
                self._SubSectionList = []
                SubSectionStart = 0
                #
                # The placeholders passed in must be in the format of
                #
                #   PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
                #
                for PlaceHolder, Start, End in PlaceHolderList:
                    self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
                    self._SubSectionList.append(TemplateSection[Start:End])
                    self._PlaceHolderList.append(PlaceHolder)
                    SubSectionStart = End
                if SubSectionStart < len(TemplateSection):
                    self._SubSectionList.append(TemplateSection[SubSectionStart:])
            else:
                self._SubSectionList = [TemplateSection]
        def __str__(self):
            return self._Template + " : " + str(self._PlaceHolderList)
        def Instantiate(self, PlaceHolderValues):
            """Substitute placeholder values into this section.

            List-valued placeholders repeat the section len(value) times;
            mismatched list lengths are reported as a build error.
            """
            RepeatTime = -1
            RepeatPlaceHolders = {}
            NonRepeatPlaceHolders = {}
            for PlaceHolder in self._PlaceHolderList:
                if PlaceHolder not in PlaceHolderValues:
                    continue
                Value = PlaceHolderValues[PlaceHolder]
                if type(Value) in self._LIST_TYPES:
                    if RepeatTime < 0:
                        RepeatTime = len(Value)
                    elif RepeatTime != len(Value):
                        EdkLogger.error(
                                    "TemplateString",
                                    PARAMETER_INVALID,
                                    "${%s} has different repeat time from others!" % PlaceHolder,
                                    ExtraData=str(self._Template)
                                    )
                    RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
                else:
                    NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
            # first pass: substitute scalar placeholders in place
            if NonRepeatPlaceHolders:
                StringList = []
                for S in self._SubSectionList:
                    if S not in NonRepeatPlaceHolders:
                        StringList.append(S)
                    else:
                        StringList.append(str(NonRepeatPlaceHolders[S]))
            else:
                StringList = self._SubSectionList
            # second pass: expand the section once per list element
            if RepeatPlaceHolders:
                TempStringList = []
                for Index in range(RepeatTime):
                    for S in StringList:
                        if S not in RepeatPlaceHolders:
                            TempStringList.append(S)
                        else:
                            TempStringList.append(str(RepeatPlaceHolders[S][Index]))
                StringList = TempStringList
            return "".join(StringList)
    ## Constructor
    def __init__(self, Template=None):
        self.String = ''
        self.IsBinary = False
        self._Template = Template
        self._TemplateSectionList = self._Parse(Template)
    ## str() operator
    #
    #   @retval     string  The string replaced
    #
    def __str__(self):
        return self.String
    ## Split the template string into fragments per the ${BEGIN} and ${END} flags
    #
    #   @retval     list    A list of TemplateString.Section objects
    #
    def _Parse(self, Template):
        SectionStart = 0
        SearchFrom = 0
        MatchEnd = 0
        PlaceHolderList = []
        TemplateSectionList = []
        while Template:
            MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
            if not MatchObj:
                # no more placeholders: the remainder is the last section
                if MatchEnd <= len(Template):
                    TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
                    TemplateSectionList.append(TemplateSection)
                break
            MatchString = MatchObj.group(1)
            MatchStart = MatchObj.start()
            MatchEnd = MatchObj.end()
            if MatchString == self._REPEAT_START_FLAG:
                # ${BEGIN}: close the current section and start a repeated one
                if MatchStart > SectionStart:
                    TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
                    TemplateSectionList.append(TemplateSection)
                SectionStart = MatchEnd
                PlaceHolderList = []
            elif MatchString == self._REPEAT_END_FLAG:
                # ${END}: the repeated section spans back to the last ${BEGIN}
                TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
                TemplateSectionList.append(TemplateSection)
                SectionStart = MatchEnd
                PlaceHolderList = []
            else:
                # ordinary placeholder: record its name and span in the section
                PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
            SearchFrom = MatchEnd
        return TemplateSectionList
    ## Replace the string template with dictionary of placeholders and append it to previous one
    #
    #   @param      AppendString    The string template to append
    #   @param      Dictionary      The placeholder dictionaries
    #
    def Append(self, AppendString, Dictionary=None):
        if Dictionary:
            SectionList = self._Parse(AppendString)
            self.String += "".join(S.Instantiate(Dictionary) for S in SectionList)
        else:
            self.String += AppendString
    ## Replace the string template with dictionary of placeholders
    #
    #   @param      Dictionary      The placeholder dictionaries
    #
    #   @retval     str             The string replaced with placeholder values
    #
    def Replace(self, Dictionary=None):
        return "".join(S.Instantiate(Dictionary) for S in self._TemplateSectionList)
## Progress indicator class
#
# This class makes use of thread to print progress on console.
#
class Progressor:
    """Console progress indicator driven by a background thread.

    All state is class-level: at most one progress thread exists per process,
    shared by every Progressor instance.
    """
    # for avoiding deadloop
    _StopFlag = None          # threading.Event telling the thread to stop
    _ProgressThread = None    # the single background printer thread
    _CheckInterval = 0.25     # polling granularity in seconds
    ## Constructor
    #
    #   @param      OpenMessage     The string printed before progress characters
    #   @param      CloseMessage    The string printed after progress characters
    #   @param      ProgressChar    The character used to indicate the progress
    #   @param      Interval        The interval in seconds between two progress characters
    #
    def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
        self.PromptMessage = OpenMessage
        self.CodaMessage = CloseMessage
        self.ProgressChar = ProgressChar
        self.Interval = Interval
        if Progressor._StopFlag is None:
            Progressor._StopFlag = threading.Event()
    ## Start to print progress characters
    #
    #   @param      OpenMessage     The string printed before progress characters
    #
    def Start(self, OpenMessage=None):
        if OpenMessage is not None:
            self.PromptMessage = OpenMessage
        Progressor._StopFlag.clear()
        if Progressor._ProgressThread is None:
            Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
            # .daemon attribute replaces the deprecated setDaemon()
            # (the alias was removed in Python 3.12)
            Progressor._ProgressThread.daemon = False
            Progressor._ProgressThread.start()
    ## Stop printing progress characters
    #
    #   @param      CloseMessage    The string printed after progress characters
    #
    def Stop(self, CloseMessage=None):
        OriginalCodaMessage = self.CodaMessage
        if CloseMessage is not None:
            self.CodaMessage = CloseMessage
        self.Abort()
        self.CodaMessage = OriginalCodaMessage
    ## Thread entry method
    def _ProgressThreadEntry(self):
        sys.stdout.write(self.PromptMessage + " ")
        sys.stdout.flush()
        TimeUp = 0.0
        # is_set() replaces the deprecated isSet() alias
        while not Progressor._StopFlag.is_set():
            if TimeUp <= 0.0:
                sys.stdout.write(self.ProgressChar)
                sys.stdout.flush()
                TimeUp = self.Interval
            time.sleep(self._CheckInterval)
            TimeUp -= self._CheckInterval
        sys.stdout.write(" " + self.CodaMessage + "\n")
        sys.stdout.flush()
    ## Abort the progress display
    @staticmethod
    def Abort():
        if Progressor._StopFlag is not None:
            Progressor._StopFlag.set()
        if Progressor._ProgressThread is not None:
            Progressor._ProgressThread.join()
            Progressor._ProgressThread = None
## A dict which can access its keys and/or values orderly
#
#  The class implements a new kind of dict which its keys or values can be
#  accessed in the order they are added into the dict. It guarantees the order
#  by making use of an internal list to keep a copy of keys.
#
class sdict(IterableUserDict):
    ## Constructor
    def __init__(self):
        IterableUserDict.__init__(self)
        self._key_list = []     # insertion-ordered copy of the keys

    ## [] operator
    def __setitem__(self, key, value):
        # Record the key on first assignment only so re-assignment keeps
        # the original insertion position.
        if key not in self._key_list:
            self._key_list.append(key)
        IterableUserDict.__setitem__(self, key, value)

    ## del operator
    def __delitem__(self, key):
        self._key_list.remove(key)
        IterableUserDict.__delitem__(self, key)

    ## used in "for k in dict" loop to ensure the correct order
    def __iter__(self):
        # BUGFIX: this used to return self.iterkeys(), which is a plain
        # list; iter() requires an iterator, so "for k in dict" raised
        # "iter() returned non-iterator of type 'list'".
        return iter(self._key_list)

    ## len() support
    def __len__(self):
        return len(self._key_list)

    ## "in" test support
    def __contains__(self, key):
        return key in self._key_list

    ## indexof support
    def index(self, key):
        return self._key_list.index(key)

    ## insert support: place newkey/newvalue BEFORE or AFTER an existing key
    def insert(self, key, newkey, newvalue, order):
        index = self._key_list.index(key)
        if order == 'BEFORE':
            self._key_list.insert(index, newkey)
            IterableUserDict.__setitem__(self, newkey, newvalue)
        elif order == 'AFTER':
            self._key_list.insert(index + 1, newkey)
            IterableUserDict.__setitem__(self, newkey, newvalue)

    ## append support: merge another sdict, keeping existing key positions
    def append(self, sdict):
        for key in sdict.keys():
            if key not in self._key_list:
                self._key_list.append(key)
            IterableUserDict.__setitem__(self, key, sdict[key])

    def has_key(self, key):
        return key in self._key_list

    ## Empty the dict
    def clear(self):
        self._key_list = []
        IterableUserDict.clear(self)

    ## Return a copy of keys
    def keys(self):
        keys = []
        for key in self._key_list:
            keys.append(key)
        return keys

    ## Return a copy of values
    def values(self):
        values = []
        for key in self._key_list:
            values.append(self[key])
        return values

    ## Return a copy of (key, value) list
    def items(self):
        items = []
        for key in self._key_list:
            items.append((key, self[key]))
        return items

    ## Iteration support
    def iteritems(self):
        return iter(self.items())

    ## Keys interation support (NB: returns a list, not an iterator, for
    ## backward compatibility with existing callers)
    def iterkeys(self):
        return self.keys()

    ## Values interation support
    def itervalues(self):
        return self.values()

    ## Return value related to a key, and remove the (key, value) from the dict
    def pop(self, key, *dv):
        value = None
        if key in self._key_list:
            value = self[key]
            self.__delitem__(key)
        elif len(dv) != 0 :
            value = dv[0]
        return value

    ## Return (key, value) pair, and remove the (key, value) from the dict
    def popitem(self):
        # Removes the most recently inserted pair (LIFO).
        key = self._key_list[-1]
        value = self[key]
        self.__delitem__(key)
        return key, value

    def update(self, dict=None, **kwargs):
        if dict is not None:
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            for k, v in kwargs.items():
                self[k] = v
## Dictionary with restricted keys
#
#  Only the keys supplied at construction time may ever be assigned;
#  deletion-style mutation is reported as an error through EdkLogger.
#
class rdict(dict):
    ## Constructor: pre-populate every allowed key with an empty string
    def __init__(self, KeyList):
        for AllowedKey in KeyList:
            dict.__setitem__(self, AllowedKey, "")

    ## []= operator: reject any key outside the initial key set
    def __setitem__(self, key, value):
        if key not in self:
            EdkLogger.error("RestrictedDict", ATTRIBUTE_SET_FAILURE, "Key [%s] is not allowed" % key,
                            ExtraData=", ".join(dict.keys(self)))
        dict.__setitem__(self, key, value)

    ## =[] operator: unknown keys read as empty string instead of raising
    def __getitem__(self, key):
        return dict.__getitem__(self, key) if key in self else ""

    ## del operator is forbidden
    def __delitem__(self, key):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="del")

    ## Empty the dict: reset every allowed key back to the empty string
    def clear(self):
        for AllowedKey in self:
            self.__setitem__(AllowedKey, "")

    ## pop is forbidden (would remove an allowed key)
    def pop(self, key, *dv):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="pop")

    ## popitem is forbidden (would remove an allowed key)
    def popitem(self):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="popitem")
## Dictionary using prioritized list as key
#
#  A nested dictionary addressed by a fixed-length key tuple/list; missing
#  trailing key parts and wildcard names ('COMMON', 'DEFAULT', 'ALL', '*',
#  'PLATFORM') fall back to the 'COMMON' slot at each level.
#
class tdict:
    _ListType = type([])
    _TupleType = type(())
    _Wildcard = 'COMMON'
    _ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', '*', 'PLATFORM']

    ## Constructor
    #
    #   @param  _Single_    True: lookups return one best-match value;
    #                       False (greedy): lookups return a list of all matches
    #   @param  _Level_     Number of key components this dict consumes
    def __init__(self, _Single_=False, _Level_=2):
        self._Level_ = _Level_
        self.data = {}      # first key component -> nested tdict (or value at level 1)
        self._Single_ = _Single_

    # =[] operator
    def __getitem__(self, key):
        KeyType = type(key)
        RestKeys = None
        # Split the key into the component for this level plus the remainder;
        # a short key is padded out with wildcards.
        if KeyType == self._ListType or KeyType == self._TupleType:
            FirstKey = key[0]
            if len(key) > 1:
                RestKeys = key[1:]
            elif self._Level_ > 1:
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        else:
            FirstKey = key
            if self._Level_ > 1:
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]

        # Any wildcard spelling (or None) is normalized to 'COMMON'.
        if FirstKey is None or str(FirstKey).upper() in self._ValidWildcardList:
            FirstKey = self._Wildcard

        if self._Single_:
            return self._GetSingleValue(FirstKey, RestKeys)
        else:
            return self._GetAllValues(FirstKey, RestKeys)

    ## Return the first matching value, preferring the exact key and falling
    #  back to the wildcard slot (or, for a wildcard query, any slot).
    def _GetSingleValue(self, FirstKey, RestKeys):
        Value = None
        #print "%s-%s" % (FirstKey, self._Level_) ,
        if self._Level_ > 1:
            if FirstKey == self._Wildcard:
                # Wildcard query: try the wildcard slot first, then scan all slots.
                if FirstKey in self.data:
                    Value = self.data[FirstKey][RestKeys]
                if Value is None:
                    for Key in self.data:
                        Value = self.data[Key][RestKeys]
                        if Value is not None: break
            else:
                # Exact query: exact slot first, wildcard slot as fallback.
                if FirstKey in self.data:
                    Value = self.data[FirstKey][RestKeys]
                if Value is None and self._Wildcard in self.data:
                    #print "Value=None"
                    Value = self.data[self._Wildcard][RestKeys]
        else:
            # Leaf level: data holds the stored values directly.
            if FirstKey == self._Wildcard:
                if FirstKey in self.data:
                    Value = self.data[FirstKey]
                if Value is None:
                    for Key in self.data:
                        Value = self.data[Key]
                        if Value is not None: break
            else:
                if FirstKey in self.data:
                    Value = self.data[FirstKey]
                elif self._Wildcard in self.data:
                    Value = self.data[self._Wildcard]
        return Value

    ## Return ALL matching values as a list (greedy mode).
    def _GetAllValues(self, FirstKey, RestKeys):
        Value = []
        if self._Level_ > 1:
            if FirstKey == self._Wildcard:
                for Key in self.data:
                    Value += self.data[Key][RestKeys]
            else:
                if FirstKey in self.data:
                    Value += self.data[FirstKey][RestKeys]
                if self._Wildcard in self.data:
                    Value += self.data[self._Wildcard][RestKeys]
        else:
            if FirstKey == self._Wildcard:
                for Key in self.data:
                    Value.append(self.data[Key])
            else:
                if FirstKey in self.data:
                    Value.append(self.data[FirstKey])
                if self._Wildcard in self.data:
                    Value.append(self.data[self._Wildcard])
        return Value

    ## []= operator
    def __setitem__(self, key, value):
        KeyType = type(key)
        RestKeys = None
        # Same key-splitting/padding scheme as __getitem__.
        if KeyType == self._ListType or KeyType == self._TupleType:
            FirstKey = key[0]
            if len(key) > 1:
                RestKeys = key[1:]
            else:
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        else:
            FirstKey = key
            if self._Level_ > 1:
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]

        if FirstKey in self._ValidWildcardList:
            FirstKey = self._Wildcard

        # Create the nested level on demand, then either recurse or store.
        if FirstKey not in self.data and self._Level_ > 0:
            self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)

        if self._Level_ > 1:
            self.data[FirstKey][RestKeys] = value
        else:
            self.data[FirstKey] = value

    ## Switch the whole tree (recursively) to return-all-matches mode.
    def SetGreedyMode(self):
        self._Single_ = False
        if self._Level_ > 1:
            for Key in self.data:
                self.data[Key].SetGreedyMode()

    ## Switch the whole tree (recursively) to best-single-match mode.
    def SetSingleMode(self):
        self._Single_ = True
        if self._Level_ > 1:
            for Key in self.data:
                self.data[Key].SetSingleMode()

    ## Return the set of key components used at the given nesting depth.
    def GetKeys(self, KeyIndex=0):
        assert KeyIndex >= 0
        if KeyIndex == 0:
            return set(self.data.keys())
        else:
            keys = set()
            for Key in self.data:
                keys |= self.data[Key].GetKeys(KeyIndex - 1)
            return keys
def IsFieldValueAnArray (Value):
    """Check whether a PCD field value string denotes an array-style value.

    @param  Value   The field value string (surrounding blanks ignored)
    @retval True    GUID(...), {...}, or a quoted string/char-constant
                    holding more than one character
    @retval False   Anything else, including an empty string
    """
    Value = Value.strip()
    if not Value:
        # Guard: the indexing below (Value[0] / Value[-1]) would raise
        # IndexError on an empty string.
        return False
    if Value.startswith(TAB_GUID) and Value.endswith(')'):
        return True
    # Quoted strings count as arrays only when they hold >1 character
    # (the wrapping list() calls in the original added nothing).
    if Value.startswith('L"') and Value.endswith('"') and len(Value[2:-1]) > 1:
        return True
    if Value[0] == '"' and Value[-1] == '"' and len(Value[1:-1]) > 1:
        return True
    if Value[0] == '{' and Value[-1] == '}':
        return True
    if Value.startswith("L'") and Value.endswith("'") and len(Value[2:-1]) > 1:
        return True
    if Value[0] == "'" and Value[-1] == "'" and len(Value[1:-1]) > 1:
        return True
    return False
def AnalyzePcdExpression(Setting):
    """Split a PCD setting string on TAB_VALUE_SPLIT ('|'), ignoring '|'
    characters nested inside quoted strings or parentheses.

    @param  Setting   The raw PCD setting string
    @retval list      The stripped fields, in order
    """
    Setting = Setting.strip()
    # There might be escaped quote in a string: \", \\\" , \', \\\'
    Data = Setting
    # There might be '|' in string and in ( ... | ... ), replace it with '-'
    NewStr = ''
    InSingleQuoteStr = False
    InDoubleQuoteStr = False
    Pair = 0
    for Index, ch in enumerate(Data):
        if ch == '"' and not InSingleQuoteStr:
            # BUGFIX: a quote at position 0 cannot be escaped; the original
            # Data[Index - 1] wrapped around to the LAST character of the
            # string and could suppress the toggle.
            if Index == 0 or Data[Index - 1] != '\\':
                InDoubleQuoteStr = not InDoubleQuoteStr
        elif ch == "'" and not InDoubleQuoteStr:
            if Index == 0 or Data[Index - 1] != '\\':
                InSingleQuoteStr = not InSingleQuoteStr
        elif ch == '(' and not (InSingleQuoteStr or InDoubleQuoteStr):
            Pair += 1
        elif ch == ')' and not (InSingleQuoteStr or InDoubleQuoteStr):
            Pair -= 1

        # Mask protected separators so the split below skips them; the
        # masked copy is only used to FIND split positions.
        if (Pair > 0 or InSingleQuoteStr or InDoubleQuoteStr) and ch == TAB_VALUE_SPLIT:
            NewStr += '-'
        else:
            NewStr += ch
    FieldList = []
    StartPos = 0
    while True:
        # Find split points in the masked string, slice the ORIGINAL one.
        Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
        if Pos < 0:
            FieldList.append(Setting[StartPos:].strip())
            break
        FieldList.append(Setting[StartPos:Pos].strip())
        StartPos = Pos + 1
    return FieldList
def ParseDevPathValue (Value):
    """Convert a textual device path to a C byte-array string via the
    external 'DevicePath' tool.

    @param  Value   The device path text
    @retval tuple   ('{b0,b1,...}' byte-list string, size in bytes)
    @raises BadExpression when the tool cannot be launched or reports an error
    """
    if '\\' in Value:
        # BUGFIX: the original discarded the result of replace(); the
        # normalized path was never used.
        Value = Value.replace('\\', '/').replace(' ', '')

    Cmd = 'DevicePath ' + '"' + Value + '"'
    try:
        # shell=True is required here because 'DevicePath' is resolved via
        # the shell; Value originates from build metadata, not end users.
        p = subprocess.Popen(Cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        try:
            out, err = p.communicate()
        finally:
            # BUGFIX: the original 'finally' referenced p unconditionally and
            # raised NameError when Popen itself failed; it also called the
            # private subprocess._cleanup().
            p.stdout.close()
            p.stderr.close()
    except Exception as X:
        raise BadExpression("DevicePath: %s" % (str(X)) )
    if err:
        raise BadExpression("DevicePath: %s" % str(err))
    Size = len(out.split())
    out = ','.join(out.decode(encoding='utf-8', errors='ignore').split())
    return '{' + out + '}', Size
def ParseFieldValue (Value):
    """Parse a PCD field value into (integer value, size in bytes).

    Handles: plain integers, UINT8/16/32/64(...) wrappers, GUID(...),
    L"..."/"..." strings, L'...'/'...' character constants, {...} byte
    arrays, DEVICE_PATH(...), hex/decimal literals and TRUE/FALSE.
    Anything unrecognized is returned unchanged with size 1.

    @raises BadExpression on malformed input or size overflow
    """
    if isinstance(Value, type(0)):
        # Already an integer: size is the minimal byte count holding it
        # (0 yields size 0 here — callers treat that as "no data").
        return Value, (Value.bit_length() + 7) // 8
    if not isinstance(Value, type('')):
        raise BadExpression('Type %s is %s' %(Value, type(Value)))
    Value = Value.strip()
    # Typed wrappers: parse the inner text recursively, then enforce the
    # wrapper's fixed size.
    if Value.startswith(TAB_UINT8) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 1:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 1
    if Value.startswith(TAB_UINT16) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 2:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 2
    if Value.startswith(TAB_UINT32) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 4:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 4
    if Value.startswith(TAB_UINT64) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 8:
            raise BadExpression('Value (%s) Size larger than %d' % (Value, Size))
        return Value, 8
    if Value.startswith(TAB_GUID) and Value.endswith(')'):
        # GUID(...): accept a {struct} form, a quoted form, or a bare
        # registry-format string; normalize to a 16-byte little-endian array.
        Value = Value.split('(', 1)[1][:-1].strip()
        if Value[0] == '{' and Value[-1] == '}':
            TmpValue = GuidStructureStringToGuidString(Value)
            if not TmpValue:
                raise BadExpression("Invalid GUID value string %s" % Value)
            Value = TmpValue
        if Value[0] == '"' and Value[-1] == '"':
            Value = Value[1:-1]
        try:
            Value = "{" + ','.join([str(i) for i in uuid.UUID(Value).bytes_le]) + "}"
        except ValueError as Message:
            raise BadExpression(Message)
        Value, Size = ParseFieldValue(Value)
        return Value, 16
    if Value.startswith('L"') and Value.endswith('"'):
        # Unicode String
        # translate escape character
        Value = Value[1:]
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        List = list(Value)
        List.reverse()
        # Pack UTF-16 code units little-endian-first into one integer;
        # size includes the 2-byte NUL terminator.
        Value = 0
        for Char in List:
            Value = (Value << 16) | ord(Char)
        return Value, (len(List) + 1) * 2
    if Value.startswith('"') and Value.endswith('"'):
        # ASCII String
        # translate escape character
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        List = list(Value)
        List.reverse()
        # One byte per character; size includes the NUL terminator.
        Value = 0
        for Char in List:
            Value = (Value << 8) | ord(Char)
        return Value, len(List) + 1
    if Value.startswith("L'") and Value.endswith("'"):
        # Unicode Character Constant
        # translate escape character
        Value = Value[1:]
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        List = list(Value)
        if len(List) == 0:
            raise BadExpression('Length %s is %s' % (Value, len(List)))
        List.reverse()
        # Character constants get NO terminator, unlike strings above.
        Value = 0
        for Char in List:
            Value = (Value << 16) | ord(Char)
        return Value, len(List) * 2
    if Value.startswith("'") and Value.endswith("'"):
        # Character constant
        # translate escape character
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        List = list(Value)
        if len(List) == 0:
            raise BadExpression('Length %s is %s' % (Value, len(List)))
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 8) | ord(Char)
        return Value, len(List)
    if Value.startswith('{') and Value.endswith('}'):
        # Byte array
        Value = Value[1:-1]
        List = [Item.strip() for Item in Value.split(',')]
        List.reverse()
        # Each element may itself be any parsable field value; elements are
        # packed least-significant-first into one big integer.
        Value = 0
        RetSize = 0
        for Item in List:
            ItemValue, Size = ParseFieldValue(Item)
            RetSize += Size
            for I in range(Size):
                Value = (Value << 8) | ((ItemValue >> 8 * I) & 0xff)
        return Value, RetSize
    if Value.startswith('DEVICE_PATH(') and Value.endswith(')'):
        Value = Value.replace("DEVICE_PATH(", '').rstrip(')')
        Value = Value.strip().strip('"')
        return ParseDevPathValue(Value)
    if Value.lower().startswith('0x'):
        try:
            Value = int(Value, 16)
        except:
            raise BadExpression("invalid hex value: %s" % Value)
        if Value == 0:
            return 0, 1
        return Value, (Value.bit_length() + 7) // 8
    if Value[0].isdigit():
        Value = int(Value, 10)
        if Value == 0:
            return 0, 1
        return Value, (Value.bit_length() + 7) // 8
    if Value.lower() == 'true':
        return 1, 1
    if Value.lower() == 'false':
        return 0, 1
    # Fall-through: leave the raw text for the caller to interpret.
    return Value, 1
## AnalyzeDscPcd
#
#  Analyze DSC PCD value, since there is no data type info in DSC
#  This function is used to match functions (AnalyzePcdData) used for retrieving PCD value from database
#  1. Feature flag: TokenSpace.PcdCName|PcdValue
#  2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
#  3. Dynamic default:
#     TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
#     TokenSpace.PcdCName|PcdValue
#  4. Dynamic VPD:
#     TokenSpace.PcdCName|VpdOffset[|VpdValue]
#     TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
#  5. Dynamic HII:
#     TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
#  PCD value needs to be located in such kind of string, and the PCD value might be an expression in which
#    there might have "|" operator, also in string value.
#
#  @param Setting: String contain information described above with "TokenSpace.PcdCName|" stripped
#  @param PcdType: PCD type: feature, fixed, dynamic default VPD HII
#  @param DataType: The datum type of PCD: VOID*, UNIT, BOOL
#  @retval:
#    ValueList: A List contain fields described above
#    IsValid:   True if conforming EBNF, otherwise False
#    Index:     The index where PcdValue is in ValueList
#
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
    # Quote/paren-aware split; '|' inside strings or (...) is preserved.
    FieldList = AnalyzePcdExpression(Setting)

    IsValid = True
    if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
        # Form 2/3: Value[|VOID*[|MaxSize]] — datum type may only be VOID*
        Value = FieldList[0]
        Size = ''
        if len(FieldList) > 1 and FieldList[1]:
            DataType = FieldList[1]
            if FieldList[1] != TAB_VOID:
                IsValid = False
        if len(FieldList) > 2:
            Size = FieldList[2]
        if IsValid:
            # Non-VOID* PCDs take a single field; VOID* allows up to three.
            if DataType == "":
                IsValid = (len(FieldList) <= 1)
            else:
                IsValid = (len(FieldList) <= 3)

        if Size:
            # MaxSize must be a decimal or 0x-hex integer.
            try:
                int(Size, 16) if Size.upper().startswith("0X") else int(Size)
            except:
                IsValid = False
                Size = -1
        return [str(Value), DataType, str(Size)], IsValid, 0
    elif PcdType == MODEL_PCD_FEATURE_FLAG:
        # Form 1: a single boolean value field
        Value = FieldList[0]
        Size = ''
        IsValid = (len(FieldList) <= 1)
        return [Value, DataType, str(Size)], IsValid, 0
    elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
        # Form 4: VpdOffset[|VpdValue] or VpdOffset[|MaxSize[|VpdValue]]
        # (the MaxSize field only exists for VOID* PCDs)
        VpdOffset = FieldList[0]
        Value = Size = ''
        if not DataType == TAB_VOID:
            if len(FieldList) > 1:
                Value = FieldList[1]
        else:
            if len(FieldList) > 1:
                Size = FieldList[1]
            if len(FieldList) > 2:
                Value = FieldList[2]
        if DataType == "":
            IsValid = (len(FieldList) <= 1)
        else:
            IsValid = (len(FieldList) <= 3)
        if Size:
            try:
                int(Size, 16) if Size.upper().startswith("0X") else int(Size)
            except:
                IsValid = False
                Size = -1
        # NOTE: the value lands at index 2 of the returned list here.
        return [VpdOffset, str(Size), Value], IsValid, 2
    elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
        # Form 5: HiiString|VariableGuid|VariableOffset[|HiiValue[|Attribute]]
        IsValid = (3 <= len(FieldList) <= 5)
        HiiString = FieldList[0]
        Guid = Offset = Value = Attribute = ''
        if len(FieldList) > 1:
            Guid = FieldList[1]
        if len(FieldList) > 2:
            Offset = FieldList[2]
        if len(FieldList) > 3:
            Value = FieldList[3]
            if not Value:
                IsValid = False
        if len(FieldList) > 4:
            Attribute = FieldList[4]
        return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
    # Unknown PCD type: report invalid.
    return [], False, 0
## AnalyzePcdData
#
#  Analyze the pcd Value, Datum type and TokenNumber.
#  Used to avoid split issue while the value string contain "|" character
#
#  @param[in] Setting:  A String contain value/datum type/token number information;
#
#  @retval   ValueList: A List contain value, datum type and toke number.
#
def AnalyzePcdData(Setting):
    ValueList = ['', '', '']

    # A leading quoted value may itself contain '|'; capture it first so the
    # split below cannot break it apart.
    ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
    QuotedValue = ValueRe.findall(Setting)
    if QuotedValue:
        Setting = re.sub(ValueRe, '', Setting)

    Fields = Setting.split(TAB_VALUE_SPLIT)
    ValueList[0:len(Fields)] = Fields

    if QuotedValue:
        # Restore the quoted value that was blanked out before splitting.
        ValueList[0] = QuotedValue[0]
    return ValueList
## check format of PCD value against its the datum type
#
#  For PCD value setting
#
def CheckPcdDatum(Type, Value):
    """Validate a PCD value string against its datum Type.

    @retval (True, '')        value is well-formed
    @retval (True, 'StructurePcd')  Type is not a base type
    @retval (False, reason)   value is rejected, with explanation
    """
    if Type == TAB_VOID:
        ValueRe = re.compile(r'\s*L?\".*\"\s*$')
        if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
                or (Value.startswith('{') and Value.endswith('}'))
                # BUGFIX: the original read  L'  or  (' and endswith ')
                # because of and/or precedence, so unterminated values like
                # "L'x" were accepted.
                or ((Value.startswith("L'") or Value.startswith("'")) and Value.endswith("'"))
               ):
            return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
                          ", \"...\" or \'...\' for string, L\"...\" or L\'...\' for unicode string" % (Value, Type)
        elif ValueRe.match(Value):
            # Check the chars in UnicodeString or CString is printable
            if Value.startswith("L"):
                Value = Value[2:-1]
            else:
                Value = Value[1:-1]
            Printset = set(string.printable)
            Printset.remove(TAB_PRINTCHAR_VT)
            Printset.add(TAB_PRINTCHAR_BS)
            Printset.add(TAB_PRINTCHAR_NUL)
            if not set(Value).issubset(Printset):
                PrintList = sorted(Printset)
                return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
    elif Type == 'BOOLEAN':
        if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
            return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
                          ", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
    elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
        try:
            Val = int(Value, 0)
        except:
            try:
                # int(x, 0) rejects C-style octal like '017'; retry as decimal
                # after dropping leading zeros.
                Val = int(Value.lstrip('0'))
            except:
                return False, "Invalid value [%s] of type [%s];"\
                              " must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
        if Val > MAX_VAL_TYPE[Type]:
            return False, "Too large PCD value[%s] for datum type [%s]" % (Value, Type)
        if Val < 0:
            return False, "PCD can't be set to negative value[%s] for datum type [%s]" % (Value, Type)
    else:
        # Not a base type: assume a structure PCD, validated elsewhere.
        return True, "StructurePcd"

    return True, ""
## Split command line option string to list
#
#  subprocess.Popen needs the args to be a sequence. Otherwise there's problem
#  in non-windows platform to launch command
#
def SplitOption(OptionString):
    """Split OptionString into pieces at '/' or '-' characters that follow
    whitespace, keeping quoted sections intact."""
    OptionList = []
    LastChar = " "
    OptionStart = 0
    QuotationMark = ""
    for Index, CurrentChar in enumerate(OptionString):
        if CurrentChar in ('"', "'"):
            # Track entering/leaving a quoted region; quotes never end an option.
            if QuotationMark == CurrentChar:
                QuotationMark = ""
            elif not QuotationMark:
                QuotationMark = CurrentChar
            continue
        if QuotationMark:
            # Inside quotes nothing splits, and LastChar stays untouched.
            continue

        if CurrentChar in ("/", "-") and LastChar in (" ", "\t", "\r", "\n"):
            # A new option starts here; emit the previous one, dropping the
            # single separating whitespace character.
            if Index > OptionStart:
                OptionList.append(OptionString[OptionStart:Index - 1])
            OptionStart = Index
        LastChar = CurrentChar
    OptionList.append(OptionString[OptionStart:])
    return OptionList
def CommonPath(PathList):
    """Return the longest common leading path shared by every path in PathList.

    min()/max() pick the lexicographically smallest and largest entries; any
    component prefix common to those two is common to the whole list.
    """
    Smallest = min(PathList).split(os.path.sep)
    Largest = max(PathList).split(os.path.sep)
    for Depth in range(min(len(Smallest), len(Largest))):
        if Smallest[Depth] != Largest[Depth]:
            return os.path.sep.join(Smallest[:Depth])
    return os.path.sep.join(Smallest)
#
# Convert string to C format array
#
def ConvertStringToByteArray(Value):
    """Normalize Value into a C byte-array literal '{...}', or None if malformed.

    Accepts an existing {..} array, an ASCII "..." string, or a unicode
    L"..." string; string forms gain a terminating 0 element.
    """
    Value = Value.strip()
    if not Value:
        return None

    if Value[0] == '{':
        # Already an array literal: validate and canonicalize each element.
        if not Value.endswith('}'):
            return None
        Value = Value.replace(' ', '').replace('{', '').replace('}', '')
        Elements = Value.split(',')
        try:
            Elements = [str(int(Element, 0)) for Element in Elements]
        except ValueError:
            return None
        return '{' + ','.join(Elements) + '}'

    Unicode = False
    if Value.startswith('L"'):
        if not Value.endswith('"'):
            return None
        Value = Value[1:]
        Unicode = True
    elif not Value.startswith('"') or not Value.endswith('"'):
        return None

    # NOTE: eval() is kept from the original to translate escape sequences;
    # inputs come from build metadata, not untrusted users.
    Value = eval(Value)  # translate escape character
    Mask = 0x10000 if Unicode else 0x100
    Bytes = [str(ord(Char) % Mask) for Char in Value]
    return '{' + ','.join(Bytes + ['0']) + '}'
class PathClass(object):
    """A file path plus its workspace context (root, alternate root, arch,
    build target/toolchain metadata), normalized at construction time."""
    def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
                 Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
        self.Arch = Arch
        self.File = str(File)
        # Absolute files ignore any supplied roots.
        if os.path.isabs(self.File):
            self.Root = ''
            self.AlterRoot = ''
        else:
            self.Root = str(Root)
            self.AlterRoot = str(AlterRoot)

        # Remove any '.' and '..' in path
        if self.Root:
            self.Root = mws.getWs(self.Root, self.File)
            self.Path = os.path.normpath(os.path.join(self.Root, self.File))
            self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
            # eliminate the side-effect of 'C:'
            if self.Root[-1] == ':':
                self.Root += os.path.sep
            # file path should not start with path separator
            if self.Root[-1] == os.path.sep:
                self.File = self.Path[len(self.Root):]
            else:
                self.File = self.Path[len(self.Root) + 1:]
        else:
            self.Path = os.path.normpath(self.File)

        self.SubDir, self.Name = os.path.split(self.File)
        self.BaseName, self.Ext = os.path.splitext(self.Name)

        if self.Root:
            if self.SubDir:
                self.Dir = os.path.join(self.Root, self.SubDir)
            else:
                self.Dir = self.Root
        else:
            self.Dir = self.SubDir

        # Binary files carry an explicit Type; others derive it from the
        # (lower-cased) file extension.
        if IsBinary:
            self.Type = Type
        else:
            self.Type = self.Ext.lower()

        self.IsBinary = IsBinary
        self.Target = Target
        self.TagName = TagName
        self.ToolCode = ToolCode
        self.ToolChainFamily = ToolChainFamily

    ## Convert the object of this class to a string
    #
    #  Convert member Path of the class to a string
    #
    #  @retval string Formatted String
    #
    def __str__(self):
        return self.Path

    ## Override __eq__ function
    #
    #  Check whether PathClass are the same
    #
    #  @retval False The two PathClass are different
    #  @retval True  The two PathClass are the same
    #
    def __eq__(self, Other):
        if isinstance(Other, type(self)):
            return self.Path == Other.Path
        else:
            return self.Path == str(Other)

    ## Override __cmp__ function
    #
    #  Customize the comparsion operation of two PathClass
    #  NOTE(review): __cmp__ is a Python 2 protocol; it is never invoked by
    #  Python 3 comparison operators.
    #
    #  @retval 0     The two PathClass are different
    #  @retval -1    The first PathClass is less than the second PathClass
    #  @retval 1     The first PathClass is Bigger than the second PathClass
    def __cmp__(self, Other):
        if isinstance(Other, type(self)):
            OtherKey = Other.Path
        else:
            OtherKey = str(Other)

        SelfKey = self.Path
        if SelfKey == OtherKey:
            return 0
        elif SelfKey > OtherKey:
            return 1
        else:
            return -1

    ## Override __hash__ function
    #
    #  Use Path as key in hash table
    #
    #  @retval string Key for hash table
    #
    def __hash__(self):
        return hash(self.Path)

    # Case-normalized path, computed once (cached_property).
    @cached_property
    def Key(self):
        return self.Path.upper()

    # st_mtime of the file on disk (index 8 of os.stat's tuple).
    @property
    def TimeStamp(self):
        return os.stat(self.Path)[8]

    def Validate(self, Type='', CaseSensitive=True):
        """Resolve the path on disk and check type/case consistency.

        @retval (0, '') on success, or (error-code, message) where the code
                is FILE_TYPE_MISMATCH / FILE_NOT_FOUND / FILE_CASE_MISMATCH
        """
        if GlobalData.gCaseInsensitive:
            CaseSensitive = False
        if Type and Type.lower() != self.Type:
            return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)

        # RealPath2 returns the on-disk (case-accurate) path split into
        # file and root; both empty means the file was not found.
        RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
        if not RealRoot and not RealFile:
            RealFile = self.File
            if self.AlterRoot:
                RealFile = os.path.join(self.AlterRoot, self.File)
            elif self.Root:
                RealFile = os.path.join(self.Root, self.File)
            if len (mws.getPkgPath()) == 0:
                return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
            else:
                return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))

        ErrorCode = 0
        ErrorInfo = ''
        if RealRoot != self.Root or RealFile != self.File:
            # On-disk spelling differs: a case mismatch is an error only in
            # case-sensitive mode; either way, adopt the real spelling.
            if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
                ErrorCode = FILE_CASE_MISMATCH
                ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"

            self.SubDir, self.Name = os.path.split(RealFile)
            self.BaseName, self.Ext = os.path.splitext(self.Name)
            if self.SubDir:
                self.Dir = os.path.join(RealRoot, self.SubDir)
            else:
                self.Dir = RealRoot
            self.File = RealFile
            self.Root = RealRoot
            self.Path = os.path.join(RealRoot, RealFile)
        return ErrorCode, ErrorInfo
## Parse PE image to get the required PE informaion.
#
class PeImageClass():
    ## Constructor
    #
    #   @param  File FilePath of PeImage
    #
    def __init__(self, PeFile):
        """Parse PeFile; on success IsValid is True and Size/EntryPoint/
        SectionAlignment/SectionHeaderList are filled in, otherwise
        ErrorInfo describes the failure."""
        self.FileName = PeFile
        self.IsValid = False
        self.Size = 0
        self.EntryPoint = 0
        self.SectionAlignment = 0
        self.SectionHeaderList = []
        self.ErrorInfo = ''
        try:
            PeObject = open(PeFile, 'rb')
        except:
            self.ErrorInfo = self.FileName + ' can not be found\n'
            return
        # BUGFIX: the original closed the file only on the success path,
        # leaking the handle on every early-return error below.
        try:
            # Read DOS header
            ByteArray = array.array('B')
            ByteArray.fromfile(PeObject, 0x3E)
            ByteList = ByteArray.tolist()
            # DOS signature should be 'MZ'
            if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
                self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
                return
            # Read 4 byte PE Signature
            PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
            PeObject.seek(PeOffset)
            ByteArray = array.array('B')
            ByteArray.fromfile(PeObject, 4)
            # PE signature should be 'PE\0\0'
            # BUGFIX: array.tostring() was removed in Python 3.9; tobytes()
            # is the documented replacement.
            if ByteArray.tobytes() != b'PE\0\0':
                self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
                return

            # Read PE file header (COFF header, 0x14 bytes)
            ByteArray = array.array('B')
            ByteArray.fromfile(PeObject, 0x14)
            ByteList = ByteArray.tolist()
            SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
            if SecNumber == 0:
                self.ErrorInfo = self.FileName + ' has no section header'
                return

            # Read PE optional header; its size sits at offset 0x10 of the
            # COFF header just read.
            OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
            ByteArray = array.array('B')
            ByteArray.fromfile(PeObject, OptionalHeaderSize)
            ByteList = ByteArray.tolist()
            self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
            self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
            self.Size = self._ByteListToInt(ByteList[0x38:0x3C])

            # Read each Section Header (0x28 bytes apiece)
            for Index in range(SecNumber):
                ByteArray = array.array('B')
                ByteArray.fromfile(PeObject, 0x28)
                ByteList = ByteArray.tolist()
                SecName = self._ByteListToStr(ByteList[0:8])
                SecVirtualSize = self._ByteListToInt(ByteList[8:12])
                SecRawAddress = self._ByteListToInt(ByteList[20:24])
                SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
                self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
            self.IsValid = True
        finally:
            PeObject.close()

    ## Decode a NUL-terminated ASCII name from a byte list.
    def _ByteListToStr(self, ByteList):
        String = ''
        for index in range(len(ByteList)):
            if ByteList[index] == 0:
                break
            String += chr(ByteList[index])
        return String

    ## Decode a little-endian integer from a byte list (ByteList[0] is LSB).
    def _ByteListToInt(self, ByteList):
        Value = 0
        for index in range(len(ByteList) - 1, -1, -1):
            Value = (Value << 8) | int(ByteList[index])
        return Value
class DefaultStore():
    """Lookup helper over the DSC's DefaultStores table."""
    def __init__(self, DefaultStores ):
        # Mapping of default-store entries as parsed from the platform DSC.
        self.DefaultStores = DefaultStores

    ## Reverse lookup: store name -> store id (None when absent).
    def DefaultStoreID(self, DefaultStoreName):
        for key, value in self.DefaultStores.items():
            if value == DefaultStoreName:
                return key
        return None

    ## Return (id, name) of the standard default store, falling back to the
    #  numerically smallest declared id when "0" is not declared.
    def GetDefaultDefault(self):
        if not self.DefaultStores or "0" in self.DefaultStores:
            return "0", TAB_DEFAULT_STORES_DEFAULT
        else:
            minvalue = min(int(value_str) for value_str in self.DefaultStores)
            return (str(minvalue), self.DefaultStores[str(minvalue)])

    ## Return the name of the lowest-id store among DefaultSIdList.
    def GetMin(self, DefaultSIdList):
        if not DefaultSIdList:
            return TAB_DEFAULT_STORES_DEFAULT
        # NOTE(review): this unpacks each .values() entry as a
        # (storeid, storename) pair, whereas DefaultStoreID above treats
        # values as plain names — the two methods assume different value
        # shapes; confirm against the caller that builds DefaultStores.
        storeidset = {storeid for storeid, storename in self.DefaultStores.values() if storename in DefaultSIdList}
        if not storeidset:
            return ""
        minid = min(storeidset )
        # Implicitly returns None if minid matches no entry (cannot happen
        # when storeidset was built from the same values).
        for sid, name in self.DefaultStores.values():
            if sid == minid:
                return name
class SkuClass():
    """Holds the set of SKUs selected for a build and their inheritance
    chains, derived from the DSC [SkuIds] section."""
    # Usage modes returned by SkuUsageType:
    DEFAULT = 0
    SINGLE = 1
    MULTIPLE =2

    def __init__(self,SkuIdentifier='', SkuIds=None):
        if SkuIds is None:
            SkuIds = {}

        # Validate every declared SKU id fits in a UINT64.
        for SkuName in SkuIds:
            SkuId = SkuIds[SkuName][0]
            skuid_num = int(SkuId, 16) if SkuId.upper().startswith("0X") else int(SkuId)
            if skuid_num > 0xFFFFFFFFFFFFFFFF:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData = "SKU-ID [%s] value %s exceeds the max value of UINT64"
                                      % (SkuName, SkuId))

        self.AvailableSkuIds = OrderedDict()
        self.SkuIdSet = []
        self.SkuIdNumberSet = []
        self.SkuData = SkuIds
        self._SkuInherit = {}
        self._SkuIdentifier = SkuIdentifier
        # The build's SKU selection: empty -> DEFAULT only; 'ALL' -> every
        # declared SKU; otherwise a '|'-separated explicit list.
        if SkuIdentifier == '' or SkuIdentifier is None:
            self.SkuIdSet = ['DEFAULT']
            self.SkuIdNumberSet = ['0U']
        elif SkuIdentifier == 'ALL':
            self.SkuIdSet = list(SkuIds.keys())
            self.SkuIdNumberSet = [num[0].strip() + 'U' for num in SkuIds.values()]
        else:
            r = SkuIdentifier.split('|')
            self.SkuIdSet=[(r[k].strip()).upper() for k in range(len(r))]
            k = None
            try:
                self.SkuIdNumberSet = [SkuIds[k][0].strip() + 'U' for k in self.SkuIdSet]
            except Exception:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
                                      % (k, " | ".join(SkuIds.keys())))
        # Every selected SKU must exist in the declared set.
        for each in self.SkuIdSet:
            if each in SkuIds:
                self.AvailableSkuIds[each] = SkuIds[each][0]
            else:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
                                      % (each, " | ".join(SkuIds.keys())))
        if self.SkuUsageType != SkuClass.SINGLE:
            self.AvailableSkuIds.update({'DEFAULT':0, 'COMMON':0})
        # Publish the effective SKU list globally (COMMON is a wildcard, not
        # a real SKU; in SINGLE mode DEFAULT is dropped too).
        if self.SkuIdSet:
            GlobalData.gSkuids = (self.SkuIdSet)
            if 'COMMON' in GlobalData.gSkuids:
                GlobalData.gSkuids.remove('COMMON')
            if self.SkuUsageType == self.SINGLE:
                if len(GlobalData.gSkuids) != 1:
                    if 'DEFAULT' in GlobalData.gSkuids:
                        GlobalData.gSkuids.remove('DEFAULT')
            if GlobalData.gSkuids:
                GlobalData.gSkuids.sort()

    ## Return the parent SKU a given SKU inherits from ('DEFAULT' at the root).
    def GetNextSkuId(self, skuname):
        if not self._SkuInherit:
            self._SkuInherit = {}
            # SkuData values are (id, name, parent); an empty parent means
            # the SKU inherits directly from DEFAULT.
            for item in self.SkuData.values():
                self._SkuInherit[item[1]]=item[2] if item[2] else "DEFAULT"
        return self._SkuInherit.get(skuname, "DEFAULT")

    ## Return the inheritance chain of a SKU, root (DEFAULT) first.
    def GetSkuChain(self, sku):
        if sku == "DEFAULT":
            return ["DEFAULT"]
        skulist = [sku]
        nextsku = sku
        while True:
            nextsku = self.GetNextSkuId(nextsku)
            skulist.append(nextsku)
            if nextsku == "DEFAULT":
                break
        skulist.reverse()
        return skulist

    ## Return all SKUs ordered so parents always precede children, merged
    #  breadth-first across the chains of every selected SKU.
    def SkuOverrideOrder(self):
        skuorderset = []
        for skuname in self.SkuIdSet:
            skuorderset.append(self.GetSkuChain(skuname))

        skuorder = []
        for index in range(max(len(item) for item in skuorderset)):
            for subset in skuorderset:
                if index > len(subset)-1:
                    continue
                if subset[index] in skuorder:
                    continue
                skuorder.append(subset[index])

        return skuorder

    ## Classify the selection as DEFAULT-only, a single SKU, or multiple SKUs.
    @property
    def SkuUsageType(self):
        if self._SkuIdentifier.upper() == "ALL":
            return SkuClass.MULTIPLE

        if len(self.SkuIdSet) == 1:
            if self.SkuIdSet[0] == 'DEFAULT':
                return SkuClass.DEFAULT
            return SkuClass.SINGLE
        if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet:
            return SkuClass.SINGLE
        return SkuClass.MULTIPLE

    ## Render the SKU ids as a C array initializer string; each SKU's chain
    #  is emitted up to (not including) DEFAULT, terminated by 0x0.
    def DumpSkuIdArrary(self):
        if self.SkuUsageType == SkuClass.SINGLE:
            return "{0x0}"
        ArrayStrList = []
        for skuname in self.AvailableSkuIds:
            if skuname == "COMMON":
                continue
            while skuname != "DEFAULT":
                ArrayStrList.append(hex(int(self.AvailableSkuIds[skuname])))
                skuname = self.GetNextSkuId(skuname)
            ArrayStrList.append("0x0")
        return "{{{myList}}}".format(myList=",".join(ArrayStrList))

    @property
    def AvailableSkuIdSet(self):
        return self.AvailableSkuIds

    ## The single effective SKU name ('DEFAULT' in multi-SKU builds).
    @property
    def SystemSkuId(self):
        if self.SkuUsageType == SkuClass.SINGLE:
            if len(self.SkuIdSet) == 1:
                return self.SkuIdSet[0]
            else:
                return self.SkuIdSet[0] if self.SkuIdSet[0] != 'DEFAULT' else self.SkuIdSet[1]
        else:
            return 'DEFAULT'
#
# Pack a registry format GUID
#
def PackRegistryFormatGuid(Guid):
    """Pack a registry-format GUID string (8-4-4-4-12 hex groups) into binary."""
    return PackGUID(Guid.split('-'))
## Get the integer value from string like "14U" or integer like 2
#
# @param Input The object that may be either a integer value or a string
#
# @retval Value The integer value that the input represents
#
def GetIntegerValue(Input):
    """Convert *Input* to an int.

    Accepts a plain int, or a decimal/hex string optionally carrying a C
    integer suffix ("U", "ULL", "LL").  An empty string yields 0.
    """
    if isinstance(Input, int):
        return Input
    value = Input
    # Strip C-style integer suffixes, checked in the same order as before.
    for suffix in ("U", "ULL", "LL"):
        if value.endswith(suffix):
            value = value[:-len(suffix)]
    if value[:2] in ("0x", "0X"):
        return int(value, 16)
    if value == '':
        return 0
    return int(value)
#
# Pack a GUID (registry format) list into a buffer and return it
#
def PackGUID(Guid):
    """Pack a registry-format GUID (sequence of 5 hex-string groups) into binary."""
    node = Guid[4]
    fields = [int(Guid[0], 16), int(Guid[1], 16), int(Guid[2], 16),
              int(Guid[3][-4:-2], 16), int(Guid[3][-2:], 16)]
    # The 6-byte node field is consumed two hex digits at a time.
    for start in range(-12, -2, 2):
        fields.append(int(node[start:start + 2], 16))
    fields.append(int(node[-2:], 16))
    return pack(PACK_PATTERN_GUID, *fields)
#
# Pack a GUID (byte) list into a buffer and return it
#
def PackByteFormatGUID(Guid):
    """Pack a GUID given as a sequence of 11 integer fields into binary."""
    # Indexing (rather than slicing) preserves the IndexError on short input.
    return pack(PACK_PATTERN_GUID, *(Guid[i] for i in range(11)))
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
    # Nothing to do when run directly; this module only provides helpers.
    pass
|
EDJournalLib.py | from EDJConst import *
from EDJFile import *
import time
import threading
# User-configurable settings.
uInterval = 10
uFuction = None
# Internal state used by the polling thread.
mThread = None
mThreadFlag = True
mBSize = None
mBPath = ""
mBRow = None
# Set the polling interval for the asynchronous journal watcher.
def setInterval(pMs):
    """Set the watcher polling interval.

    The value is divided by 1000 before time.sleep() in loggingJournal(),
    so it is interpreted as milliseconds.
    """
    global uInterval
    uInterval = pMs
# Register the callback invoked for each new journal delta.
def setFnc(pFnc):
    """Register the callback that receives new journal data from loggingJournal()."""
    global uFuction
    uFuction = pFnc
# Fetch the journal diff (eventually this should return raw bytes).
def getDeffJournal(pFileName, pByte):
    """Read *pByte* bytes worth of journal data and split it into CRLF lines."""
    return getByteFile(pFileName, pByte).split("\r\n")
# Fetch only the most recent journal line.
def getLatestJournal():
    """Return the newest journal entry (not implemented yet)."""
    pass
# Poll the journal asynchronously and fire the registered callback on changes.
def loggingJournal():
    """Worker loop: watch the journal folder size and emit new data via uFuction.

    Runs until mThreadFlag is cleared by endJournal().  Each cycle compares
    the current folder size against the last seen size (mBSize) and, when it
    changed, passes the delta-sized chunk from getJson() to the user callback.
    """
    global mInterval  # NOTE(review): likely a typo for uInterval; no global named mInterval exists.
    global uFuction
    global mThread
    global mThreadFlag
    global mBSize
    while mThreadFlag == True:
        aFByte = getFolderSize()
        if aFByte != mBSize:
            # NOTE(review): if the folder shrinks, aFByte - mBSize is negative —
            # verify getJson() handles a negative byte count.
            aRef = getJson(aFByte - mBSize)
            mBSize = aFByte
            # Deliver the new data to the user callback.
            uFuction(aRef)
            # functools.partial(uFuction, aRef)
        time.sleep(uInterval / 1000)
# Begin asynchronous journal collection.
def startJournal():
    """Snapshot the current folder size and launch the polling thread."""
    global mThread, mThreadFlag, mBSize
    mBSize = getFolderSize()
    mThreadFlag = True
    worker = threading.Thread(target=loggingJournal)
    mThread = worker
    worker.start()
# Stop asynchronous journal collection.
def endJournal():
    """Signal the polling thread to exit after its current sleep."""
    global mThreadFlag
    mThreadFlag = False
# Reset the module to its initial state.
def init():
    """Reset user settings and internal watcher state.

    Re-snapshots the journal folder size via getFolderSize().

    Bug fix: the original declared ``global mPath`` — a name that does not
    exist anywhere in this module (the actual global is ``mBPath``) — and
    then never reset it.  It now declares and resets ``mBPath``.
    """
    global uInterval
    global uFuction
    global mThread
    global mThreadFlag
    global mBSize
    global mBPath
    global mBRow
    uInterval = 10
    uFuction = None
    mThread = None
    mThreadFlag = True
    mBSize = getFolderSize()
    mBPath = ""
    mBRow = None
|
ebsk.py | #!/bin/env python3
import time
import math
import sys
import getopt
from threading import Thread
DEBUG=False
class Sensor:
    """Reads a single value from a sysfs-style file.

    Attributes:
        value: last value read (-1 until the first read; False when the
            backing file was missing).
        filename: path of the file to read from.
    """

    def __init__(self):
        self.value = -1
        self.file = None
        self.filename = ''

    def _open(self, name, mode, file = None):
        # Only (re)open when no usable handle was supplied; returns None otherwise.
        if not file or file.closed:
            if DEBUG:
                print("Opening %s" % name)
            return open(name, mode)

    def _close(self, file):
        #if DEBUG:
        #    print("Closing %s" % file.name)
        file.close()

    def _read(self, file, name):
        """Read one line from *name*; returns False when the file is absent."""
        try:
            file = self._open(name, 'r')
        except FileNotFoundError:
            if DEBUG:
                print('File %s was not found' % name)
            return False
        line = file.readline()
        self._close(file)
        if DEBUG:
            print("READ : %s : %s" % (str(file.name),str(line)))
        return line

    def read(self):
        """Read the sensor's file and cache the result in self.value."""
        self.value = self._read(self.file, self.filename)
        return self.value
class Actor(Sensor):
    """A Sensor that can also write a value back to its file."""

    def _write(self, name, value):
        """Write str(value) to *name*; returns the character count written."""
        handle = self._open(name, 'w')
        if DEBUG:
            print("WRITE : %s : %s" % (str(handle.name),str(value)))
        written = handle.write(str(value))
        self._close(handle)
        return written

    def write(self, value):
        """Write *value* to the actor's file and cache the write result."""
        self.value = self._write(self.filename, value)
        return self.value
class LightSensor(Sensor):
    """Ambient light sensor exposed by the applesmc driver."""

    def __init__(self):
        super().__init__()
        self.filename = '/sys/devices/platform/applesmc.768/light'
class LightActor(Actor):
    """An Actor for brightness-style controls with a persisted "force off" flag.

    The force-off flag is mirrored to a per-control file under /tmp so that
    separate invocations of this script (e.g. ACPI handlers) can switch the
    lights off across processes.
    """
    def __init__(self):
        Actor.__init__(self)
        self.forceOff = False
    def writeForceOff(self, state=True):
        """Persist the force-off flag ("1"/"0") to this control's /tmp marker file."""
        name = '/tmp/ebsk_force_off_' + self.filename.replace('/', '_')
        if state == True or state == 1:
            self._write(name, 1)
        else:
            self._write(name, 0)
    def checkForceOff(self):
        """Refresh self.forceOff from the /tmp marker file and return it.

        A missing file or "0" content clears the flag; "1" sets it.
        """
        file = None
        name = '/tmp/ebsk_force_off_' + self.filename.replace('/','_')
        value = self._read(file, name)
        if value is False or value == "0":
            self.forceOff = False
            if DEBUG:
                print("[%s] Force OFF ===> NO" % name)
        elif value == "1":
            self.forceOff = True
            if DEBUG:
                print("[%s] Force OFF ===> YES" % name)
        return self.forceOff
    def readMax(self, file=None):
        """Read the hardware maximum from the sibling max_brightness file."""
        if file is None:
            file = self.filename.replace('brightness', 'max_brightness')
        self.maxValue = int(self._read(None, file))
        return self.maxValue
    def write(self, value):
        """Write *value*, forcing 0 when forced off and clamping to maxValue."""
        if self.checkForceOff() is True:
            value = 0
        if value <= self.maxValue:
            return Actor.write(self, value)
        else:
            if DEBUG:
                print("Value %s is out of actor-limits" % str(value))
            # Retry once with the maximum; the recursion terminates because
            # maxValue <= maxValue (or forceOff clamps the value to 0).
            return self.write(self.maxValue)
    def fade(self, targetValue, valueStep, timeStep=0.01):
        """Step the control towards targetValue, sleeping timeStep between writes.

        NOTE(review): the final write can stop one step short of targetValue
        because range() excludes its end point — confirm this is intended.
        """
        actualValue = int(self.read())
        if targetValue < actualValue:
            valueStep = valueStep * (-1)
        elif targetValue == actualValue:
            return
        for x in range(int(self.read()), targetValue, valueStep):
            self.write(x)
            time.sleep(timeStep)
class Backlight(LightActor):
    """Display backlight driven through the intel_backlight sysfs node."""

    def __init__(self):
        super().__init__()
        self.filename = '/sys/class/backlight/intel_backlight/brightness'
        self.readMax()
class KeyboardBacklight(LightActor):
    """Keyboard backlight, capped at a quarter of its hardware maximum."""

    def __init__(self):
        super().__init__()
        self.filename = '/sys/class/leds/smc::kbd_backlight/brightness'
        self.readMax()
        self.maxValue = int(self.maxValue * 0.25)
# Module-level devices; note these touch sysfs at import time (readMax()).
ambient = LightSensor()
backlight = Backlight()
kb_backlight = KeyboardBacklight()
def trigger_action(type, args):
    """Dispatch an external event (currently only ACPI lid open/close)."""
    if type != "acpi":
        return
    if args == "lidclose":
        backlight.writeForceOff()
        kb_backlight.writeForceOff()
    elif args == "lidopen":
        backlight.writeForceOff(False)
        kb_backlight.writeForceOff(False)
def thread_bl():
    """Endless loop: track ambient light and fade the display backlight.

    The ambient value is parsed from a "(left,right)"-style reading —
    presumably the applesmc format; TODO confirm on target hardware.
    """
    print('Starting Backlight Thread')
    while True:
        backlight.fade(
            math.ceil((backlight.maxValue / 255) * (int(ambient.read().split(',')[0][1:]) + 12)),
            math.ceil(backlight.maxValue/(1024/3))
        )
        time.sleep(0.3)
def thread_kbbl():
    """Endless loop: fade the keyboard backlight inversely to ambient light."""
    print('Starting Keyboard-Backlight Thread')
    while True:
        # Skip the ambient read entirely while forced off.
        if kb_backlight.forceOff == False:
            kb_backlight.fade(
                math.ceil((kb_backlight.maxValue / 255) * (255 - int(ambient.read().split(',')[0][1:]))),
                math.ceil(kb_backlight.maxValue / 16)
            )
        else:
            # Keep polling the flag file so a later "lidopen" re-enables fading.
            kb_backlight.checkForceOff()
        time.sleep(0.3)
def main(argv):
    """Entry point: handle CLI/ACPI actions, otherwise run the fade threads.

    Bug fixes: '-h' was not declared in the getopt spec, so it always raised
    GetoptError instead of printing usage; and the '--acpi' test used string
    containment (`opt in ("--acpi")` — a string, not a tuple), which only
    worked by accident for '-a'.  Both option spellings are now matched
    explicitly.
    """
    try:
        opts, args = getopt.getopt(argv, "ha::", ["acpi="])
    except getopt.GetoptError:
        print('test.py --acpi=<acpi-action>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('test.py --acpi=<acpi-action>')
            sys.exit()
        elif opt in ("-a", "--acpi"):
            trigger_action(type="acpi", args=arg)
    if len(opts) == 0:
        # No CLI action requested: run both fade loops forever.
        t_bl = Thread(target=thread_bl)
        t_kbbl = Thread(target=thread_kbbl)
        t_bl.start()
        t_kbbl.start()
        while True:
            time.sleep(10)
if __name__ == '__main__':
    # Strip the program name; main() decides between ACPI one-shot and daemon mode.
    main(sys.argv[1:])
# Some useless code here
#while True:
#print((int(ambient.read().split(',')[0][1:]) + 10))
#print(backlight.maxValue / 255)
#print((backlight.maxValue / 255) * (int(ambient.read().split(',')[0][1:]) + 10))
# time.sleep(10)
#for x in range(0,backlight.maxValue, math.ceil(backlight.maxValue/1024)):
# backlight.write(x)
# print("Setting BL to %s" % str(x) )
# time.sleep(0.01)
#for x in range(0,kb_backlight.maxValue, math.ceil(backlight.maxValue/32)):
# kb_backlight.write(x)
# print("Setting KBBL to %s" % str(x) )
# time.sleep(0.05)
|
recognizer_observer.py | import rospy
import threading
from .observer import Observer
class RecognizerObserver(Observer):
    """Observer that polls a recognizer against the latest pose/goal pair.

    A background thread repeatedly runs the selected recognizer's detect()
    on the most recent pose and goal, and fires the observer event whenever
    a detection is reported.
    """

    def __init__(self, recognizers, recognizer, check_frequency=20.0, **kwargs):
        """
        Args:
            recognizers: mapping of recognizer name -> recognizer object.
            recognizer: key of the recognizer to poll.
            check_frequency: polling rate in Hz.
        """
        super(RecognizerObserver, self).__init__(**kwargs)
        self._recognizer = recognizers[recognizer]
        self._check_rate = rospy.Rate(check_frequency)
        self._pose = None
        self._goal = None
        # RLock guards _pose/_goal, written by callbacks and read by the worker.
        self._lock = threading.RLock()
        self._worker = threading.Thread(target=self._dispatch_event, name="recognizer_observer")
        self._worker.start()

    def join(self):
        """Block until the worker thread exits (i.e. ROS shuts down)."""
        self._worker.join()

    def update_goal(self, goal):
        """Record the latest goal (thread-safe)."""
        with self._lock:
            self._goal = goal

    def update_pose(self, pose):
        """Record the latest pose (thread-safe)."""
        with self._lock:
            self._pose = pose

    def _dispatch_event(self):
        """Worker loop: run the recognizer and emit an event on detection."""
        while not rospy.is_shutdown():
            if self._pose is not None and self._goal is not None:
                result, desc = self._recognizer.detect(self._pose, self._goal)
                if result:
                    self._call_event(msg=None)
            try:
                self._check_rate.sleep()
            except rospy.ROSInterruptException as e:
                # Bug fix: the log tag said "PoseObserver" (copy-paste); this
                # class is the RecognizerObserver.
                rospy.logdebug("RecognizerObserver: {}".format(e))
|
jdb.py | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2018 Musker.Chao
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json, threading, time
class NosqlFileNotDefineError(Exception):
    """Raised by DB.loadDB()/DB.dumpDB() when persistence is requested
    but no nosql file has been configured.

    Bug fix: the class now derives from Exception.  Previously it inherited
    from object only, so ``raise NosqlFileNotDefineError`` itself failed
    with "exceptions must derive from BaseException".
    """
    pass
class _ObjLock(object):
"""Process lock Class
Process lock to prevent dirty data
"""
def __init__(self):
import threading
self.Lock = threading.RLock()
def lock(self):
return self.Lock.acquire()
def ulock(self):
return self.Lock.release()
class BaseJson:
    """Thin wrapper around a dict providing locked mutation helpers."""

    def __init__(self, obj):
        """Wrap *obj* (a dict) and attach a process lock."""
        self.obj = obj
        self.objLock = _ObjLock()

    def setValue(self, key, value):
        """Store key -> value under the lock."""
        self.objLock.lock()
        self.obj[key] = value
        self.objLock.ulock()

    def getValue(self, key):
        """Return the value for *key* (None when absent)."""
        return self.obj.get(key)

    def dropKey(self, key):
        """Remove *key* if present; a missing key is silently ignored."""
        self.objLock.lock()
        try:
            self.obj.pop(key)
        except KeyError:
            pass
        self.objLock.ulock()

    def getKeys(self):
        """Return all keys as a list."""
        return list(self.obj.keys())

    def getValues(self):
        """Return all values as a list."""
        return list(self.obj.values())

    def getItems(self):
        """Return all (key, value) pairs as a list of tuples."""
        return [(k, v) for k, v in self.obj.items()]

    def clearKey(self, key):
        """Reset *key*'s value to an empty container of the same kind (None for scalars)."""
        self.objLock.lock()
        current = self.obj[key]
        if isinstance(current, dict):
            empty = {}
        elif isinstance(current, list):
            empty = []
        elif isinstance(current, tuple):
            empty = ()
        elif isinstance(current, set):
            empty = set()
        else:
            empty = None
        self.obj[key] = empty
        self.objLock.ulock()

    def lenTable(self):
        """Return the number of keys in this table."""
        return len(self.obj)

    def allData(self):
        """Return the underlying dict itself (not a copy)."""
        return self.obj

    def createTable(self, tname):
        """Return a BaseJson view of self.obj[tname], creating it when missing.

        Note: a pre-existing but falsy value (e.g. an empty dict) is replaced
        by a fresh empty table, matching the original semantics.
        """
        view = BaseJson({})
        if self.obj.get(tname):
            view = BaseJson(self.obj[tname])
        else:
            self.obj[tname] = view.allData()
        return view
class DB:
    """Loads and persists the in-memory store backed by a JSON file."""

    def __init__(self, dump=False, nosqlFile=None, dumpTime=30):
        """
        :param dump: whether to persist data to disk
        :param nosqlFile: path of the persistence file
        :param dumpTime: seconds between automatic dumps
        """
        self.db = {}
        self.dump = dump
        self.nosqlFile = nosqlFile
        self.dumpTime = dumpTime

    def loadDB(self):
        """Replace self.db with the JSON content of the persistence file.

        :raises NosqlFileNotDefineError: when persistence is not configured.
        """
        if not (self.dump and self.nosqlFile != None):
            raise NosqlFileNotDefineError
        with open(self.nosqlFile, 'r', encoding='utf-8') as f:
            self.db = json.loads(str(f.read()))

    def dumpDB(self):
        """Serialize self.db (unwrapping BaseJson values) to the persistence file.

        :raises NosqlFileNotDefineError: when persistence is not configured.
        """
        snapshot = {}
        for key, val in self.db.items():
            snapshot[key] = val.allData() if isinstance(val, BaseJson) else val
        if not (self.dump and self.nosqlFile != None):
            raise NosqlFileNotDefineError
        with open(self.nosqlFile, 'w', encoding='utf-8') as f:
            f.write(json.dumps(snapshot))

    def timingump(self):
        """Persistence loop (run on its own thread): dump every dumpTime seconds."""
        while True:
            time.sleep(int(self.dumpTime))
            self.dumpDB()

    def initDB(self):
        """Load any persisted data, start the dump thread when persisting,
        and return the shared db dict.
        """
        if self.dump:
            with open(self.nosqlFile, 'r', encoding='utf-8') as f:
                has_content = len(f.read()) > 0
            if has_content:
                self.loadDB()
            # NOTE(review): non-daemon thread — it keeps the process alive.
            t = threading.Thread(target=self.timingump)
            t.start()
        return self.db
class NoSql:
    """Facade over DB: owns the shared store and hands out named databases."""

    def __init__(self, dump=False, nosqlFile=None, dumpTime=30):
        """
        :param dump: whether to persist data
        :param nosqlFile: persistence file path
        :param dumpTime: seconds between automatic dumps
        """
        self.dump = dump
        self.nosqlFile = nosqlFile
        self.dumpTime = dumpTime
        self.db = DB(self.dump, self.nosqlFile, self.dumpTime).initDB()

    def createDB(self, dbname):
        """Return a BaseJson database named *dbname*, creating it when missing.

        Note: a freshly created database is stored as the BaseJson wrapper
        itself (DB.dumpDB unwraps it again on persistence).
        """
        existing = self.db.get(dbname)
        if existing:
            return BaseJson(existing)
        created = BaseJson({})
        self.db[dbname] = created
        return created
|
runners.py | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph based runners."""
import contextlib
import os
import re
import threading
import time
from absl import flags
from lingvo import pdb_wrapper
import lingvo.compat as tf
from lingvo.core import base_model
from lingvo.core import checkpointer
from lingvo.core import cluster_factory
from lingvo.core import metrics
from lingvo.core import py_utils
from lingvo.core import summary_utils
from lingvo.core import tpu_embedding_layers
import numpy as np
from lingvo import base_runner
from google.protobuf import text_format
# pylint:disable=g-direct-tensorflow-import
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop as tpu_training_loop
from tensorflow.python.tpu.ops import tpu_ops
# pylint:enable=g-direct-tensorflow-import
FLAGS = flags.FLAGS
# useful for debugging.
def StartShell(local_ns=None):
  """Drop into an interactive IPython shell (debugging/development aid).

  The shell namespace contains the entries of local_ns plus this module's
  globals; globals take precedence, matching the original behavior.
  """
  import IPython  # pylint: disable=g-import-not-at-top
  shell_ns = {}
  if local_ns:
    shell_ns.update(local_ns)
  shell_ns.update(globals())
  IPython.start_ipython(argv=[], user_ns=shell_ns)
class Controller(base_runner.BaseRunner):
  """Controller for a training cluster.

  Builds the FProp/BProp graph, periodically writes summaries, and (unless
  checkpointing is delegated to the TPU trainer) saves checkpoints.
  """

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    if py_utils.IsEagerMode():
      raise RuntimeError('The controller job is not supported in eager mode.')
    self._job_name = 'controller'
    assert not self._model_task_name, 'Controller needs all tasks!'
    self._control_dir = os.path.join(self._logdir, 'control')
    tf.io.gfile.makedirs(self._control_dir)
    # When the TPU trainer checkpoints, the controller must not, so that two
    # jobs never write the same checkpoint files.
    self._checkpoint_in_controller = True
    if FLAGS.checkpoint_in_trainer_tpu:
      self._checkpoint_in_controller = False

    if self._early_stop:
      tf.logging.warning('Controller ignoring early_stop since '
                         'TrainerTpu is driving training.')
      self._early_stop = None

    with self._graph.as_default(), tf.container(self._container_id):
      with self._cluster, tf.device(self._cluster.GetPlacer()):
        self._summary_writer = self._CreateSummaryWriter(self._control_dir)
        self._model = self.params.Instantiate()
        self._params = self._model.params
        self._model.ConstructFPropBPropGraph()
        self._summary_op = tf.summary.merge_all()
        self._initialize_tables = tf.tables_initializer()
        self._initialize_local_vars = tf.local_variables_initializer()
        self._initialize_global_vars = tf.global_variables_initializer()
        self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
        if self._checkpoint_in_controller:
          self._checkpointer = self._CreateCheckpointer(
              self._train_dir,
              self._model,
              init_op=self._initialize_global_vars)

      self._ExportMetrics(params=self.params)
      self._model_analysis, self._total_num_params = summary_utils.ModelAnalysis(
          self._model, FLAGS.inspect_model_topn, FLAGS.inspect_model_part_regex)
      py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
      self._WriteToLog(self._model_analysis, self._control_dir,
                       'model_analysis.txt')
      self._WriteToLog(self.params.ToText(), self._control_dir, 'params.txt')
      self._WriteToLog(
          text_format.MessageToString(self.params.ToProto(), as_utf8=True),
          self._control_dir, 'params.pbtxt')
      self._summary_writer.add_graph(self._graph)

  def Start(self):
    """Start the controller loop on a background runner thread."""
    super().Start()
    self._RunLoop('controller', self._Loop)

  def StartEnqueueOp(self, op):
    """Start a loop that keeps feeding `op` (an input enqueue op)."""
    self._RunLoop(
        'controller/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])

  def _Loop(self):
    """Main loop: restore/save checkpoints and write summaries until done."""
    with tf.container(self._container_id), self._GetSession() as sess:
      if FLAGS.interactive:
        # Into interactive debugging mode.
        StartShell(locals())
        return
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      for task in self._model.tasks:
        task.input.Initialize(sess)

      # TODO(zhifengc): Moves these options into params.
      tp = self.params.train
      summary_interval_steps = tp.summary_interval_steps
      save_interval_seconds = tp.save_interval_seconds
      next_summary_step = 1

      if not self._checkpoint_in_controller:
        global_step = self._WaitUntilInit(sess)

      while True:
        now = time.time()
        next_iteration_seconds = now + min(
            10, save_interval_seconds)  # 10 seconds or less

        if self._checkpoint_in_controller:
          # Init/restore variable if needed.
          self._checkpointer.RestoreIfNeeded(sess)
          global_step = sess.run(self._model.global_step)

        if self._ShouldStop(sess, global_step):
          tf.logging.info('Training finished.')
          if self._checkpoint_in_controller:
            self._checkpointer.Save(sess, global_step)
          sess.close()
          self._DequeueThreadComplete()
          return

        if self._checkpoint_in_controller:
          # Checkpoint if it's time.
          self._checkpointer.MaybeSave(sess, global_step)

        # Summary.
        if self._summary_op is not None and global_step >= next_summary_step:
          global_step, summary_str = sess.run(
              [self._model.global_step, self._summary_op])
          next_summary_step = global_step + summary_interval_steps

          # An empty ndarray means merge_all found nothing to summarize.
          if isinstance(summary_str, np.ndarray) and summary_str.size == 0:
            tf.logging.info('Skipping summary: %s', summary_str)
          else:
            self._summary_writer.add_summary(summary_str, global_step)
          tf.logging.info('Write summary @%s', global_step)
          self._SummarizeValue(global_step, 'total_num_params',
                               self._total_num_params)
          tf.logging.info('Write summary done: step %d', global_step)

        # Sleep out the remainder of the iteration budget.
        now = time.time()
        if now < next_iteration_seconds:
          time.sleep(next_iteration_seconds - now)

  def _SummarizeValue(self, step, tag, value):
    """Write a single scalar summary `tag`=`value` at `step`."""
    self._summary_writer.add_summary(
        metrics.CreateScalarSummary(tag, value), step)
class Trainer(base_runner.BaseRunner):
  """Trainer on non-TPU.

  Runs the train op in a loop, sampling a task per step for multi-task
  models, and records step/example rates plus per-metric summaries.
  """

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._job_name = 'trainer'
    with self._graph.as_default(), tf.container(self._container_id):
      try:
        self._task_probs_summary_writers = []
        for task in self._model.task_schedule.tasks:
          path = os.path.join(os.path.join(self._train_dir, task))
          tf.io.gfile.makedirs(path)
          self._task_probs_summary_writers.append(
              self._CreateSummaryWriter(path))
      except AttributeError:
        tf.logging.info('AttributeError. Expected for single task models.')
        self._task_probs_summary_writers = []

      # Only the chief worker (task 0) writes the shared summaries.
      if self.params.cluster.task == 0:
        self._summary_writer = self._CreateSummaryWriter(self._train_dir)
        self._CreateTF2SummaryWriter(self._train_dir)
      else:
        self._summary_writer = None

      with self._cluster, tf.device(
          self._cluster.GetPlacer()), self._TF2SummaryContext():
        self._model = self.params.Instantiate()
        self._params = self._model.params
        self._model.ConstructFPropBPropGraph()
      self._CreateTF2SummaryOps()
      self._initialize_tables = tf.tables_initializer()
      self._initialize_local_vars = tf.local_variables_initializer()
      self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
      tf.logging.info('Trainer number of enqueue ops: %d',
                      len(self.enqueue_ops))

    self._step_rate_tracker = summary_utils.StepRateTracker()

    if self.params.cluster.task == 0:
      self._WriteToLog(self.params.ToText(), self._train_dir,
                       'trainer_params.txt')
    worker_id = self.params.cluster.task
    # Stagger worker start-up quadratically so replicas don't all begin at once.
    self._start_up_delay_steps = (((worker_id + 1) * worker_id / 2) *
                                  self.params.train.start_up_delay_steps)

  def _SummarizeValue(self, steps, tag, value, writer=None):
    """Write scalar `tag`=`value`; prefers `writer`, else the chief's writer."""
    if writer:
      writer.add_summary(metrics.CreateScalarSummary(tag, value), steps)
    elif self._summary_writer:
      self._summary_writer.add_summary(
          metrics.CreateScalarSummary(tag, value), steps)

  def Start(self):
    """Start the training loop on a background runner thread."""
    super().Start()
    self._RunLoop('trainer', self._Loop)

  def StartEnqueueOp(self, op):
    """Start a loop that keeps feeding `op` (an input enqueue op)."""
    self._RunLoop(
        'trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])

  def _LoopEnqueue(self, op):
    # Evaler/Controller jobs may find that the trial is infeasible and report
    # done earlier. This is an important check since the trainer may retry
    # indefinitely without it.
    if self._trial.ShouldStop():
      tf.logging.info('Training skipped (trial requested to stop).')
      return
    return super()._LoopEnqueue(op)

  def _Loop(self):
    """Main loop: repeatedly run train_op, tracking rates and summaries."""
    # Evaler/Controller jobs may find that the trial is infeasible and report
    # done earlier. This is an important check since the trainer may retry
    # indefinitely without it.
    if self._trial.ShouldStop():
      tf.logging.info('Training skipped (trial requested to stop).')
      return
    with tf.container(
        self._container_id), self._cluster, self._GetSession() as sess:
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      self._InitializeTF2SummaryWriter(sess)
      for task in self._model.tasks:
        task.input.Initialize(sess)
      global_step = self._WaitUntilInit(sess, self._start_up_delay_steps)

      status_interval_steps = 100
      next_status_step = 1
      eval_metrics = None
      while True:
        if (self._trial.ShouldStopAndMaybeReport(global_step, eval_metrics) or
            self._ShouldStop(sess, global_step)):
          tf.logging.info('Training finished.')
          if self._early_stop:
            time.sleep(300)  # controller hangs if it doesn't finish first
          self._DequeueThreadComplete()
          return

        # If a task is explicitly specified, only train that task.
        if self._model_task_name:
          task = self._model.GetTask(self._model_task_name)
        else:
          # Note: This is a slightly stale global_step value from the previous
          # sess.run() call.
          # For multi-task models, `self._model.task_schedule.cur_probs` will
          # be updated.
          task = self._model.SampleTask(global_step)
          if self._task_probs_summary_writers:
            for index, prob in enumerate(self._model.task_schedule.cur_probs):
              self._SummarizeValue(global_step, 'task_probability', prob,
                                   self._task_probs_summary_writers[index])
            try:
              for index, task in enumerate(self._model.tasks):
                self._SummarizeValue(global_step, 'task_weight',
                                     sess.run(task.vars.task_weight),
                                     self._task_probs_summary_writers[index])
            except AttributeError:
              pass

        (_, eval_metrics, per_example_tensors) = sess.run([
            task.train_op,
            task.eval_metrics,
            task.per_example_tensors,
        ])
        # Explicitly fetch global_step after running train_op.
        # TODO(b/151181934): Investigate this behavior further.
        task_global_step = sess.run(task.global_step)
        task.ProcessFPropResults(sess, task_global_step, eval_metrics,
                                 per_example_tensors)
        self._RunTF2SummaryOps(sess)
        global_step = sess.run(self._model.global_step)

        step_rate, example_rate, total_examples = (
            self._step_rate_tracker.ComputeStepRate(
                global_step, eval_metrics['num_samples_in_batch'][0]))
        self._SummarizeValue(global_step, 'global_step/sec', step_rate)
        self._SummarizeValue(global_step, 'examples/sec', example_rate)
        self._SummarizeValue(global_step, 'total_samples', total_examples)

        msg = 'step:%6d, steps/sec: %0.2f, examples/sec: %0.2f' % (
            global_step, step_rate, example_rate)
        for key, (val, _) in sorted(eval_metrics.items()):
          msg += ' %s:%.8g' % (key, val)
          self._SummarizeValue(global_step, key, val)
        # Throttle status updates/metric export to every status_interval_steps.
        if global_step >= next_status_step:
          self._SetStatusMessage(msg)
          self._ExportMetrics(
              # Metrics expects python int, but global_step is numpy.int64.
              global_step=int(global_step),
              step_rate=step_rate,
              example_rate=example_rate)
          next_status_step = global_step + status_interval_steps
        else:
          tf.logging.info(msg)
        self._model.ProcessFPropResults(sess, global_step, eval_metrics,
                                        per_example_tensors)
class TrainerTpu(base_runner.BaseRunner):
"""Trainer on TPU."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if py_utils.IsEagerMode():
raise RuntimeError('TrainerTpu is not supported in eager mode. '
'Please run with --use_executor '
'(or --job=executor_tpu if running locally).')
self._job_name = 'trainer_tpu'
# Multiple TPU trainer tasks not tested/implemented.
assert self._cluster.num_replicas == 1
data_parallelism = self._cluster.num_splits_per_client
assert data_parallelism
num_devices_per_split = self._cluster.num_devices_per_split
tf.logging.info('data_parallelism: %d, num_devices_per_split: %d',
data_parallelism, num_devices_per_split)
self._steps_per_loop = min(self.params.train.tpu_steps_per_loop,
self.params.train.max_steps)
self._step_rate_tracker = summary_utils.StepRateTracker()
self._compile_op = None
self._initialized = threading.Event()
tf.logging.info(
'Creating TrainerTpu using data parallelism %s '
'and %s steps_per_loop', data_parallelism, self._steps_per_loop)
@py_utils.RetryOnTransientTfError()
def _WaitUntilInitTpu():
"""Wait until the model is ready."""
try:
# tpu.initialize_system() is called with None as embedding_config, as
# embedding_config is not available yet. Later in _Loop, it is called
# with the correct embedding_config. Since it cannot be called twice in
# the same graph with different embedding_config, we use a dummy_graph
# here.
dummy_graph = tf.Graph()
with dummy_graph.as_default():
tpu_initialize_system_op = tf.tpu.initialize_system(
embedding_config=None, job=None)
with self._GetSession(graph=dummy_graph) as sess:
topology = sess.run(tpu_initialize_system_op)
if self.params.train.tpu_computation_shape is None:
computation_shape = py_utils.ComputationShape(num_devices_per_split,
topology)
else:
computation_shape = self.params.train.tpu_computation_shape
assert num_devices_per_split == np.prod(computation_shape)
if self.params.train.tpu_device_order_mode is None:
device_assignment = device_assignment_lib.device_assignment(
topology,
computation_shape=computation_shape,
num_replicas=data_parallelism)
else:
device_assignment = device_assignment_lib.device_assignment(
topology,
computation_shape=computation_shape,
num_replicas=data_parallelism,
device_order_mode=self.params.train.tpu_device_order_mode)
py_utils.SetTpuDeviceAssignment(device_assignment)
tf.logging.info('device_assignment.core_assignment: %s',
str(device_assignment.core_assignment))
tf.logging.info('device_assignment.topology.device_coordinates: %s',
str(device_assignment.topology.device_coordinates))
except py_utils.transient_tf_errors as e:
tf.logging.info('TPU initialization failed: %s', e)
raise
_WaitUntilInitTpu()
with self._graph.as_default(), tf.container(
self._container_id), contextlib.ExitStack() as stack:
if FLAGS.pdb_on_exception:
stack.enter_context(pdb_wrapper.catch_post_mortem())
self._summary_writer = self._CreateSummaryWriter(self._train_dir)
self._CreateTF2SummaryWriter(self._train_dir)
with self._cluster, tf.device(
self._cluster.GetPlacer()), self._TF2SummaryContext():
self._model = self.params.Instantiate()
self._task = self._model.GetTask()
self._task.input.TpuSetup()
self._eval_metrics = metrics.TpuEvalMetrics()
# Needed due to the AddExtraTheta() reference to global_step when
# instantiating the InputGenerator.
_ = py_utils.GetOrCreateGlobalStepVar()
self._CreateTF2SummaryOps()
self._input_stats_summary_interval_steps = (
self._task.input.params.input_stats_summary_interval_steps)
def TpuTrainStep(*args):
"""Train a shard of a batch on a single TPU core.
Args:
*args: metrics values from previous steps.
Returns:
New summed metrics values and a train_op.
"""
self._model.ConstructFPropBPropGraph()
tpu_embedding_collection = (
tpu_embedding_layers.TpuEmbeddingCollection.Get())
self._load_ops = tpu_embedding_collection.load_ops
self._retrieve_ops = tpu_embedding_collection.retrieve_ops
self._tpu_embedding = tpu_embedding_collection.tpu_embedding
per_step_eval_metrics = self._eval_metrics.SetMetrics(
self._task.eval_metrics, args)
outfeed_op = self._OutfeedEnqueue(self._task.per_example_tensors)
summed_metrics = []
assert len(per_step_eval_metrics) == len(args)
with tf.control_dependencies([outfeed_op]):
for x, y in zip(per_step_eval_metrics, args):
summed_metrics.append(x + y)
return summed_metrics + [self._task.train_op]
@tpu_function.on_device_training_loop
def TpuTrain():
loop_result = tpu_training_loop.repeat(
self._steps_per_loop,
TpuTrainStep,
inputs=self._eval_metrics.initial_values,
name='train_loop')
# Final metrics are the avg across self._steps_per_loop steps.
return self._eval_metrics.FinalizeMetrics(loop_result)
self._compile_op, batch_parallel_res = tpu.split_compile_and_shard(
TpuTrain,
num_shards=data_parallelism,
device_assignment=py_utils.GetTpuDeviceAssignment())
outfeed_dequeue_op = self._OutfeedDequeueLoop(
self._task.per_example_tensors, self._steps_per_loop,
self._cluster.num_splits_per_client)
def _ConstructPostTrainingLoop(train_loop_op, outfeed_dequeue_op):
"""Returns the op for tpu training with tail cpu computation."""
# Adds a tail computation that is run after the tpu_training loop
# step finishes. This allows us to run certain computation that
# acts on the variable between tpu_train_loop iterations and
# amortizing the cost of the operations. Alternative of running
# tpu.outside_compilation & using tf.cond is expensive.
with tf.control_dependencies(train_loop_op):
self._model.ConstructPostTrainingLoop(outfeed_dequeue_op)
with tf.control_dependencies([self._task.post_training_loop_op]):
return ([[tf.identity(o) for o in train_loop_op],
outfeed_dequeue_op])
# Get metric result from a single replica; they are all same here.
all_tpu_ops = [t[0] for t in batch_parallel_res]
self._tpu_train_ops = (
_ConstructPostTrainingLoop(all_tpu_ops, outfeed_dequeue_op))
self._initialize_local_vars = tf.local_variables_initializer()
self._initialize_global_vars = tf.global_variables_initializer()
self._initialize_tables = tf.tables_initializer()
if FLAGS.checkpoint_in_trainer_tpu:
self._checkpointer = checkpointer.Checkpointer(
self._train_dir, self._model, init_op=self._initialize_global_vars)
self.enqueue_ops = self._task.input.tpu_infeed_op
tf.logging.info('Trainer number of enqueue ops: %d',
len(self.enqueue_ops))
if self._task.input.input_data_summary_layout is not None:
self._summary_writer.add_summary(
self._task.input.input_data_summary_layout)
if FLAGS.checkpoint_in_trainer_tpu:
self._model_analysis, self._total_num_params = (
summary_utils.ModelAnalysis(self._model, FLAGS.inspect_model_topn,
FLAGS.inspect_model_part_regex))
py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
self._WriteToLog(self._model_analysis, self._train_dir,
'model_analysis.txt')
# Saves the trainer params.
self._WriteToLog(self.params.ToText(), self._train_dir,
'trainer_params.txt')
def _GetSession(self, **kwargs):
  """Returns a session targeting the TPU worker cluster.

  Forces `cluster_def` to the worker cluster so the session talks to the
  TPU worker rather than whatever default the base runner would pick.
  """
  return super()._GetSession(cluster_def=self._worker_cluster_def, **kwargs)
def _OutfeedEnqueue(self, per_example_tensors):
  """Builds an op that pushes `per_example_tensors` onto the TPU outfeed.

  Args:
    per_example_tensors: dict of key -> tensor produced by the train step;
      may be empty.

  Returns:
    tf.no_op() when there is nothing to enqueue, otherwise the outfeed
    enqueue op over the flattened tensors.
  """
  if not per_example_tensors:
    return tf.no_op()
  flattened = py_utils.NestedMap(per_example_tensors).Flatten()
  return tpu_ops.outfeed_enqueue_tuple(flattened)
def _OutfeedDequeueLoop(self, per_example_tensors, num_loops, num_devices):
  """Process all per-example tensor outfeed data for a TPU sess.run.

  Args:
    per_example_tensors: dict of key -> tensor as generated by TpuTrainStep.
    num_loops: number of times that TpuTrainStep will be executed by TpuTrain.
    num_devices: number of TPU cores assigned to this process.

  Returns:
    A dict of per-example tensors from the latest TpuTrainStep.
  """
  if not per_example_tensors:
    return tf.no_op()

  # Keys are sorted so the flattened order here matches the order used by
  # the enqueue side (_OutfeedEnqueue flattens a NestedMap the same way).
  tensor_shapes = [
      py_utils.GetShape(per_example_tensors[key])
      for key in sorted(per_example_tensors)
  ]
  tensor_types = [
      tf.as_dtype(per_example_tensors[key].dtype)
      for key in sorted(per_example_tensors)
  ]

  def LoopBody(i, *input_arrays):
    """Process outfeed data for a single TpuTrainStep.

    Args:
      i: current loop index.
      *input_arrays: One tf.TensorArray per outfeed tensor.

    Returns:
      i+1 (new index) plus post-write tf.TensorArray handles.
    """
    # Outfeed ops execute on each JF node, so they must be located on the
    # nodes.
    outfeed_devices = []
    device_assignment = py_utils.GetTpuDeviceAssignment()
    assert device_assignment
    for replica in range(device_assignment.num_replicas):
      for core in range(device_assignment.num_cores_per_replica):
        with tf.device(device_assignment.host_device(replica, core)):
          outfeed_devices.append(
              tpu_ops.outfeed_dequeue_tuple(
                  tensor_types,
                  tensor_shapes,
                  device_ordinal=device_assignment.tpu_ordinal(replica,
                                                               core)))
    offset = i * num_devices
    output_arrays = list(input_arrays)
    # Each output_array holds a different per-example tensor. We get results
    # for each tensor from each TPU for each TpuTrainStep call.
    for j in range(len(output_arrays)):
      for k in range(len(outfeed_devices)):
        output_arrays[j] = output_arrays[j].write(offset + k,
                                                  outfeed_devices[k][j])
    return tuple([i + 1] + output_arrays)

  def LoopCond(i, *output_arrays):
    # Only the loop counter decides termination; the arrays ride along.
    del output_arrays
    return i < num_loops

  # One TensorArray per outfeed tensor, sized for every (loop, device) pair.
  output_arrays = []
  for i in range(len(tensor_shapes)):
    output_arrays.append(
        tf.TensorArray(
            tensor_types[i],
            size=num_loops * num_devices,
            element_shape=tensor_shapes[i]))
  # Loop once for each time that TpuTrainStep runs.
  # parallel_iterations=1 keeps dequeues strictly ordered.
  output_arrays = tf.while_loop(
      LoopCond, LoopBody, [0] + output_arrays, parallel_iterations=1)[1:]
  concatenated_arrays = [array.concat() for array in output_arrays]
  return dict(zip(sorted(per_example_tensors), concatenated_arrays))
def _CleanUp(self):
  """Cleanup hook run by _RunLoop when the train loop raises."""
  # If there's an exception, we want _LoopEnqueue to wait until
  # everything is initialized before starting up.
  self._initialized.clear()
def Start(self):
  """Starts the trainer thread running the main training loop."""
  super().Start()
  # Run training.
  self._RunLoop('trainer', self._Loop, cleanup_func=self._CleanUp)
def _InfeedLoop(self, sess):
  """Runs the infeed enqueue ops once per step of the current train loop."""
  tf.logging.info('_InfeedLoop start')
  for _ in range(self._steps_per_loop):
    sess.run(self.enqueue_ops)
def StartEnqueueOp(self, op):
  """Starts a dedicated thread that drives the given infeed enqueue op.

  Args:
    op: the infeed enqueue op to loop over in its own thread.
  """
  # When retrieve ops for TPU embedding is present, we use _InfeedLoop above
  # instead to make sure enqueue and retrieve does not happen at the same
  # time as required by TPU embedding.
  # We can remove this by using a tf.while_loop driven infeed op.
  if self._retrieve_ops:
    return
  self._RunLoop(
      'trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _SummarizeValue(self, steps, tag, value):
  """Writes a scalar summary `tag` = `value` at global step `steps`."""
  scalar_summary = metrics.CreateScalarSummary(tag, value)
  self._summary_writer.add_summary(scalar_summary, steps)
def _LoopEnqueue(self, op):
  """Waits for trainer initialization, then drives the infeed enqueue op."""
  # Evaler/Controller jobs may find that the trial is infeasible and report
  # done earlier. This is an important check since the trainer may retry
  # indefinitely without it.
  if self._trial.ShouldStop():
    tf.logging.info('Training skipped (trial requested to stop).')
    return
  # Wait for _Loop to initialize variables first before attempting to infeed.
  tf.logging.info('_LoopEnqueue waiting for _initialized...')
  self._initialized.wait()
  tf.logging.info('_LoopEnqueue proceeding.')

  # The global step may not be initialized in this thread if the target server
  # uses session state isolation (e.g. Cloud TPUs).
  sess = self._GetSession()
  if FLAGS.checkpoint_in_trainer_tpu:
    self._checkpointer.RestoreGlobalStepIfNeeded(sess)

  # Get merged summary op for training related input data stats from the
  # tasks's input generator.
  self._merged_input_data_summary_op = (
      self._task.input.merged_input_data_summary_op)
  return super()._LoopEnqueue(op, sess)
def _Loop(self):
  """Main training loop: compile, restore, then run TPU train steps forever.

  Each iteration runs `self._steps_per_loop` TPU steps via a single
  sess.run, updates metrics/summaries, and optionally checkpoints.
  Returns when `_ShouldStop` says training is finished.
  """
  # Evaler/Controller jobs may find that the trial is infeasible and report
  # done earlier. This is an important check since the trainer may retry
  # indefinitely without it.
  if self._trial.ShouldStop():
    tf.logging.info('Training skipped (trial requested to stop).')
    self._DequeueThreadComplete()
    return
  with tf.container(
      self._container_id), self._cluster, self._GetSession() as sess:
    config_proto = (
        self._tpu_embedding.config_proto
        if self._tpu_embedding is not None else None)
    sess.run(
        tf.tpu.initialize_system(embedding_config=config_proto, job=None))
    sess.run(self._initialize_tables)
    sess.run(self._initialize_local_vars)
    self._InitializeTF2SummaryWriter(sess)

    if FLAGS.run_locally == 'tpu':
      sess.run(self._initialize_global_vars)

    # Compile the TPU program once up front and fail fast on errors.
    self._SetStatusMessage('Compiling ...')
    compilation_result = sess.run(self._compile_op)
    comp_result_proto = tpu_compilation_result.CompilationResultProto()
    comp_result_proto.ParseFromString(compilation_result)
    if comp_result_proto.status_error_message:
      tf.logging.fatal('Compilation failed: {}'.format(
          comp_result_proto.status_error_message))
    self._SetStatusMessage('Compiling done.')

    if FLAGS.checkpoint_in_trainer_tpu:
      # For b/134415393 -- better to initialize to a known state than
      # rely on what's in the session on the trainer/TPU worker.
      tf.logging.info('TrainerTpu: Force restore or initialize.')
      self._checkpointer.Restore(sess, force_reinitialize=True)

    global_step = sess.run(self._model.global_step)
    # Unblocks _LoopEnqueue threads waiting on initialization.
    self._initialized.set()
    eval_metrics = None

    if FLAGS.checkpoint_in_trainer_tpu and global_step == 0:
      # Always save a ckpt at step 0.
      self._checkpointer.MaybeSave(sess, global_step)

    sess.run(self._load_ops)
    while True:
      train_steps_start = time.perf_counter()
      if FLAGS.checkpoint_in_trainer_tpu:
        # Init/restore variable if needed.
        self._checkpointer.RestoreIfNeeded(sess)

      if self._trial.ShouldStopAndMaybeReport(
          global_step, eval_metrics) or self._ShouldEarlyStop(sess):
        # Early terminate gracefully by setting a new max step horizon: three
        # more TPU steps to ensure that the enqueue ops can gracefully
        # terminate as well. Otherwise, the enqueue thread may be stuck, e.g.,
        # when the queue is filled and the enqueue thread is blocked when
        # pushing new data to the queue, if the trainer thread decides to
        # early stop (i.e., `self._ShouldEarlyStop(sess)` is true), then the
        # enqueue thread could be blocked forever as the trainer thread would
        # never consume any new data from the queue. After setting the new
        # max step horizon, the trainer thread would continue run for 3 loops
        # (3K global steps usually), so the enqueue thread could get a chance
        # to move forward and run `_ShouldStop()` to stop gracefully.
        # Updated this to account for `tpu_infeed_parallelism` which could
        # allow for more enqueue threads to get further ahead of the trainer
        # thread.
        if self._max_steps_for_early_stop is None:
          tpu_infeed_parallelism = self._task.input.params.tpu_infeed_parallelism
          self._max_steps_for_early_stop = global_step + 3 * tpu_infeed_parallelism * self._steps_per_loop
          tf.logging.info('Early stopping at step: %d',
                          self._max_steps_for_early_stop)

      if self._ShouldStop(sess, global_step, check_early_stop=False):
        tf.logging.info('Training finished.')
        if FLAGS.checkpoint_in_trainer_tpu:
          self._checkpointer.Save(sess, global_step)
        self._DequeueThreadComplete()
        return

      if self._retrieve_ops:
        # TPU embedding requires infeed and retrieve to not overlap, so the
        # infeed is driven by a short-lived thread per loop instead.
        infeed_loop_thread = threading.Thread(
            target=self._InfeedLoop, args=(sess,))
        infeed_loop_thread.start()

      tpu_train_op_start = time.perf_counter()
      values, outfeeds = sess.run(self._tpu_train_ops)
      tpu_train_op_secs = time.perf_counter() - tpu_train_op_start

      if self._retrieve_ops:
        infeed_loop_thread.join()
        tf.logging.info('Retrieve params.')
        sess.run(self._retrieve_ops)
        tf.logging.info('Retrieve params done.')

      self._eval_metrics.PackMetricsValues(values)
      eval_metrics = self._eval_metrics.metrics

      # Note: global_step is incremented by self._steps_per_loop by the
      # previous sess.run call.
      task_global_step = sess.run(self._task.global_step)
      global_step = sess.run(self._model.global_step)

      if not self._task.per_example_tensors:
        outfeeds = {}
      self._task.ProcessFPropResults(sess, task_global_step, eval_metrics,
                                     outfeeds)
      self._model.ProcessFPropResults(sess, global_step, eval_metrics,
                                      outfeeds)

      step_rate, example_rate, total_examples = (
          self._step_rate_tracker.ComputeStepRate(
              global_step,
              eval_metrics['num_samples_in_batch'][0] * self._steps_per_loop))
      self._RunTF2SummaryOps(sess)
      self._SummarizeValue(global_step, 'global_step/sec', step_rate)
      self._SummarizeValue(global_step, 'examples/sec', example_rate)
      self._SummarizeValue(global_step, 'total_samples', total_examples)
      if FLAGS.checkpoint_in_trainer_tpu:
        self._SummarizeValue(global_step, 'total_num_params',
                             self._total_num_params)
      msg = 'step:%6d, steps/sec: %0.2f, examples/sec: %0.2f' % (
          global_step, step_rate, example_rate)
      for key, (val, _) in sorted(eval_metrics.items()):
        msg += ' %s:%.8g' % (key, val)
        self._SummarizeValue(global_step, key, val)
      self._SetStatusMessage(msg)

      # Add model eval metrics to early stop metric history.
      for metric_name, (metric_value, _) in eval_metrics.items():
        self._UpdateEarlyStopMetric('train', global_step, metric_name,
                                    metric_value)

      checkpoint_write_secs = 0.0
      if FLAGS.checkpoint_in_trainer_tpu:
        checkpoint_write_start = time.perf_counter()
        checkpoint_saved = self._checkpointer.MaybeSave(sess, global_step)
        if checkpoint_saved:
          checkpoint_write_secs = time.perf_counter() - checkpoint_write_start
      train_steps_secs = time.perf_counter() - train_steps_start
      self._ExportMetrics(
          # Metrics expects python int, but global_step is numpy.int64.
          global_step=int(global_step),
          step_rate=step_rate,
          example_rate=example_rate,
          tpu_train_op_secs=tpu_train_op_secs,
          checkpoint_write_secs=checkpoint_write_secs,
          total_train_steps_secs=train_steps_secs,
          **{k: v[0] for k, v in eval_metrics.items()})
class Evaler(base_runner.BaseRunner):
  """Evaler."""

  def __init__(self, eval_type, *args, **kwargs):
    """Builds the eval graph and summary/checkpoint plumbing.

    Args:
      eval_type: name of the eval dataset split, e.g. 'dev' or 'train'.
      *args: forwarded to base_runner.BaseRunner.
      **kwargs: forwarded to base_runner.BaseRunner.
    """
    super().__init__(*args, **kwargs)
    self._job_name = 'evaler_' + eval_type
    self._output_name = 'eval_' + eval_type
    # Only the evaler running on the training split exports the model.
    self._export = eval_type == 'train'
    if not self._export:
      tf.logging.info(f'Job {self._job_name} will not export the model.')
    self.params.cluster.do_eval = True
    self._cluster = cluster_factory.Cluster(self.params.cluster)
    self._eval_dir = os.path.join(self._logdir, self._output_name)
    if self._model_task_name:
      self._eval_dir += '_' + str(self._model_task_name)
    tf.io.gfile.makedirs(self._eval_dir)
    # If set, evaluate exactly this checkpoint instead of polling _train_dir.
    self._eval_path = None
    # Multitask params doesn't have 'task'.
    if 'task' in self.params:
      self._eval_path = checkpointer.GetSpecificCheckpoint(
          self.params.task.eval.load_checkpoint_from)
    self._should_report_metrics = self._job_name.startswith(
        self._cluster.reporting_job)

    with self._graph.as_default(), tf.container(self._container_id):
      self._summary_writer = self._CreateSummaryWriter(self._eval_dir)
      self._CreateTF2SummaryWriter(self._eval_dir)
      with self._cluster, tf.device(
          self._cluster.GetPlacer()), self._TF2SummaryContext():
        self._model = self.params.Instantiate()
        self._params = self._model.params
        self._model.ConstructFPropGraph()
        self._task = self._model.GetTask(self._model_task_name)
        self._checkpointer = self._CreateCheckpointer(self._train_dir,
                                                      self._model)
      self._CreateTF2SummaryOps()
      self._summary_op = tf.summary.merge_all()
      self._initialize_tables = tf.tables_initializer()
      self._initialize_local_vars = tf.local_variables_initializer()
      # No queues are allowed for eval models.
      self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
      assert not self.enqueue_ops
      self._input_stats_summary_interval_steps = (
          self._task.input.params.input_stats_summary_interval_steps)

    # Saves the params and the graph def.
    self._WriteToLog(self.params.ToText(), self._eval_dir, 'params.txt')
    if self.params.cluster.task == 0:
      tf.io.write_graph(self._graph.as_graph_def(), self._eval_dir,
                        '%s.pbtxt' % self._output_name)
def Start(self):
  """Starts the evaler thread running the main eval loop."""
  super().Start()
  self._RunLoop(self._job_name, self._Loop)
def _Loop(self):
  """The main loop."""
  with tf.container(
      self._container_id), self._cluster, self._GetSession() as sess:
    # This initializes local tables
    sess.run(self._initialize_tables)
    # This initializes local variables.
    sess.run(self._initialize_local_vars)
    self._InitializeTF2SummaryWriter(sess)
    self._task.input.Initialize(sess)

    # Either evaluate one pinned checkpoint, or poll the train dir.
    if self._eval_path:
      self._EvalOnce(sess, self._eval_path)
      self._UpdateProcessedCheckpoints(self._eval_dir, self._eval_path)
    elif self._task.params.eval.eval_all_checkpoints:
      self._RunOnAllCheckpoints(sess, self._EvalOnce, self._eval_dir)
    else:
      self._RunOnLatestCheckpoints(sess, self._EvalOnce, self._eval_dir)

  if self._should_report_metrics:
    tf.logging.info('Reporting trial done.')
    self._trial.ReportDone()
  tf.logging.info('Evaluation finished.')
def EvalLatestCheckpoint(self, last_path=None):
  """Runs eval once on the latest checkpoint.

  Args:
    last_path: the checkpoint path most recently evaluated, if any; used to
      skip re-evaluating the same checkpoint.
  """
  with tf.container(
      self._container_id), self._cluster, self._GetSession() as sess:
    # This initializes local tables
    sess.run(self._initialize_tables)
    # This initializes local variables.
    sess.run(self._initialize_local_vars)
    self._task.input.Initialize(sess)
    path = tf.train.latest_checkpoint(self._train_dir)
    if not path:
      tf.logging.info('No checkpoint available.')
      return
    elif path == last_path:
      tf.logging.info('Latest checkpoint was already evaluated.')
      return
    self._EvalOnce(sess, path)
def EvalCheckpoint(self, ckpt_id):
  """Runs eval once on the checkpoint with the given numeric id."""
  with tf.container(self._container_id), self._GetSession() as sess:
    # This initializes local tables
    sess.run(self._initialize_tables)
    # This initializes local variables.
    sess.run(self._initialize_local_vars)
    self._task.input.Initialize(sess)
    # Checkpoint filenames are zero-padded to 8 digits.
    path = '{}/ckpt-{:08d}'.format(self._train_dir, ckpt_id)
    self._EvalOnce(sess, path)
def _RemoveScalarSummaries(self, summaries):
  """Strips scalar (simple_value) entries from a serialized Summary proto.

  Args:
    summaries: a serialized tensorflow Summary proto (bytes).

  Returns:
    The serialized proto with all scalar summaries removed; non-scalar
    entries (images, audio, histograms, ...) are preserved.
  """
  proto = summary_pb2.Summary()
  proto.ParseFromString(summaries)
  # Iterate indices in reverse: deleting while enumerating forward skips
  # the element that shifts into the freed slot, so consecutive scalar
  # summaries would survive and later indices would be stale.
  for i in reversed(range(len(proto.value))):
    if proto.value[i].WhichOneof('value') == 'simple_value':
      del proto.value[i]
  return proto.SerializeToString()
def _EvalOnce(self, sess, path):
  """Runs evaluation for a batch of samples.

  Args:
    sess: the tf Session.
    path: checkpoint path.
  """
  if not FLAGS.evaler_in_same_address_as_controller:
    self._checkpointer.RestoreFromPath(sess, path)
  global_step = sess.run(py_utils.GetGlobalStep())

  # Save any additional information to disk before evaluation.
  if self._export:
    self._task.Export(path)

  # Check after how many steps checkpoint got saved.
  # And decide whether to run an evaluation.
  if global_step < self._task.params.eval.start_eval_after:
    return

  if self._task.input.params.resettable:
    tf.logging.info('Resetting input_generator.')
    self._task.input_generator.Reset(sess)

  metrics_dict = {
      name: metrics.AverageMetric() for name in self._task.eval_metrics
  }
  num_samples_metric = metrics_dict['num_samples_in_batch']
  samples_per_summary = self._task.params.eval.samples_per_summary
  # samples_per_summary == 0 means "evaluate the whole (resettable) epoch":
  # the loop below then runs until the input raises OutOfRangeError.
  if samples_per_summary == 0:
    assert self._task.input.params.resettable
  while samples_per_summary == 0 or (num_samples_metric.total_value <
                                     samples_per_summary):
    try:
      is_first_loop = num_samples_metric.total_value == 0
      # NOTE: We intentionally do not let FProp generate scalar summaries by
      # default, because evaler calls FProp multiple times for each
      # checkpoint. Multiple summaries at the same step is often confusing.
      # Instead, models should update eval_metrics and generate aggregate
      # summaries. Other types of summaries (images, audio etc.) will be
      # generated for the first eval batch.
      if self._summary_op is not None and is_first_loop:
        ans, summaries = sess.run([self._task.eval_metrics, self._summary_op])
        summaries = self._RemoveScalarSummaries(summaries)
        # Add non-scalar summaries only for the first batch of data.
        self._summary_writer.add_summary(summaries, global_step)
        self._summary_writer.flush()
      else:
        ans = sess.run(self._task.eval_metrics)
      for name, (value, weight) in ans.items():
        metrics_dict[name].Update(value, weight)
      tf.logging.info('Total examples done: %d/%d',
                      num_samples_metric.total_value, samples_per_summary)
    except tf.errors.OutOfRangeError:
      # End of a resettable epoch is the normal exit; anything else re-raises.
      if not self._task.input.params.resettable:
        raise
      break

  # Replace average values with total values for certain metrics.
  if 'num_predictions' in metrics_dict:
    metrics_dict['num_predictions'].total_weight = 1.0
  if 'num_words' in metrics_dict:
    metrics_dict['num_words'].total_weight = 1.0
  self._RunTF2SummaryOps(sess)
  summaries = {k: v.Summary(k) for k, v in metrics_dict.items()}
  summaries['total_samples'] = metrics.CreateScalarSummary(
      'total_samples', num_samples_metric.total_value)

  # When we have evaluated so many samples, generate a summary.
  self._WriteSummaries(
      self._summary_writer,
      os.path.basename(self._eval_dir),
      global_step,
      summaries,
      text_filename=os.path.join(self._eval_dir,
                                 'score-{:08d}.txt'.format(global_step)))

  # Get merged summaries for input data stats logged by the tasks's input
  # generator and write summaries for the stats.
  if self._task.input.merged_input_data_summary_op is not None:
    input_stats_summary_str = sess.run(
        self._task.input.merged_input_data_summary_op)
    self._WriteInputDataStatSummaries(input_stats_summary_str, global_step)

  if self._should_report_metrics:
    tf.logging.info('Reporting eval measure for step %d.' % global_step)
    self._trial.ReportEvalMeasure(global_step, metrics_dict, path)
def GetDecoderDir(logdir, decoder_type, model_task_name):
  """Returns the output directory for a decoder job.

  Args:
    logdir: base log directory.
    decoder_type: decoder job name, e.g. 'decoder_dev'.
    model_task_name: optional task name; when set it is appended to the
      directory name as '<decoder_type>_<model_task_name>'.

  Returns:
    The decoder output directory path under `logdir`.
  """
  suffix = ('%s_%s' % (decoder_type, model_task_name)
            if model_task_name else decoder_type)
  return os.path.join(logdir, suffix)
def _GetCheckpointIdForDecodeOut(ckpt_id_from_file, global_step):
  """Retrieve the checkpoint id for the decoder out file.

  Compares the checkpoint id found in the checkpoint file name to global
  step. If they diverge, uses the retrieved id and prints a warning.

  Args:
    ckpt_id_from_file: Checkpoint Id from the checkpoint file path.
    global_step: int specifying the global step of the model.

  Returns:
    Checkpoint id as int.
  """
  tf.logging.info('Loaded checkpoint is at global step: %d', global_step)
  tf.logging.info('Checkpoint id according to checkpoint path: %d',
                  ckpt_id_from_file)
  if global_step != ckpt_id_from_file:
    tf.logging.warning(
        'Checkpoint id %d != global step %d. '
        'Will use checkpoint id from checkpoint file for '
        'writing decoder output.', ckpt_id_from_file, global_step)
  # The filename-derived id always wins so output files match checkpoints.
  return ckpt_id_from_file
class Decoder(base_runner.BaseRunner):
  """Decoder."""

  def __init__(self, decoder_type, *args, **kwargs):
    """Builds the decode graph and summary/checkpoint plumbing.

    Args:
      decoder_type: name of the dataset split to decode, e.g. 'dev'.
      *args: forwarded to base_runner.BaseRunner.
      **kwargs: forwarded to base_runner.BaseRunner.
    """
    super().__init__(*args, **kwargs)
    self._job_name = 'decoder_' + decoder_type
    self.params.cluster.do_eval = True
    self._cluster = cluster_factory.Cluster(self.params.cluster)
    self._decoder_dir = GetDecoderDir(self._logdir, self._job_name,
                                      self._model_task_name)
    tf.io.gfile.makedirs(self._decoder_dir)
    # If set, decode exactly this checkpoint instead of polling _train_dir.
    self._decode_path = None
    # Multitask params doesn't have 'task'.
    if 'task' in self.params:
      self._decode_path = checkpointer.GetSpecificCheckpoint(
          self.params.task.eval.load_checkpoint_from)
    self._should_report_metrics = self._job_name.startswith(
        self._cluster.reporting_job)

    with self._graph.as_default(), tf.container(self._container_id):
      self._summary_writer = self._CreateSummaryWriter(self._decoder_dir)
      self._CreateTF2SummaryWriter(self._decoder_dir)
      with self._cluster, tf.device(
          self._cluster.GetPlacer()), self._TF2SummaryContext():
        self._model = self.params.Instantiate()
        self._params = self._model.params
        self._task = self._model.GetTask(self._model_task_name)
        # Note, different graphs are being constructed for different model
        # tasks, which may result in different node names being chosen.
        # Obviously, variable names have to stay the same between train and
        # decode.
        input_batch, self._dec_output = self._model.ConstructDecodeGraph(
            self._model_task_name)
        # CPU passthrough keys skip the TPU and come straight from the input
        # batch; don't clobber anything the decode graph already produced.
        for key in self._task.input_generator.GetCpuPassthroughKeys():
          if key in input_batch:
            if key in self._dec_output:
              tf.logging.warning(f'Key {key} already present in decode output. '
                                 f'Not adding from input batch.')
            else:
              self._dec_output[key] = input_batch[key]
        self._summary_op = tf.summary.merge_all()
        self._checkpointer = self._CreateCheckpointer(self._train_dir,
                                                      self._model)
      self._CreateTF2SummaryOps()
      self._initialize_tables = tf.tables_initializer()
      self._initialize_local_vars = tf.local_variables_initializer()
      # No queues are allowed for decoder models.
      self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
      assert not self.enqueue_ops

    # Saves the params and the graph def.
    self._WriteToLog(self.params.ToText(), self._decoder_dir, 'params.txt')
    if self.params.cluster.task == 0:
      tf.io.write_graph(self._graph.as_graph_def(), self._decoder_dir,
                        '%s.pbtxt' % self._job_name)
def Start(self):
  """Starts the decoder thread running the main decode loop."""
  super().Start()
  self._RunLoop(self._job_name, self._Loop)
def _Loop(self):
  """The main decode loop: initialize, then decode checkpoints as they come."""
  with tf.container(self._container_id), self._cluster, self._GetSession(
      inline=False) as sess:
    # This initializes local tables
    sess.run(self._initialize_tables)
    # This initializes local variables.
    sess.run(self._initialize_local_vars)
    self._InitializeTF2SummaryWriter(sess)
    self._task.input.Initialize(sess)

    # Either decode one pinned checkpoint, or poll the train dir.
    if self._decode_path:
      self.DecodeCheckpoint(sess, self._decode_path)
      py_utils.UpdateProcessedCheckpoints(self._decoder_dir,
                                          self._decode_path)
    elif self._task.params.eval.decode_all_checkpoints:
      self._RunOnAllCheckpoints(sess, self.DecodeCheckpoint,
                                self._decoder_dir)
    else:
      self._RunOnLatestCheckpoints(sess, self.DecodeCheckpoint,
                                   self._decoder_dir)

  if self._should_report_metrics:
    tf.logging.info('Reporting trial done.')
    self._trial.ReportDone()
  tf.logging.info('Decoding finished.')
@classmethod
def GetDecodeOutPath(cls, decoder_dir, checkpoint_id):
  """Gets the path to decode out file."""
  out_dir = cls._GetTtlDir(decoder_dir, duration='7d')
  filename = 'decoder_out_%09d' % checkpoint_id
  return os.path.join(out_dir, filename)
def GetCkptIdFromFile(self, checkpoint_path):
  """Extracts the integer checkpoint id from a checkpoint file path."""
  id_str = re.sub(r'.*ckpt-', '', checkpoint_path)
  return int(id_str)
def _RemoveScalarSummaries(self, summaries):
  """Strips scalar (simple_value) entries from a serialized Summary proto.

  Args:
    summaries: a serialized tf.Summary proto (bytes).

  Returns:
    The serialized proto with all scalar summaries removed; non-scalar
    entries (images, audio, histograms, ...) are preserved.
  """
  proto = tf.Summary()
  proto.ParseFromString(summaries)
  # Iterate indices in reverse: deleting while enumerating forward skips
  # the element that shifts into the freed slot, so consecutive scalar
  # summaries would survive and later indices would be stale.
  for i in reversed(range(len(proto.value))):
    if proto.value[i].WhichOneof('value') == 'simple_value':
      del proto.value[i]
  return proto.SerializeToString()
def DecodeCheckpoint(self, sess, checkpoint_path):
  """Decodes `samples_per_summary` examples using `checkpoint_path`.

  Args:
    sess: the tf Session.
    checkpoint_path: path of the checkpoint to restore and decode.
  """
  p = self._task.params
  ckpt_id_from_file = self.GetCkptIdFromFile(checkpoint_path)
  if ckpt_id_from_file < p.eval.start_decoder_after:
    return

  samples_per_summary = p.eval.decoder_samples_per_summary
  if samples_per_summary is None:
    samples_per_summary = p.eval.samples_per_summary
  # samples_per_summary == 0 means "decode the whole (resettable) epoch":
  # the loop below then runs until the input raises OutOfRangeError.
  if samples_per_summary == 0:
    assert self._task.input.params.resettable
  self._checkpointer.RestoreFromPath(sess, checkpoint_path)

  global_step = sess.run(py_utils.GetGlobalStep())

  if self._task.input.params.resettable:
    tf.logging.info('Resetting input_generator.')
    self._task.input.Reset(sess)

  dec_metrics = self._task.CreateDecoderMetrics()
  if not dec_metrics:
    tf.logging.info('Empty decoder metrics')
    return
  buffered_decode_out = []
  num_examples_metric = dec_metrics['num_samples_in_batch']
  start_time = time.time()
  while samples_per_summary == 0 or (num_examples_metric.total_value <
                                     samples_per_summary):
    try:
      is_first_loop = num_examples_metric.total_value == 0
      tf.logging.info('Fetching dec_output.')
      fetch_start = time.time()
      run_options = tf.RunOptions(report_tensor_allocations_upon_oom=False)
      # NOTE: We intentionally do not generate scalar summaries by
      # default, because decoder is run multiple times for each
      # checkpoint. Multiple summaries at the same step is often confusing.
      # Instead, models should generate aggregate summaries using
      # PostProcessDecodeOut. Other types of summaries (images, audio etc.)
      # will be generated for the first eval batch.
      if self._summary_op is not None and is_first_loop:
        dec_out, summaries = sess.run([self._dec_output, self._summary_op],
                                      options=run_options)
        summaries = self._RemoveScalarSummaries(summaries)
        # Add non-scalar summaries only for the first batch of data.
        self._summary_writer.add_summary(summaries, global_step)
        self._summary_writer.flush()
      else:
        dec_out = sess.run(self._dec_output, options=run_options)
      self._RunTF2SummaryOps(sess)
      post_process_start = time.time()
      tf.logging.info('Done fetching (%f seconds)' %
                      (post_process_start - fetch_start))
      decode_out = self._task.PostProcessDecodeOut(dec_out, dec_metrics)
      if decode_out:
        if isinstance(decode_out, dict):
          decode_out = decode_out.items()
        if is_first_loop:
          # Add summaries only for the first batch of data.
          for key, value in decode_out:
            if isinstance(value, tf.Summary):
              tf.logging.info(f'Adding summary {key} with tags '
                              f'{[x.tag for x in value.value]}.')
              self._summary_writer.add_summary(value, global_step)
          self._summary_writer.flush()
        # Summaries are written immediately above; only buffer the rest
        # for the decoder output file.
        buffered_decode_out.extend(
            kv for kv in decode_out if not isinstance(kv[1], tf.Summary))
      tf.logging.info(
          'Total examples done: %d/%d '
          '(%f seconds decode postprocess)', num_examples_metric.total_value,
          samples_per_summary,
          time.time() - post_process_start)
    except tf.errors.OutOfRangeError:
      # End of a resettable epoch is the normal exit; anything else re-raises.
      if not self._task.input.params.resettable:
        raise
      break
  tf.logging.info('Done decoding ckpt: %s', checkpoint_path)

  summaries = {k: v.Summary(k) for k, v in dec_metrics.items()}
  elapsed_secs = time.time() - start_time
  example_rate = num_examples_metric.total_value / elapsed_secs
  summaries['examples/sec'] = metrics.CreateScalarSummary(
      'examples/sec', example_rate)
  summaries['total_samples'] = metrics.CreateScalarSummary(
      'total_samples', num_examples_metric.total_value)
  self._WriteSummaries(
      self._summary_writer,
      os.path.basename(self._decoder_dir),
      global_step,
      summaries,
      text_filename=os.path.join(self._decoder_dir,
                                 'score-{:08d}.txt'.format(global_step)))
  self._ExportMetrics(
      # Metrics expects python int, but global_step is numpy.int64.
      decode_checkpoint=int(global_step),
      dec_metrics=dec_metrics,
      example_rate=example_rate)
  # global_step and the checkpoint id from the checkpoint file might be
  # different. For consistency of checkpoint filename and decoder_out
  # file, use the checkpoint id as derived from the checkpoint filename.
  checkpoint_id = _GetCheckpointIdForDecodeOut(ckpt_id_from_file, global_step)
  decode_out_path = self.GetDecodeOutPath(self._decoder_dir, checkpoint_id)

  decode_finalize_args = base_model.DecodeFinalizeArgs(
      decode_out_path=decode_out_path, decode_out=buffered_decode_out)
  self._task.DecodeFinalize(decode_finalize_args)

  if self._should_report_metrics:
    tf.logging.info('Reporting eval measure for step %d.' % global_step)
    self._trial.ReportEvalMeasure(global_step, dec_metrics, checkpoint_path)
def DecodeLatestCheckpoint(self, last_path=None):
  """Runs decoder on the latest checkpoint.

  Args:
    last_path: the checkpoint path most recently decoded, if any; used to
      skip re-decoding the same checkpoint.
  """
  with tf.container(
      self._container_id), self._cluster, self._GetSession() as sess:
    # This initializes local tables
    sess.run(self._initialize_tables)
    # This initializes local variables.
    sess.run(self._initialize_local_vars)
    self._task.input.Initialize(sess)
    path = tf.train.latest_checkpoint(self._train_dir)
    if not path:
      tf.logging.info('No checkpoint available.')
      return
    elif path == last_path:
      tf.logging.info('Latest checkpoint was already decoded.')
      return
    self.DecodeCheckpoint(sess, path)
|
run.py | '''
@Descripttion: 捉取整个网络的所有页面链接下来!
@Author: BerryBC
@Version: 0.3.0
@Date: 2020-02-02 11:15:41
@LastEditors: BerryBC
@LastEditTime: 2020-05-21 21:27:40
'''
from Lib.LMongoDB import claMongoDB
from Lib.LAddPage import claAddPage
from Lib.LLearn import claLearn
from configobj import ConfigObj
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import asyncio
import aiohttp
import threading
import time
import random
import gc
# --- Configuration: everything is read from the [param] section of the ini.
strCfgPath = './cfg/dbCfg.ini'
objParam = ConfigObj(strCfgPath)
intHowManyProxy = int(objParam['param']['HowManyProxy'])
intHowManyPageOneTime = int(objParam['param']['HowManyPageOneTime'])
intLessThenFail = int(objParam['param']['LessThenFail'])
intRequestTimeout = int(objParam['param']['RequestTimeout'])
intSemaphore = int(objParam['param']['Semaphore'])
intDeleteTime = int(objParam['param']['DeleteTime'])
intReusableRepeatTime = int(objParam['param']['ReusableRepeatTime'])
intNewRepeatTime = int(objParam['param']['NewRepeatTime'])
intDeleteRepeatTime = int(objParam['param']['DeleteRepeatTime'])
intReusableFreq = int(objParam['param']['ReusableFreq'])
intDeletFreq = int(objParam['param']['DeletFreq'])
intCreatClfFreq = int(objParam['param']['CreatFreq'])
strDirForClf = objParam['param']['ClfDir']
# Shared singletons: MongoDB wrapper, page-link collector, classifier trainer.
objLinkDB = claMongoDB(strCfgPath, 'mongodb')
objAddPage = claAddPage(objLinkDB)
objLearn = claLearn(objLinkDB, strDirForClf)
# HTTP headers for the (disabled) aiohttp path, kept for reference:
# dictHeader = {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.78 Safari/537.36',
#     'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
#     'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
#     'cache-control': 'no-cache',
#     'Pragma': 'no-cache'}
# Mutable loop counter (dict so nested functions can rebind its value).
dictNowRepeatTime = {'t': 28}
def funMain():
    """Top-level scheduler: runs crawl/cleanup/train jobs forever.

    On each pass, the counter dictNowRepeatTime['t'] is tested against the
    configured frequencies to pick which job runs: re-crawl reusable pages,
    delete stale pages, retrain the classifier, or crawl new pages.
    """
    # Earlier timer-driven variant, kept for reference:
    # funSpyReusablePage()
    # funSpyNewPage()
    # funDeleteOldPage()
    # if dictNowRepeatTime['t'] % intReusableFreq == 0:
    #     funSpyReusablePage()
    # elif dictNowRepeatTime['t'] % intDeletFreq == 0:
    #     funDeleteOldPage()
    # else:
    #     funSpyNewPage()
    # dictNowRepeatTime['t'] += 1
    # funMain()
    # Commented out temporarily for testing.
    while True:
        if dictNowRepeatTime['t'] % intReusableFreq == 0:
            funSpyReusablePage()
        elif dictNowRepeatTime['t'] % intDeletFreq == 0:
            funDeleteOldPage()
        elif dictNowRepeatTime['t'] % intCreatClfFreq == 0:
            # NOTE(review): funCreatClf is not defined in the visible part of
            # this file -- presumably defined elsewhere; confirm before use.
            funCreatClf()
        elif dictNowRepeatTime['t'] >= intReusableFreq*intDeletFreq*intCreatClfFreq:
            # Wrap the counter after one full cycle of all frequencies.
            dictNowRepeatTime['t'] = 1
        else:
            funSpyNewPage()
        dictNowRepeatTime['t'] += 1
    # threading.Thread(target=funSpyReusablePage).start()
    # threading.Thread(target=funSpyNewPage).start()
    # threading.Thread(target=funDeleteOldPage).start()
    # print(type(objLinkDB))
    # for dd in objLinkDB.LoadRandomLimit('proxydb',{"fail": {"$lte": 8}},20):
    #     print(dd)
    # print(objLinkDB.CheckOneExisit('proxydb',{'u':'106.85.133.109'}))
    # threading.Timer(60*intNewRepeatTime, funMain).start()
def funSpyReusablePage():
    """Re-crawls every URL stored in the 'pagedb-Reuseable' collection."""
    # try:
    objLinkDB.CleanMySelf()
    # intRandMin = random.randint(10, 60)*60
    # print(' Reusable sleep time is : '+str(intRandMin/60)+' mins')
    print(' Reusable begin : '+time.strftime('%Y-%m-%d %H:%M:%S'))
    arrTarget = []
    curTarget = objLinkDB.LoadAllData('pagedb-Reuseable')
    for eleTarget in curTarget:
        arrTarget.append(eleTarget['url'])
    # print(arrTarget)
    # Async variant, abandoned (see funSpyWeb), kept for reference:
    # loop = asyncio.new_event_loop()
    # asyncio.set_event_loop(loop)
    # semaphore = asyncio.Semaphore(intSemaphore)
    # waittask = asyncio.gather(
    #     *([funSpyWeb(strWebSite, semaphore) for strWebSite in arrTarget]))
    # loop.run_until_complete(waittask)
    # loop.close()
    for eleTarget in arrTarget:
        # time.sleep(intRandMin)
        funSpyWeb(eleTarget, "p")
    # except Exception as e:
    #     print(' Error of MongoDB at "funSpyReusablePage" ' +
    #           time.strftime('%Y-%m-%d %H:%M:%S'))
    # threading.Timer(60*intReusableRepeatTime, funSpyReusablePage).start()
    print(' Reusable end : '+time.strftime('%Y-%m-%d %H:%M:%S'))
def funSpyNewPage():
    """Crawls a random batch of not-yet-crawled pages per configured root.

    For each root entry in 'pagedb-Custom', picks up to
    intHowManyPageOneTime uncrawled URLs matching the root's pattern,
    marks them crawled, and fetches them with the root's tag.
    """
    # try:
    objLinkDB.CleanMySelf()
    print(' New begin : '+time.strftime('%Y-%m-%d %H:%M:%S'))
    arrTarget = []
    curRoot = objLinkDB.LoadAllData('pagedb-Custom')
    for eleRoot in curRoot:
        strRURL = eleRoot['rURL']
        strTag = eleRoot['tag']
        # 'ced' == "crawled" flag; mark before fetching so a crash does not
        # re-queue the same URL forever.
        curTarget = objLinkDB.LoadRandomLimit(
            'pagedb-Crawled', {'url': {'$regex': strRURL, '$options': "i"}, "ced": False}, intHowManyPageOneTime)
        for eleTarget in curTarget:
            objLinkDB.UpdateOneData(
                'pagedb-Crawled', {'_id': eleTarget['_id']}, {'ced': True})
            # arrTarget.append(eleTarget['url'])
            funSpyWeb(eleTarget['url'], strTag)
        # print(arrTarget)
        del curTarget
        gc.collect()
    # Async variant, abandoned (see funSpyWeb), kept for reference:
    # loop = asyncio.new_event_loop()
    # asyncio.set_event_loop(loop)
    # semaphore = asyncio.Semaphore(intSemaphore)
    # waittask = asyncio.gather(
    #     *([funSpyWeb(strWebSite, semaphore) for strWebSite in arrTarget]))
    # loop.run_until_complete(waittask)
    # loop.close()
    # for eleTarget in arrTarget:
    #     funSpyWeb(eleTarget)
    # except Exception as e:
    #     print(' Error of MongoDB at "funSpyNewPage" ' +
    #           time.strftime('%Y-%m-%d %H:%M:%S'))
    # threading.Timer(60*intNewRepeatTime, funSpyNewPage).start()
    print(' New end : '+time.strftime('%Y-%m-%d %H:%M:%S'))
def funDeleteOldPage():
    """Purge expired documents from the crawl and sample collections.

    Crawled-but-unqueued URLs older than intDeleteTime ms are removed;
    unclassified sampled content is kept three times as long.
    """
    objLinkDB.CleanMySelf()
    print(' Delete begin : '+time.strftime('%Y-%m-%d %H:%M:%S'))
    intNowMs = int(time.time()*1000)
    # Expired crawled URLs that were never picked up ('ced': False).
    resUrls = objLinkDB.DeleteSome(
        'pagedb-Crawled', {'t': {'$lt': intNowMs-intDeleteTime}, 'ced': False})
    print(' Delete URL Number : ' + str(resUrls.deleted_count))
    # Expired unclassified content ('cf': False); triple the retention window.
    resContent = objLinkDB.DeleteSome(
        'sampledb', {'t': {'$lt': (intNowMs-(intDeleteTime)*3)}, 'cf': False})
    print(' Delete Content Number : ' + str(resContent.deleted_count))
    print(' Delete end : '+time.strftime('%Y-%m-%d %H:%M:%S'))
# I gave up on async -- sticking with a synchronous fetch instead.
# (was: async def funSpyWeb(eleWeb, inSemaphore))
def funSpyWeb(eleWeb, strInTag):
    """Fetch one page through a random proxy with headless Chrome and store
    its links and selected text content.

    :param eleWeb: URL to crawl.
    :param strInTag: CSS selector for the content tags to extract (e.g. 'p').

    Draws a random batch of proxies whose failure count is acceptable and
    retries with the next one while the page renders empty or the driver
    raises, until the batch is exhausted.
    """
    bolRetry = True
    intTryTime = 0
    curProxy = objLinkDB.LoadRandomLimit(
        'proxydb', {"fail": {"$lte": intLessThenFail}}, intHowManyProxy)
    arrProxy = list(curProxy)
    intProxyLen = len(arrProxy)
    del curProxy
    gc.collect()

    # Retry until success or all drawn proxies are used up.
    while bolRetry and (intTryTime < intProxyLen):
        browserChorme = None
        try:
            strProxyToSpy = "http://" + \
                arrProxy[intTryTime]["u"] + \
                ":" + arrProxy[intTryTime]["p"]
            options = _funBuildChromeOptions(strProxyToSpy)
            browserChorme = webdriver.Chrome(
                '/usr/bin/chromedriver', chrome_options=options)
            browserChorme.set_page_load_timeout(intRequestTimeout)
            browserChorme.set_script_timeout(intRequestTimeout)
            browserChorme.implicitly_wait(intRequestTimeout*4.5)
            browserChorme.get(eleWeb)
            strhtml = browserChorme.page_source
            if strhtml != '<html><head></head><body></body></html>':
                # Give client-side JS time to render, then re-read the DOM.
                time.sleep(int(intRequestTimeout*4))
                strhtml = browserChorme.page_source
                _funQuitBrowser(browserChorme)
                soup = BeautifulSoup(strhtml, 'lxml')
                for eleA in soup.select('a'):
                    objAddPage.AddToDB(eleA.get('href'), eleWeb)
                arrWebP = soup.select(strInTag)
                intJudEmo = objLearn.JudContent(arrWebP, False)
                objAddPage.AddPContent(arrWebP, eleWeb, intJudEmo)
                bolRetry = False
            else:
                # Empty shell page: burn several attempts so a dead proxy
                # pool gives up quickly (preserves the original +8 step).
                intTryTime += 8
                _funQuitBrowser(browserChorme)
        except Exception:
            # BUG FIX: the old handler unconditionally called
            # browserChorme.close(), raising NameError when webdriver.Chrome()
            # itself had failed, which crashed the caller's crawl loop.
            intTryTime += 1
            _funQuitBrowser(browserChorme)
        finally:
            # Drop references so Chrome/Options objects can be collected.
            # (The old 'del options' could itself NameError when option
            # construction failed; plain rebinding is always safe.)
            options = None
            browserChorme = None


def _funQuitBrowser(browserChorme):
    """Best-effort close/quit of a Chrome driver; tolerates a dead session."""
    if browserChorme is None:
        return
    try:
        browserChorme.close()
        browserChorme.quit()
    except Exception:
        # Session already gone -- nothing left to clean up.
        pass


def _funBuildChromeOptions(strProxyToSpy):
    """Build the headless-Chrome Options used for every crawl attempt.

    :param strProxyToSpy: 'http://host:port' proxy URL; applied for roughly
        28 out of every 29 requests (the rest go out direct).
    """
    options = Options()
    # Silence site notification prompts.
    prefs = {'profile.default_content_setting_values': {
        'notifications': 2}}
    options.add_experimental_option('prefs', prefs)
    for strArg in (
            '--no-sandbox',
            '--disable-gpu',
            '--hide-scrollbars',
            'blink-settings=imagesEnabled=false',
            '--headless',
            '--incognito',
            '--ignore-certificate-errors',
            '--disable-software-rasterizer',
            '--disable-extensions',
            '--user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36"',
            '--window-size=1280x1024',
            '--start-maximized',
            '--disable-infobars'):
        options.add_argument(strArg)
    if random.randint(0, 28) != 1:
        options.add_argument('--proxy-server='+strProxyToSpy)
    return options
def funCreatClf():
    """Rebuild the content classifier from the current sample data."""
    objLinkDB.CleanMySelf()
    print(' CreatClf begin : '+time.strftime('%Y-%m-%d %H:%M:%S'))
    objLearn.CreatNewClf()
    print(' CreatClf end : '+time.strftime('%Y-%m-%d %H:%M:%S'))
if __name__ == "__main__":
    # Entry point: funMain() (defined earlier in this file) wires up the
    # crawl/clean/classify schedule.
    print('Program begin : '+time.strftime('%Y-%m-%d %H:%M:%S'))
    funMain()
|
caching_test.py | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""st.caching unit tests."""
import threading
import types
import unittest
from unittest.mock import patch, Mock
from parameterized import parameterized
import streamlit as st
from streamlit import caching
from streamlit import hashing
from streamlit.elements import exception
from streamlit.error_util import _GENERIC_UNCAUGHT_EXCEPTION_TEXT
from streamlit.proto.Alert_pb2 import Alert
from streamlit.proto.Exception_pb2 import Exception as ExceptionProto
from tests import testutil
class CacheTest(testutil.DeltaGeneratorTestCase):
    """Tests for @st.cache: hits/misses, mutation warnings, st.* call
    warnings, eviction by max_entries/ttl, and hash_funcs handling."""

    def tearDown(self):
        # Some of these tests reach directly into _cache_info and twiddle it.
        # Reset default values on teardown.
        st.caching._cache_info.cached_func_stack = []
        st.caching._cache_info.suppress_st_function_warning = 0
        super().tearDown()

    def test_simple(self):
        @st.cache
        def foo():
            return 42

        self.assertEqual(foo(), 42)
        self.assertEqual(foo(), 42)

    def test_multiple_int_like_floats(self):
        # Distinct float keys must not collide in the cache.
        @st.cache
        def foo(x):
            return x

        self.assertEqual(foo(1.0), 1.0)
        self.assertEqual(foo(3.0), 3.0)

    @patch.object(st, "exception")
    def test_args(self, exception):
        # `called` tracks whether the wrapped function body actually ran,
        # i.e. whether the call was a cache miss.
        called = [False]

        @st.cache
        def f(x):
            called[0] = True
            return x

        self.assertFalse(called[0])
        f(0)
        self.assertTrue(called[0])

        called = [False]  # Reset called
        f(0)
        # Same argument again: cache hit, body must not run.
        self.assertFalse(called[0])

        f(1)
        self.assertTrue(called[0])

        exception.assert_not_called()

    @patch.object(st, "exception")
    def test_mutate_return(self, exception):
        # Mutating a cached return value should trigger st.exception on the
        # next call (CachedObjectMutationWarning path).
        @st.cache
        def f():
            return [0, 1]

        r = f()
        r[0] = 1

        exception.assert_not_called()

        r2 = f()

        exception.assert_called()

        self.assertEqual(r, r2)

    @patch.object(st, "exception")
    def test_mutate_args(self, exception):
        # Mutating an *argument* inside the cached function is allowed and
        # changes the cache key, so the two calls return different values.
        @st.cache
        def foo(d):
            d["answer"] += 1
            return d["answer"]

        d = {"answer": 0}

        self.assertNotEqual(foo(d), foo(d))

        exception.assert_not_called()

    @patch("streamlit.caching._show_cached_st_function_warning")
    def test_cached_st_function_warning(self, warning):
        st.text("foo")
        warning.assert_not_called()

        # Calling an st.* function inside a cached function warns.
        @st.cache
        def cached_func():
            st.text("Inside cached func")

        cached_func()
        warning.assert_called_once()

        warning.reset_mock()

        # Make sure everything got reset properly
        st.text("foo")
        warning.assert_not_called()

        # Test warning suppression
        @st.cache(suppress_st_warning=True)
        def suppressed_cached_func():
            st.text("No warnings here!")

        suppressed_cached_func()
        warning.assert_not_called()

        # Test nested st.cache functions
        @st.cache
        def outer():
            @st.cache
            def inner():
                st.text("Inside nested cached func")

            return inner()

        outer()
        warning.assert_called_once()

        warning.reset_mock()

        # Test st.cache functions that raise errors
        with self.assertRaises(RuntimeError):

            @st.cache
            def cached_raise_error():
                st.text("About to throw")
                raise RuntimeError("avast!")

            cached_raise_error()

        warning.assert_called_once()
        warning.reset_mock()

        # Make sure everything got reset properly
        st.text("foo")
        warning.assert_not_called()

        # Test st.cache functions with widgets
        @st.cache
        def cached_widget():
            st.button("Press me!")

        cached_widget()
        warning.assert_called_once()

        warning.reset_mock()

        # Make sure everything got reset properly
        st.text("foo")
        warning.assert_not_called()

    def test_multithread_stack(self):
        """Test that cached_func_stack behaves properly in multiple threads."""

        def get_counter():
            return len(caching._cache_info.cached_func_stack)

        def set_counter(val):
            caching._cache_info.cached_func_stack = ["foo"] * val

        self.assertEqual(0, get_counter())
        set_counter(1)
        self.assertEqual(1, get_counter())

        values_in_thread = []

        def thread_test():
            values_in_thread.append(get_counter())
            set_counter(55)
            values_in_thread.append(get_counter())

        thread = threading.Thread(target=thread_test)
        thread.start()
        thread.join()

        # cached_func_stack is thread-local: the new thread starts at 0.
        self.assertEqual([0, 55], values_in_thread)

        # The other thread should not have modified the main thread
        self.assertEqual(1, get_counter())

    def test_max_size(self):
        """The oldest object should be evicted when maxsize is reached."""
        # Create 2 cached functions to test that they don't interfere
        # with each other.
        foo_vals = []

        @st.cache(max_entries=2)
        def foo(x):
            foo_vals.append(x)
            return x

        bar_vals = []

        @st.cache(max_entries=3)
        def bar(x):
            bar_vals.append(x)
            return x

        self.assertEqual([], foo_vals)
        self.assertEqual([], bar_vals)

        # Stick two items in both caches. foo will be filled.
        foo(0), foo(1)
        bar(0), bar(1)
        self.assertEqual([0, 1], foo_vals)
        self.assertEqual([0, 1], bar_vals)

        # 0, 1 are already cached, so called_values shouldn't change.
        foo(0), foo(1)
        bar(0), bar(1)
        self.assertEqual([0, 1], foo_vals)
        self.assertEqual([0, 1], bar_vals)

        # Add a new item to the cache.
        # foo: 0 should be evicted; 1 and 2 should be present.
        # bar: 0, 1, 2 present.
        foo(2)
        bar(2)

        # foo(0) again should cause 0 to be added again, since it was
        # previously evicted. Nothing will have been evicted from bar.
        foo(1), foo(0)
        bar(1), bar(0)
        self.assertEqual([0, 1, 2, 0], foo_vals)
        self.assertEqual([0, 1, 2], bar_vals)

    # Reduce the huge amount of logspam we get from hashing/caching
    @patch("streamlit.hashing._LOGGER.debug")
    @patch("streamlit.caching._LOGGER.debug")
    def test_no_max_size(self, _1, _2):
        """If max_size is None, the cache is unbounded."""
        called_values = []

        @st.cache(max_entries=None)
        def f(x):
            called_values.append(x)
            return x

        # Stick a bunch of items in the cache.
        for ii in range(256):
            f(ii)

        # Clear called_values, and test that accessing the same bunch of
        # items doesn't result in f() being called.
        called_values = []
        for ii in range(256):
            f(ii)
        self.assertEqual([], called_values)

    @patch("streamlit.caching._TTLCACHE_TIMER")
    def test_ttl(self, timer_patch):
        """Entries should expire after the given ttl."""
        # Create 2 cached functions to test that they don't interfere
        # with each other.
        foo_vals = []

        @st.cache(ttl=1)
        def foo(x):
            foo_vals.append(x)
            return x

        bar_vals = []

        @st.cache(ttl=5)
        def bar(x):
            bar_vals.append(x)
            return x

        # Store a value at time 0
        timer_patch.return_value = 0
        foo(0)
        bar(0)
        self.assertEqual([0], foo_vals)
        self.assertEqual([0], bar_vals)

        # Advance our timer, but not enough to expire our value.
        timer_patch.return_value = 0.5
        foo(0)
        bar(0)
        self.assertEqual([0], foo_vals)
        self.assertEqual([0], bar_vals)

        # Advance our timer enough to expire foo, but not bar.
        timer_patch.return_value = 1.5
        foo(0)
        bar(0)
        self.assertEqual([0, 0], foo_vals)
        self.assertEqual([0], bar_vals)

    def test_clear_cache(self):
        """Clear cache should do its thing."""
        foo_vals = []

        @st.cache
        def foo(x):
            foo_vals.append(x)
            return x

        bar_vals = []

        @st.cache
        def bar(x):
            bar_vals.append(x)
            return x

        foo(0), foo(1), foo(2)
        bar(0), bar(1), bar(2)
        self.assertEqual([0, 1, 2], foo_vals)
        self.assertEqual([0, 1, 2], bar_vals)

        # Clear the cache and access our original values again. They
        # should be recomputed.
        caching.clear_cache()

        foo(0), foo(1), foo(2)
        bar(0), bar(1), bar(2)
        self.assertEqual([0, 1, 2, 0, 1, 2], foo_vals)
        self.assertEqual([0, 1, 2, 0, 1, 2], bar_vals)

    def test_unique_function_caches(self):
        """Each function should have its own cache, even if it has an
        identical body and arguments to another cached function.
        """

        @st.cache
        def foo():
            return []

        @st.cache
        def bar():
            return []

        id_foo = id(foo())
        id_bar = id(bar())
        self.assertNotEqual(id_foo, id_bar)

    def test_function_body_uses_hashfuncs(self):
        hash_func = Mock(return_value=None)

        # This is an external object that's referenced by our
        # function. It cannot be hashed (without a custom hashfunc).
        dict_gen = {1: (x for x in range(1))}

        @st.cache(hash_funcs={"builtins.generator": hash_func})
        def foo(arg):
            # Reference the generator object. It will be hashed when we
            # hash the function body to generate foo's cache_key.
            print(dict_gen)
            return []

        foo(1)
        foo(2)
        hash_func.assert_called_once()

    def test_function_body_uses_nested_listcomps(self):
        @st.cache()
        def foo(arg):
            production = [[outer + inner for inner in range(3)] for outer in range(3)]
            return production

        # make sure st.cache() doesn't crash, per https://github.com/streamlit/streamlit/issues/2305
        self.assertEqual(foo(1), [[0, 1, 2], [1, 2, 3], [2, 3, 4]])

    def test_function_name_does_not_use_hashfuncs(self):
        """Hash funcs should only be used on arguments to a function,
        and not when computing the key for a function's unique MemCache.
        """

        str_hash_func = Mock(return_value=None)

        @st.cache(hash_funcs={str: str_hash_func})
        def foo(string_arg):
            return []

        # If our str hash_func is called multiple times, it's probably because
        # it's being used to compute the function's cache_key (as opposed to
        # the value_key). It should only be used to compute the value_key!
        foo("ahoy")
        str_hash_func.assert_called_once_with("ahoy")
# Temporarily turn off these tests since there's no Cache object in __init__
# right now.  (Methods are prefixed off_ so unittest does not collect them.)
class CachingObjectTest(unittest.TestCase):
    def off_test_simple(self):
        val = 42

        for _ in range(2):
            c = st.Cache()
            if c:
                c.value = val

            self.assertEqual(c.value, val)

    def off_test_allow_output_mutation(self):
        val = 42

        for _ in range(2):
            c = st.Cache(allow_output_mutation=True)
            if c:
                c.value = val

            self.assertEqual(c.value, val)

    def off_test_has_changes(self):
        val = 42

        for _ in range(2):
            c = st.Cache()
            if c.has_changes():
                c.value = val

            self.assertEqual(c.value, val)

    @patch.object(st, "exception")
    def off_test_mutate(self, exception):
        for _ in range(2):
            c = st.Cache()
            if c:
                c.value = [0, 1]

            c.value[0] = 1

        exception.assert_called()
class CacheErrorsTest(testutil.DeltaGeneratorTestCase):
    """Make sure user-visible error messages look correct.

    These errors are a little annoying to test, but they're important! So we
    are testing them word-for-word as much as possible. Even though this
    *feels* like an antipattern, it isn't: we're making sure the codepaths
    that pull useful debug info from the code are working.
    """

    def test_st_warning_text(self):
        @st.cache
        def st_warning_text_func():
            st.markdown("hi")

        st_warning_text_func()

        # -2: the warning element precedes the markdown element in the queue.
        el = self.get_delta_from_queue(-2).new_element
        self.assertEqual(el.exception.type, "CachedStFunctionWarning")
        self.assertEqual(
            normalize_md(el.exception.message),
            normalize_md(
                """
Your script uses `st.markdown()` or `st.write()` to write to your Streamlit app
from within some cached code at `st_warning_text_func()`. This code will only be
called when we detect a cache "miss", which can lead to unexpected results.
How to fix this:
* Move the `st.markdown()` or `st.write()` call outside `st_warning_text_func()`.
* Or, if you know what you're doing, use `@st.cache(suppress_st_warning=True)`
to suppress the warning.
            """
            ),
        )
        self.assertNotEqual(len(el.exception.stack_trace), 0)
        self.assertEqual(el.exception.message_is_markdown, True)
        self.assertEqual(el.exception.is_warning, True)

        el = self.get_delta_from_queue(-1).new_element
        self.assertEqual(el.markdown.body, "hi")

    @parameterized.expand([(True,), (False,)])
    def test_mutation_warning_text(self, show_error_details: bool):
        with testutil.patch_config_options(
            {"client.showErrorDetails": show_error_details}
        ):

            @st.cache
            def mutation_warning_func():
                return []

            a = mutation_warning_func()
            a.append("mutated!")
            mutation_warning_func()

            if show_error_details:
                # Full warning with remediation advice is shown.
                el = self.get_delta_from_queue(-1).new_element
                self.assertEqual(el.exception.type, "CachedObjectMutationWarning")
                self.assertEqual(
                    normalize_md(el.exception.message),
                    normalize_md(
                        """
Return value of `mutation_warning_func()` was mutated between runs.
By default, Streamlit\'s cache should be treated as immutable, or it may behave
in unexpected ways. You received this warning because Streamlit detected that
an object returned by `mutation_warning_func()` was mutated outside of
`mutation_warning_func()`.
How to fix this:
* If you did not mean to mutate that return value:
- If possible, inspect your code to find and remove that mutation.
- Otherwise, you could also clone the returned value so you can freely
mutate it.
* If you actually meant to mutate the return value and know the consequences of
doing so, annotate the function with `@st.cache(allow_output_mutation=True)`.
For more information and detailed solutions check out [our
documentation.](https://docs.streamlit.io/en/latest/caching.html)
                """
                    ),
                )
                self.assertNotEqual(len(el.exception.stack_trace), 0)
                self.assertEqual(el.exception.message_is_markdown, True)
                self.assertEqual(el.exception.is_warning, True)
            else:
                # Details suppressed: a generic error alert is shown instead.
                el = self.get_delta_from_queue(-1).new_element
                self.assertEqual(el.WhichOneof("type"), "alert")
                self.assertEqual(el.alert.format, Alert.ERROR)
                self.assertEqual(el.alert.body, _GENERIC_UNCAUGHT_EXCEPTION_TEXT)

    def test_unhashable_type(self):
        @st.cache
        def unhashable_type_func():
            return threading.Lock()

        with self.assertRaises(hashing.UnhashableTypeError) as cm:
            unhashable_type_func()

        ep = ExceptionProto()
        exception.marshall(ep, cm.exception)

        self.assertEqual(ep.type, "UnhashableTypeError")
        # startswith: the message tail contains a repr with a memory address.
        self.assertTrue(
            normalize_md(ep.message).startswith(
                normalize_md(
                    """
Cannot hash object of type `_thread.lock`, found in the return value of
`unhashable_type_func()`.
While caching the return value of `unhashable_type_func()`, Streamlit
encountered an object of type `_thread.lock`, which it does not know how to
hash.
To address this, please try helping Streamlit understand how to hash that type
by passing the `hash_funcs` argument into `@st.cache`. For example:
```
@st.cache(hash_funcs={_thread.lock: my_hash_func})
def my_func(...):
    ...
```
If you don't know where the object of type `_thread.lock` is coming
from, try looking at the hash chain below for an object that you do recognize,
then pass that to `hash_funcs` instead:
```
Object of type _thread.lock:
            """
                )
            )
        )

        # Stack trace doesn't show in test :(
        # self.assertNotEqual(len(ep.stack_trace), 0)
        self.assertEqual(ep.message_is_markdown, True)
        self.assertEqual(ep.is_warning, False)

    def test_hash_funcs_acceptable_keys(self):
        # hash_funcs keys may be given as a type or as its dotted-path string;
        # both forms must behave identically.
        @st.cache
        def unhashable_type_func():
            return (x for x in range(1))

        @st.cache(hash_funcs={types.GeneratorType: id})
        def hf_key_as_type():
            return (x for x in range(1))

        @st.cache(hash_funcs={"builtins.generator": id})
        def hf_key_as_str():
            return (x for x in range(1))

        with self.assertRaises(hashing.UnhashableTypeError) as cm:
            unhashable_type_func()

        self.assertEqual(list(hf_key_as_type()), list(hf_key_as_str()))

    def test_user_hash_error(self):
        class MyObj(object):
            pass

        def bad_hash_func(x):
            x += 10  # Throws a TypeError since x has type MyObj.
            return x

        @st.cache(hash_funcs={MyObj: bad_hash_func})
        def user_hash_error_func(x):
            pass

        with self.assertRaises(hashing.UserHashError) as cm:
            my_obj = MyObj()
            user_hash_error_func(my_obj)

        ep = ExceptionProto()
        exception.marshall(ep, cm.exception)

        self.assertEqual(ep.type, "TypeError")
        self.assertTrue(
            normalize_md(ep.message).startswith(
                normalize_md(
                    """
unsupported operand type(s) for +=: 'MyObj' and 'int'
This error is likely due to a bug in `bad_hash_func()`, which is a
user-defined hash function that was passed into the `@st.cache` decorator of
`user_hash_error_func()`.
`bad_hash_func()` failed when hashing an object of type
`caching_test.CacheErrorsTest.test_user_hash_error.<locals>.MyObj`.  If you
don't know where that object is coming from, try looking at the hash chain
below for an object that you do recognize, then pass that to `hash_funcs` instead:
```
Object of type caching_test.CacheErrorsTest.test_user_hash_error.<locals>.MyObj:
<caching_test.CacheErrorsTest.test_user_hash_error.<locals>.MyObj object at
            """
                )
            )
        )

        # Stack trace doesn't show in test :(
        # self.assertNotEqual(len(ep.stack_trace), 0)
        self.assertEqual(ep.message_is_markdown, True)
        self.assertEqual(ep.is_warning, False)
def normalize_md(txt):
    """Replace newlines *inside paragraphs* with spaces.

    Consecutive lines of text are considered part of the same paragraph
    in Markdown. So this function joins those into a single line to make the
    test robust to changes in text wrapping.

    NOTE: This function doesn't attempt to be 100% grammatically correct
    Markdown! It's just supposed to be "correct enough" for tests to pass. For
    example, when we guard "\n\n" from being converted, we really should be
    guarding for RegEx("\n\n+") instead. But that doesn't matter for our tests.
    """
    # Spans that must survive the newline->space pass, each mapped to a
    # temporary placeholder and its restored form.  Order matters: "\n\n"
    # must be guarded before "\n*" / "\n-".
    guarded_spans = [
        # (original, placeholder, restored)
        ("\n\n", "OMG_NEWLINE", "\n\n"),   # paragraph breaks
        ("\n*", "OMG_STAR", "\n*"),        # bullet lists
        ("\n-", "OMG_HYPHEN", "\n-"),      # hyphen lists
        ("]\n(", "OMG_LINK", "]("),        # links broken over two lines
    ]

    for original, placeholder, _ in guarded_spans:
        txt = txt.replace(original, placeholder)

    # Collapse every remaining newline into a space.
    txt = txt.replace("\n", " ")

    for _, placeholder, restored in guarded_spans:
        txt = txt.replace(placeholder, restored)

    return txt.strip()
|
__init__.py | # -*- coding: utf-8 -*-
'''
Set up the Salt integration test suite
'''
# Import Python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import json
import time
import stat
import errno
import signal
import shutil
import pprint
import atexit
import socket
import logging
import tempfile
import threading
import subprocess
import multiprocessing
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
# Import salt tests support dirs
from tests.support.paths import * # pylint: disable=wildcard-import
from tests.support.processes import * # pylint: disable=wildcard-import
from tests.support.unit import TestCase
from tests.support.case import ShellTestCase
from tests.support.parser import PNUM, print_header, SaltTestcaseParser
from tests.support.helpers import requires_sshd_server, RedirectStdStreams
from tests.support.paths import ScriptPathMixin
from tests.support.mixins import CheckShellBinaryNameAndVersionMixin, ShellCaseCommonTestsMixin
from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTestCaseMixin
from tests.support.mixins import SaltMinionEventAssertsMixin, SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt
import salt.config
import salt.minion
import salt.runner
import salt.output
import salt.version
import salt.utils
import salt.utils.process
import salt.log.setup as salt_log_setup
from salt.ext import six
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt.utils.nb_popen import NonBlockingPopen
from salt.exceptions import SaltClientError
try:
import salt.master
except ImportError:
# Not required for raet tests
pass
# Import 3rd-party libs
import yaml
import msgpack
import salt.ext.six as six
from salt.ext.six.moves import cStringIO
try:
import salt.ext.six.moves.socketserver as socketserver
except ImportError:
import socketserver
from tornado import gen
from tornado import ioloop
# Import salt tests support libs
from tests.support.processes import SaltMaster, SaltMinion, SaltSyndic
log = logging.getLogger(__name__)
_RUNTESTS_PORTS = {}
def get_unused_localhost_port():
    '''
    Return a random unused port on localhost

    Binds to port 0 so the OS picks a free port.  Ports that collide with
    the hardcoded test-suite ports are rejected by recursing for a new one.
    On macOS the socket is registered in _RUNTESTS_PORTS and kept open to
    reserve the port (closed at exit by close_open_sockets); on Darwin/BSD
    the socket is closed immediately.
    '''
    usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
    usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    usock.bind(('127.0.0.1', 0))
    port = usock.getsockname()[1]
    if port in (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521):
        # These ports are hardcoded in the test configuration
        port = get_unused_localhost_port()
        usock.close()
        return port

    DARWIN = True if sys.platform.startswith('darwin') else False
    BSD = True if 'bsd' in sys.platform else False

    if DARWIN and port in _RUNTESTS_PORTS:
        # Already handed out this port during this run; try again.
        port = get_unused_localhost_port()
        usock.close()
        return port

    _RUNTESTS_PORTS[port] = usock

    if DARWIN or BSD:
        usock.close()

    return port
def close_open_sockets(sockets_dict):
    '''
    Close and discard every socket held in ``sockets_dict``, emptying it.
    Registered via atexit to release the ports reserved on macOS.
    '''
    reserved_ports = list(sockets_dict)
    for reserved_port in reserved_ports:
        reserved_sock = sockets_dict.pop(reserved_port)
        reserved_sock.close()
# Release any ports still reserved (kept open on macOS) when the run exits.
atexit.register(close_open_sockets, _RUNTESTS_PORTS)

# Port the threaded log-forwarding server (ThreadedSocketServer) listens on.
SALT_LOG_PORT = get_unused_localhost_port()
class ThreadingMixIn(socketserver.ThreadingMixIn):
    # Daemon threads so in-flight request handlers never block process exit.
    daemon_threads = True
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer):
    '''
    Threaded TCP server that receives log records from the spawned daemons;
    exposes a ``shutting_down`` event so handlers can stop reading cleanly.
    '''
    allow_reuse_address = True

    def server_activate(self):
        # Created here (not __init__) so it exists before any handler runs.
        self.shutting_down = threading.Event()
        socketserver.TCPServer.server_activate(self)
        #super(ThreadedSocketServer, self).server_activate()

    def server_close(self):
        # hasattr guard: server_close may be called before server_activate.
        if hasattr(self, 'shutting_down'):
            self.shutting_down.set()
        socketserver.TCPServer.server_close(self)
        #super(ThreadedSocketServer, self).server_close()
class SocketServerRequestHandler(socketserver.StreamRequestHandler):
    # Streams msgpack-encoded log records off the wire and replays them
    # through the local logging machinery under each record's logger name.
    def handle(self):
        unpacker = msgpack.Unpacker(encoding='utf-8')
        while not self.server.shutting_down.is_set():
            try:
                wire_bytes = self.request.recv(1024)
                if not wire_bytes:
                    # Peer closed the connection.
                    break
                unpacker.feed(wire_bytes)
                for record_dict in unpacker:
                    record = logging.makeLogRecord(record_dict)
                    logger = logging.getLogger(record.name)
                    logger.handle(record)
                    del record_dict
            except (EOFError, KeyboardInterrupt, SystemExit):
                break
            except socket.error as exc:
                try:
                    # errno.WSAECONNRESET only exists on Windows builds.
                    if exc.errno == errno.WSAECONNRESET:
                        # Connection reset on windows
                        break
                except AttributeError:
                    # We're not on windows
                    pass
                log.exception(exc)
            except Exception as exc:
                log.exception(exc)
class TestDaemon(object):
'''
Set up the master and minion daemons, and run related cases
'''
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120
    def __init__(self, parser):
        # parser: the runtests option parser; its options drive which
        # transport/daemons get started in __enter__.
        self.parser = parser
        self.colors = salt.utils.get_colors(self.parser.options.no_colors is False)
        if salt.utils.is_windows():
            # There's no shell color support on windows...
            for key in self.colors:
                self.colors[key] = ''
    def __enter__(self):
        '''
        Start a master and minion
        '''
        # Setup the multiprocessing logging queue listener
        salt_log_setup.setup_multiprocessing_logging_listener(
            self.master_opts
        )

        # Set up PATH to mockbin
        self._enter_mockbin()

        # Start the daemon set matching the selected transport.
        if self.parser.options.transport == 'zeromq':
            self.start_zeromq_daemons()
        elif self.parser.options.transport == 'raet':
            self.start_raet_daemons()
        elif self.parser.options.transport == 'tcp':
            self.start_tcp_daemons()

        self.minion_targets = set(['minion', 'sub_minion'])
        self.pre_setup_minions()
        self.setup_minions()

        if getattr(self.parser.options, 'ssh', False):
            self.prep_ssh()

        if self.parser.options.sysinfo:
            # print_header only grew the width kwarg later; fall back when
            # running against an older support library.
            try:
                print_header(
                    '~~~~~~~ Versions Report ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('~~~~~~~ Versions Report ', inline=True)
            print('\n'.join(salt.version.versions_report()))

            try:
                print_header(
                    '~~~~~~~ Minion Grains Information ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('~~~~~~~ Minion Grains Information ', inline=True)
            grains = self.client.cmd('minion', 'grains.items')
            minion_opts = self.minion_opts.copy()
            minion_opts['color'] = self.parser.options.no_colors is False
            salt.output.display_output(grains, 'grains', minion_opts)

        try:
            print_header(
                '=', sep='=', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            print_header('', sep='=', inline=True)

        # post_setup_minions() must run even if a subclass hook raised.
        try:
            return self
        finally:
            self.post_setup_minions()
    def start_daemon(self, cls, opts, start_fun):
        # Spawn `cls(opts).<start_fun>()` in a child process and return the
        # still-running multiprocessing.Process handle.
        def start(cls, opts, start_fun):
            # Runs in the child: retitle the process, then enter the
            # daemon's blocking entry point.
            salt.utils.appendproctitle('{0}-{1}'.format(self.__class__.__name__, cls.__name__))
            daemon = cls(opts)
            getattr(daemon, start_fun)()

        process = multiprocessing.Process(target=start,
                                          args=(cls, opts, start_fun))
        process.start()
        return process
def start_zeromq_daemons(self):
'''
Fire up the daemons used for zeromq tests
'''
self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler)
self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
self.log_server_process.daemon = True
self.log_server_process.start()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-master ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.master_process = start_daemon(
daemon_name='salt-master',
daemon_id=self.master_opts['id'],
daemon_log_prefix='salt-master/{}'.format(self.master_opts['id']),
daemon_cli_script_name='master',
daemon_config=self.master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=30)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-minion ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.minion_process = start_daemon(
daemon_name='salt-minion',
daemon_id=self.master_opts['id'],
daemon_log_prefix='salt-minion/{}'.format(self.minion_opts['id']),
daemon_cli_script_name='minion',
daemon_config=self.minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=30)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting sub salt-minion ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.sub_minion_process = start_daemon(
daemon_name='sub salt-minion',
daemon_id=self.master_opts['id'],
daemon_log_prefix='sub-salt-minion/{}'.format(self.sub_minion_opts['id']),
daemon_cli_script_name='minion',
daemon_config=self.sub_minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=30)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting sub salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting sub salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting syndic salt-master ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.smaster_process = start_daemon(
daemon_name='salt-smaster',
daemon_id=self.syndic_master_opts['id'],
daemon_log_prefix='salt-smaster/{}'.format(self.syndic_master_opts['id']),
daemon_cli_script_name='master',
daemon_config=self.syndic_master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=30)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting syndic salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting syndic salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-syndic ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.syndic_process = start_daemon(
daemon_name='salt-syndic',
daemon_id=self.syndic_opts['id'],
daemon_log_prefix='salt-syndic/{}'.format(self.syndic_opts['id']),
daemon_cli_script_name='syndic',
daemon_config=self.syndic_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR,
daemon_class=SaltSyndic,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=30)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-syndic ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-syndic ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
if self.parser.options.proxy:
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-proxy ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.proxy_process = start_daemon(
daemon_name='salt-proxy',
daemon_id=self.master_opts['id'],
daemon_log_prefix='salt-proxy/{}'.format(self.proxy_opts['id']),
daemon_cli_script_name='proxy',
daemon_config=self.proxy_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltProxy,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=30)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-proxy ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-proxy ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
def start_raet_daemons(self):
    '''
    Start the RAET transport flavour of the master, minion and
    sub-minion daemons, then give them a moment to spin up.
    '''
    import salt.daemons.flo
    daemon_specs = (
        ('master_process', salt.daemons.flo.IofloMaster,
         self.master_opts, 'start'),
        ('minion_process', salt.daemons.flo.IofloMinion,
         self.minion_opts, 'tune_in'),
        ('sub_minion_process', salt.daemons.flo.IofloMinion,
         self.sub_minion_opts, 'tune_in'),
    )
    for attr_name, daemon_cls, daemon_opts, entry_point in daemon_specs:
        setattr(self, attr_name,
                self.start_daemon(daemon_cls, daemon_opts, entry_point))
    # Wait for the daemons to all spin up
    time.sleep(5)
    # self.smaster_process = self.start_daemon(salt.daemons.flo.IofloMaster,
    #                                          self.syndic_master_opts,
    #                                          'start')
    # There is no RAET syndic daemon yet.
# The TCP transport reuses the ZeroMQ startup logic verbatim; only the
# 'transport' option written by transplant_configs() differs.
start_tcp_daemons = start_zeromq_daemons
def prep_ssh(self):
    '''
    Generate client and host SSH keys and start an sshd instance so the
    salt-ssh tests have a target to talk to.

    On success ``SSH_DAEMON_RUNNING`` is exported into the environment
    and ``self.sshd_process`` / ``self.sshd_pidfile`` are populated.
    When ssh-keygen or sshd cannot be found only a warning is printed.
    '''
    sys.stdout.write(
        ' * {LIGHT_GREEN}Starting {0} ... {ENDC}'.format(
            'SSH server',
            **self.colors
        )
    )
    keygen = salt.utils.which('ssh-keygen')
    sshd = salt.utils.which('sshd')
    if not (keygen and sshd):
        print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!')
        return
    if not os.path.exists(RUNTIME_VARS.TMP_CONF_DIR):
        os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)

    def _generate_key(key_type, key_bits, key_name, cwd):
        # Run ssh-keygen non-interactively (empty passphrase via -P '')
        # and return the captured stderr so callers can skip writing a
        # HostKey line for key types the local ssh-keygen rejects.
        keygen_process = subprocess.Popen(
            [keygen, '-t',
             key_type,
             '-b',
             key_bits,
             '-C',
             '"$(whoami)@$(hostname)-$(date -I)"',
             '-f',
             key_name,
             '-P',
             ''],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=cwd
        )
        _, keygen_err = keygen_process.communicate()
        if keygen_err:
            print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_err)))
        return keygen_err

    # Generate the client key, removing any stale copy first.
    pub_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')
    priv_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test')
    if os.path.exists(pub_key_test_file):
        os.remove(pub_key_test_file)
    if os.path.exists(priv_key_test_file):
        os.remove(priv_key_test_file)
    _generate_key('ecdsa', '521', 'key_test', RUNTIME_VARS.TMP_CONF_DIR)
    sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config')
    shutil.copy(sshd_config_path, RUNTIME_VARS.TMP_CONF_DIR)
    auth_key_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')
    # Generate the server host keys, removing any stale copies first.
    server_key_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'server')
    if not os.path.exists(server_key_dir):
        os.makedirs(server_key_dir)
    server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key')
    server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub')
    server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key')
    server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub')
    server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key')
    # BUG FIX: this previously pointed at 'ssh_host.ed25519_key.pub'
    # (typo), so a stale ed25519 public key was never removed before
    # regeneration.
    server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key.pub')
    for server_key_file in (server_dsa_priv_key_file,
                            server_dsa_pub_key_file,
                            server_ecdsa_priv_key_file,
                            server_ecdsa_pub_key_file,
                            server_ed25519_priv_key_file,
                            server_ed25519_pub_key_file):
        if os.path.exists(server_key_file):
            os.remove(server_key_file)
    keygen_dsa_err = _generate_key('dsa', '1024', 'ssh_host_dsa_key', server_key_dir)
    keygen_escda_err = _generate_key('ecdsa', '521', 'ssh_host_ecdsa_key', server_key_dir)
    # NOTE: ssh-keygen ignores -b for ed25519 keys; the argument is kept
    # for uniformity with the other invocations.
    keygen_ed25519_err = _generate_key('ed25519', '521', 'ssh_host_ed25519_key', server_key_dir)
    # Only advertise host keys that were actually generated.
    with salt.utils.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config:
        ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file))
        if not keygen_dsa_err:
            ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file))
        if not keygen_escda_err:
            ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file))
        if not keygen_ed25519_err:
            ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file))
    self.sshd_pidfile = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd.pid')
    self.sshd_process = subprocess.Popen(
        [sshd, '-f', 'sshd_config', '-oPidFile={0}'.format(self.sshd_pidfile)],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True,
        cwd=RUNTIME_VARS.TMP_CONF_DIR
    )
    _, sshd_err = self.sshd_process.communicate()
    if sshd_err:
        print('sshd had errors on startup: {0}'.format(salt.utils.to_str(sshd_err)))
    else:
        os.environ['SSH_DAEMON_RUNNING'] = 'True'
    # Transplant the roster and point it at the test user and client key.
    roster_path = os.path.join(FILES, 'conf/_ssh/roster')
    shutil.copy(roster_path, RUNTIME_VARS.TMP_CONF_DIR)
    with salt.utils.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'), 'a') as roster:
        roster.write('    user: {0}\n'.format(RUNTIME_VARS.RUNNING_TESTS_USER))
        roster.write('    priv: {0}/{1}'.format(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'))
    sys.stdout.write(
        ' {LIGHT_GREEN}STARTED!\n{ENDC}'.format(
            **self.colors
        )
    )
@classmethod
def config(cls, role):
    '''
    Return the frozen runtime configuration for the given daemon *role*.

    Known roles are: master, minion, syndic, syndic_master, sub_minion
    and proxy.  Raises KeyError for anything else.
    '''
    configs = RUNTIME_VARS.RUNTIME_CONFIGS
    return configs[role]
@classmethod
def config_location(cls):
    '''Return the directory holding the transplanted configuration files.'''
    return RUNTIME_VARS.TMP_CONF_DIR
@property
def client(self):
    '''
    Lazily-created local client used e.g. to ping and sync the test
    minions.

    The client is cached in ``RUNTIME_VARS.RUNTIME_CONFIGS`` because it
    must be created after the master is up; creating it eagerly on
    ``__enter__`` would make it miss the master events.
    '''
    cache = RUNTIME_VARS.RUNTIME_CONFIGS
    if 'runtime_client' not in cache:
        cache['runtime_client'] = salt.client.get_local_client(
            mopts=self.master_opts
        )
    return cache['runtime_client']
@classmethod
def transplant_configs(cls, transport='zeromq'):
    '''
    Build the whole temporary configuration tree used by the test
    daemons.

    Template configs are read from ``RUNTIME_VARS.CONF_DIR``, the
    runtime-dependent values (paths, users, ports, transport) are
    rewritten, the results are dumped as YAML under the temporary config
    directories, and finally each config is reloaded through the
    official salt.config loaders, frozen into
    ``RUNTIME_VARS.RUNTIME_CONFIGS`` and stored on ``cls``.
    '''
    # Start from a clean slate: wipe and recreate every temp config dir.
    if os.path.isdir(RUNTIME_VARS.TMP_CONF_DIR):
        shutil.rmtree(RUNTIME_VARS.TMP_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR)
    print(' * Transplanting configuration files to \'{0}\''.format(RUNTIME_VARS.TMP_CONF_DIR))
    # Fresh, empty known_hosts file for the salt-ssh tests.
    tests_known_hosts_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'salt_ssh_known_hosts')
    with salt.utils.fopen(tests_known_hosts_file, 'w') as known_hosts:
        known_hosts.write('')
    # This master connects to syndic_master via a syndic
    master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'master'))
    master_opts['known_hosts_file'] = tests_known_hosts_file
    master_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
    master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    master_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR
    master_opts['root_dir'] = os.path.join(TMP, 'rootdir')
    master_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki', 'master')
    # This is the syndic for master
    # Let's start with a copy of the syndic master configuration
    syndic_opts = copy.deepcopy(master_opts)
    # Let's update with the syndic configuration
    syndic_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic')))
    syndic_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
    syndic_opts['config_dir'] = RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR
    # This minion connects to master
    minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'minion'))
    minion_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
    minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    minion_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR
    minion_opts['root_dir'] = os.path.join(TMP, 'rootdir')
    minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki')
    minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
    minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')
    # This sub_minion also connects to master
    sub_minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'sub_minion'))
    sub_minion_opts['cachedir'] = os.path.join(TMP, 'rootdir-sub-minion', 'cache')
    sub_minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    sub_minion_opts['config_dir'] = RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR
    sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
    sub_minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir-sub-minion', 'pki', 'minion')
    sub_minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
    sub_minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')
    # This is the master of masters
    syndic_master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic_master'))
    syndic_master_opts['cachedir'] = os.path.join(TMP, 'rootdir-syndic-master', 'cache')
    syndic_master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    syndic_master_opts['config_dir'] = RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR
    syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
    syndic_master_opts['pki_dir'] = os.path.join(TMP, 'rootdir-syndic-master', 'pki', 'master')
    # This proxy connects to master
    # NOTE(review): every other template above is read from
    # RUNTIME_VARS.CONF_DIR while this one uses the bare CONF_DIR module
    # global -- confirm both point at the same directory.
    proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'proxy'))
    proxy_opts['cachedir'] = os.path.join(TMP, 'rootdir-proxy', 'cache')
    # proxy_opts['user'] = running_tests_user
    proxy_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR
    proxy_opts['root_dir'] = os.path.join(TMP, 'rootdir-proxy')
    proxy_opts['pki_dir'] = os.path.join(TMP, 'rootdir-proxy', 'pki')
    proxy_opts['hosts.file'] = os.path.join(TMP, 'rootdir-proxy', 'hosts')
    proxy_opts['aliases.file'] = os.path.join(TMP, 'rootdir-proxy', 'aliases')
    # Select the requested transport; 'zeromq' is the template default.
    if transport == 'raet':
        master_opts['transport'] = 'raet'
        master_opts['raet_port'] = 64506
        minion_opts['transport'] = 'raet'
        minion_opts['raet_port'] = 64510
        sub_minion_opts['transport'] = 'raet'
        sub_minion_opts['raet_port'] = 64520
        # syndic_master_opts['transport'] = 'raet'
    if transport == 'tcp':
        master_opts['transport'] = 'tcp'
        minion_opts['transport'] = 'tcp'
        sub_minion_opts['transport'] = 'tcp'
        syndic_master_opts['transport'] = 'tcp'
        proxy_opts['transport'] = 'tcp'
    # Set up config options that require internal data
    master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = {
        'base': [
            RUNTIME_VARS.TMP_PILLAR_TREE,
            os.path.join(FILES, 'pillar', 'base'),
        ]
    }
    minion_opts['pillar_roots'] = {
        'base': [
            RUNTIME_VARS.TMP_PILLAR_TREE,
            os.path.join(FILES, 'pillar', 'base'),
        ]
    }
    master_opts['file_roots'] = syndic_master_opts['file_roots'] = {
        'base': [
            os.path.join(FILES, 'file', 'base'),
            # Let's support runtime created files that can be used like:
            # salt://my-temp-file.txt
            RUNTIME_VARS.TMP_STATE_TREE
        ],
        # Alternate root to test __env__ choices
        'prod': [
            os.path.join(FILES, 'file', 'prod'),
            RUNTIME_VARS.TMP_PRODENV_STATE_TREE
        ]
    }
    minion_opts['file_roots'] = {
        'base': [
            os.path.join(FILES, 'file', 'base'),
            # Let's support runtime created files that can be used like:
            # salt://my-temp-file.txt
            RUNTIME_VARS.TMP_STATE_TREE
        ],
        # Alternate root to test __env__ choices
        'prod': [
            os.path.join(FILES, 'file', 'prod'),
            RUNTIME_VARS.TMP_PRODENV_STATE_TREE
        ]
    }
    # Sync custom modules out to any minion as soon as it authenticates.
    master_opts.setdefault('reactor', []).append(
        {
            'salt/minion/*/start': [
                os.path.join(FILES, 'reactor-sync-minion.sls')
            ],
        }
    )
    for opts_dict in (master_opts, syndic_master_opts):
        if 'ext_pillar' not in opts_dict:
            opts_dict['ext_pillar'] = []
        # 'type' is the Windows equivalent of 'cat'.
        if salt.utils.is_windows():
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))})
        else:
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))})
    for opts_dict in (master_opts, syndic_master_opts):
        # We need to copy the extension modules into the new master root_dir or
        # it will be prefixed by it
        new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules')
        if not os.path.exists(new_extension_modules_path):
            shutil.copytree(
                os.path.join(
                    INTEGRATION_TEST_DIR, 'files', 'extension_modules'
                ),
                new_extension_modules_path
            )
        opts_dict['extension_modules'] = os.path.join(opts_dict['root_dir'], 'extension_modules')
    # Point the config values to the correct temporary paths
    for name in ('hosts', 'aliases'):
        optname = '{0}.file'.format(name)
        optname_path = os.path.join(TMP, name)
        master_opts[optname] = optname_path
        minion_opts[optname] = optname_path
        sub_minion_opts[optname] = optname_path
        syndic_opts[optname] = optname_path
        syndic_master_opts[optname] = optname_path
        proxy_opts[optname] = optname_path
    # Each daemon gets its own throwaway port for connection checks.
    master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    sub_minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    proxy_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    # Wire every daemon into the runtests engine and the test log server.
    for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts, proxy_opts):
        if 'engines' not in conf:
            conf['engines'] = []
        conf['engines'].append({'salt_runtests': {}})
        if 'engines_dirs' not in conf:
            conf['engines_dirs'] = []
        conf['engines_dirs'].insert(0, ENGINES_DIR)
        if 'log_handlers_dirs' not in conf:
            conf['log_handlers_dirs'] = []
        conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR)
        conf['runtests_log_port'] = SALT_LOG_PORT
    # ----- Transcribe Configuration ---------------------------------------------------------------------------->
    for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
        if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
            # These have runtime computed values and will be handled
            # differently
            continue
        entry_path = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
        if os.path.isfile(entry_path):
            shutil.copy(
                entry_path,
                os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
            )
        elif os.path.isdir(entry_path):
            shutil.copytree(
                entry_path,
                os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
            )
    for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
        # Resolve e.g. 'master' to the local variable master_opts built
        # above via locals().
        computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
        with salt.utils.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry), 'w') as fp_:
            fp_.write(yaml.dump(computed_config, default_flow_style=False))
    sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
    with salt.utils.fopen(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'), 'w') as wfh:
        wfh.write(
            yaml.dump(sub_minion_computed_config, default_flow_style=False)
        )
    shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'master'))
    syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
    with salt.utils.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'), 'w') as wfh:
        wfh.write(
            yaml.dump(syndic_master_computed_config, default_flow_style=False)
        )
    syndic_computed_config = copy.deepcopy(syndic_opts)
    with salt.utils.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'), 'w') as wfh:
        wfh.write(
            yaml.dump(syndic_computed_config, default_flow_style=False)
        )
    shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'))
    # <---- Transcribe Configuration -----------------------------------------------------------------------------
    # ----- Verify Environment ---------------------------------------------------------------------------------->
    # Re-load each config through the official loaders so all defaults
    # get applied on top of the transcribed values.
    master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'))
    minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion'))
    syndic_opts = salt.config.syndic_config(
        os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'),
        os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'),
    )
    sub_minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'))
    syndic_master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'))
    proxy_opts = salt.config.proxy_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'proxy'))
    RUNTIME_VARS.RUNTIME_CONFIGS['master'] = freeze(master_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['proxy'] = freeze(proxy_opts)
    # Pre-create every directory the daemons expect, owned by the test
    # user.
    verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
                os.path.join(master_opts['pki_dir'], 'minions_pre'),
                os.path.join(master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(master_opts['pki_dir'], 'minions_denied'),
                os.path.join(master_opts['cachedir'], 'jobs'),
                os.path.join(master_opts['cachedir'], 'raet'),
                os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(syndic_master_opts['cachedir'], 'jobs'),
                os.path.join(syndic_master_opts['cachedir'], 'raet'),
                os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(master_opts['pki_dir'], 'accepted'),
                os.path.join(master_opts['pki_dir'], 'rejected'),
                os.path.join(master_opts['pki_dir'], 'pending'),
                os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
                os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
                os.path.join(syndic_master_opts['pki_dir'], 'pending'),
                os.path.join(syndic_master_opts['cachedir'], 'raet'),
                os.path.join(minion_opts['pki_dir'], 'accepted'),
                os.path.join(minion_opts['pki_dir'], 'rejected'),
                os.path.join(minion_opts['pki_dir'], 'pending'),
                os.path.join(minion_opts['cachedir'], 'raet'),
                os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
                os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
                os.path.join(sub_minion_opts['pki_dir'], 'pending'),
                os.path.join(sub_minion_opts['cachedir'], 'raet'),
                os.path.dirname(master_opts['log_file']),
                minion_opts['extension_modules'],
                sub_minion_opts['extension_modules'],
                sub_minion_opts['pki_dir'],
                master_opts['sock_dir'],
                syndic_master_opts['sock_dir'],
                sub_minion_opts['sock_dir'],
                minion_opts['sock_dir'],
                RUNTIME_VARS.TMP_STATE_TREE,
                RUNTIME_VARS.TMP_PILLAR_TREE,
                RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
                TMP,
                ],
               RUNTIME_VARS.RUNNING_TESTS_USER,
               root_dir=master_opts['root_dir'],
               )
    # Expose the final configurations on the class for the test suites.
    cls.master_opts = master_opts
    cls.minion_opts = minion_opts
    # cls.proxy_opts = proxy_opts
    cls.sub_minion_opts = sub_minion_opts
    cls.syndic_opts = syndic_opts
    cls.syndic_master_opts = syndic_master_opts
    cls.proxy_opts = proxy_opts
    # <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
    '''
    Kill the minion and master processes and shut down the logging
    infrastructure.  Part of the context-manager protocol, hence the
    builtin-shadowing ``type`` parameter name.
    '''
    # Tear down in rough reverse-dependency order: minions first, then
    # the masters they talk to.
    self.sub_minion_process.terminate()
    self.minion_process.terminate()
    if hasattr(self, 'proxy_process'):
        self.proxy_process.terminate()
    self.master_process.terminate()
    # The syndic daemons only exist for some configurations, so a
    # missing attribute is expected and ignored.
    try:
        self.syndic_process.terminate()
    except AttributeError:
        pass
    try:
        self.smaster_process.terminate()
    except AttributeError:
        pass
    #salt.utils.process.clean_proc(self.sub_minion_process, wait_for_kill=50)
    #self.sub_minion_process.join()
    #salt.utils.process.clean_proc(self.minion_process, wait_for_kill=50)
    #self.minion_process.join()
    #salt.utils.process.clean_proc(self.master_process, wait_for_kill=50)
    #self.master_process.join()
    #try:
    #    salt.utils.process.clean_proc(self.syndic_process, wait_for_kill=50)
    #    self.syndic_process.join()
    #except AttributeError:
    #    pass
    #try:
    #    salt.utils.process.clean_proc(self.smaster_process, wait_for_kill=50)
    #    self.smaster_process.join()
    #except AttributeError:
    #    pass
    # Stop the test log server and restore PATH / sshd state.
    self.log_server.server_close()
    self.log_server.shutdown()
    self._exit_mockbin()
    self._exit_ssh()
    self.log_server_process.join()
    # Shutdown the multiprocessing logging queue listener
    salt_log_setup.shutdown_multiprocessing_logging()
    salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
def pre_setup_minions(self):
    '''
    Subclass this method for additional minion setups.

    Hook invoked before the minions are set up; the default
    implementation does nothing.
    '''
def setup_minions(self):
    '''
    Minions setup routines.

    Hook for the main minion setup work; the default implementation
    does nothing and may be overridden by subclasses.
    '''
def post_setup_minions(self):
    '''
    Subclass this method to execute code after the minions have been
    setup; the default implementation does nothing.
    '''
def _enter_mockbin(self):
    '''
    Prepend the mocked-binaries directory to ``PATH`` (idempotent).
    '''
    current_path = os.environ.get('PATH', '')
    entries = current_path.split(os.pathsep)
    if MOCKBIN not in entries:
        entries.insert(0, MOCKBIN)
    # PATH is rewritten even when MOCKBIN was already present.
    os.environ['PATH'] = os.pathsep.join(entries)
def _exit_ssh(self):
    '''
    Tear down the sshd instance started by prep_ssh(), if any.
    '''
    if hasattr(self, 'sshd_process'):
        try:
            self.sshd_process.kill()
        except OSError as exc:
            # errno 3 (ESRCH on POSIX): the process is already gone.
            if exc.errno != 3:
                raise
        # The Popen handle may not be the daemonized sshd itself, so
        # also kill the pid recorded in sshd's own pidfile.
        with salt.utils.fopen(self.sshd_pidfile) as fhr:
            try:
                os.kill(int(fhr.read()), signal.SIGKILL)
            except OSError as exc:
                if exc.errno != 3:
                    raise
def _exit_mockbin(self):
    '''
    Remove the mocked-binaries directory from ``PATH`` if present.
    '''
    entries = os.environ.get('PATH', '').split(os.pathsep)
    if MOCKBIN in entries:
        # Drops the first occurrence only, mirroring list.remove().
        entries.remove(MOCKBIN)
    os.environ['PATH'] = os.pathsep.join(entries)
@classmethod
def clean(cls):
    '''
    Remove every temporary directory created for the test run.
    '''
    def _force_remove(func, path, excinfo):
        # Some files are created read-only; give the owner full
        # permissions and retry the removal that just failed.
        os.chmod(path, stat.S_IRWXU)
        func(path)

    cleanup_targets = (TMP, RUNTIME_VARS.TMP_STATE_TREE,
                       RUNTIME_VARS.TMP_PILLAR_TREE,
                       RUNTIME_VARS.TMP_PRODENV_STATE_TREE)
    for dirname in cleanup_targets:
        if os.path.isdir(dirname):
            shutil.rmtree(dirname, onerror=_force_remove)
def wait_for_jid(self, targets, jid, timeout=120):
    '''
    Poll until job *jid* is no longer reported as running on any of
    *targets*.

    Returns True once no target reports the job for two consecutive
    polls (debounce against false positives), False when *timeout*
    seconds elapse first.
    '''
    time.sleep(1)  # Allow some time for minions to accept jobs
    now = datetime.now()
    expire = now + timedelta(seconds=timeout)
    job_finished = False
    while now <= expire:
        running = self.__client_job_running(targets, jid)
        # Blank out the previous in-place status line.
        sys.stdout.write(
            '\r{0}\r'.format(
                ' ' * getattr(self.parser.options, 'output_columns', PNUM)
            )
        )
        if not running and job_finished is False:
            # Let's not have false positives and wait one more second
            job_finished = True
        elif not running and job_finished is True:
            return True
        elif running and job_finished is True:
            job_finished = False
        if job_finished is False:
            sys.stdout.write(
                ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                    '{0}'.format(expire - now).rsplit('.', 1)[0],
                    ', '.join(running),
                    **self.colors
                )
            )
            sys.stdout.flush()
        time.sleep(1)
        now = datetime.now()
    else:  # pylint: disable=W0120
        # The while condition expired without the job finishing.
        sys.stdout.write(
            '\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information '
            'back\n'.format(**self.colors)
        )
        sys.stdout.flush()
    return False
def __client_job_running(self, targets, jid):
    '''
    Return the subset of *targets* still reporting job *jid* as the
    job at the top of their ``saltutil.running`` output.
    '''
    running = self.client.cmd(
        list(targets), 'saltutil.running', tgt_type='list'
    )
    return [
        minion_id for (minion_id, jobs) in six.iteritems(running)
        if jobs and jobs[0]['jid'] == jid
    ]
def wait_for_minion_connections(self, targets, timeout):
    '''
    Block until every minion in *targets* answers a ``test.ping``.

    Raises SystemExit (after printing a warning) when *timeout*
    seconds pass with minions still unaccounted for.
    '''
    salt.utils.appendproctitle('WaitForMinionConnections')
    sys.stdout.write(
        ' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to '
        'connect back\n'.format(
            (timeout > 60 and
             timedelta(seconds=timeout) or
             '{0} secs'.format(timeout)),
            ', '.join(targets),
            **self.colors
        )
    )
    sys.stdout.flush()
    expected_connections = set(targets)
    now = datetime.now()
    expire = now + timedelta(seconds=timeout)
    while now <= expire:
        # Rewrite the countdown status line in place.
        sys.stdout.write(
            '\r{0}\r'.format(
                ' ' * getattr(self.parser.options, 'output_columns', PNUM)
            )
        )
        sys.stdout.write(
            ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                '{0}'.format(expire - now).rsplit('.', 1)[0],
                ', '.join(expected_connections),
                **self.colors
            )
        )
        sys.stdout.flush()
        try:
            responses = self.client.cmd(
                list(expected_connections), 'test.ping', tgt_type='list',
            )
        # we'll get this exception if the master process hasn't finished starting yet
        except SaltClientError:
            time.sleep(0.1)
            now = datetime.now()
            continue
        for target in responses:
            if target not in expected_connections:
                # Someone(minion) else "listening"?
                continue
            expected_connections.remove(target)
            sys.stdout.write(
                '\r{0}\r'.format(
                    ' ' * getattr(self.parser.options, 'output_columns',
                                  PNUM)
                )
            )
            sys.stdout.write(
                ' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
                    target, **self.colors
                )
            )
            sys.stdout.flush()
        if not expected_connections:
            return
        time.sleep(1)
        now = datetime.now()
    else:  # pylint: disable=W0120
        # Timed out with minions still missing.
        print(
            '\n {LIGHT_RED}*{ENDC} WARNING: Minions failed to connect '
            'back. Tests requiring them WILL fail'.format(**self.colors)
        )
        try:
            print_header(
                '=', sep='=', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            # Older print_header signatures do not accept 'width'.
            print_header('=', sep='=', inline=True)
        raise SystemExit()
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
    '''
    Push custom modules of kind *modules_kind* (states/modules/grains)
    to *targets* via ``saltutil.sync_<modules_kind>`` and wait for every
    minion to report back.

    Returns True when all targets synced, False when any minion returns
    an error string; raises SystemExit if the sync job does not finish
    within *timeout* seconds (default 120).
    '''
    if not timeout:
        timeout = 120
    # Let's sync all connected minions
    # NOTE(review): this format string only references {1}; the first
    # positional argument (the joined target list) is never shown.
    print(
        ' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} '
        '(saltutil.sync_{1})'.format(
            ', '.join(targets),
            modules_kind,
            **self.colors
        )
    )
    syncing = set(targets)
    # The huge job timeout keeps the job alive on the master; the real
    # wait is bounded by wait_for_jid() below.
    jid_info = self.client.run_job(
        list(targets), 'saltutil.sync_{0}'.format(modules_kind),
        tgt_type='list',
        timeout=999999999999999,
    )
    if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
        print(
            ' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. '
            'Tests requiring these {0} WILL fail'.format(
                modules_kind, **self.colors)
        )
        raise SystemExit()
    while syncing:
        rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
        if rdata:
            for name, output in six.iteritems(rdata):
                if not output['ret']:
                    # Already synced!?
                    syncing.remove(name)
                    continue
                if isinstance(output['ret'], six.string_types):
                    # An error has occurred
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: '
                        '{1}'.format(
                            name, output['ret'],
                            modules_kind,
                            **self.colors)
                    )
                    return False
                print(
                    ' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
                    '{1}'.format(
                        name,
                        ', '.join(output['ret']),
                        modules_kind, **self.colors
                    )
                )
                # Synced!
                try:
                    syncing.remove(name)
                except KeyError:
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} already synced??? '
                        '{1}'.format(name, output, **self.colors)
                    )
    return True
def sync_minion_states(self, targets, timeout=None):
    '''Sync custom state modules out to *targets* (saltutil.sync_states).'''
    salt.utils.appendproctitle('SyncMinionStates')
    self.sync_minion_modules_('states', targets, timeout=timeout)
def sync_minion_modules(self, targets, timeout=None):
    '''Sync custom execution modules out to *targets* (saltutil.sync_modules).'''
    salt.utils.appendproctitle('SyncMinionModules')
    self.sync_minion_modules_('modules', targets, timeout=timeout)
def sync_minion_grains(self, targets, timeout=None):
    '''Sync custom grains modules out to *targets* (saltutil.sync_grains).'''
    salt.utils.appendproctitle('SyncMinionGrains')
    self.sync_minion_modules_('grains', targets, timeout=timeout)
|
aws_utils.py | # Copyright (c) 2018 NVIDIA Corporation
import boto3
import botocore
import time
import threading
import json
import hashlib
from milano.backends.utils import SSHClient
class EC2Instance:
  """Wrapper around a boto3 EC2 instance resource with a lazy SSH channel.

  The wrapper caches the terminated state (a terminated instance never
  changes state again, so API reloads are skipped) and (re)connects SSH
  on demand for the ``exec_command*`` helpers.

  Fixes over the previous revision: the bare ``except:`` clauses no
  longer swallow ``KeyboardInterrupt``/``SystemExit``, and the retry
  loop re-raises with the original traceback (``raise`` instead of
  ``raise e``).
  """

  def __init__(self, resource, username, private_key_path):
    self._resource = resource
    self._private_key_path = private_key_path
    self._username = username
    # SSH connection is established lazily on first exec_command*.
    self._ssh_client = None
    # Sticky flag set once the instance reaches 'terminated'.
    self._terminated = False

  def is_running(self):
    """Return True while the instance state is 'running'."""
    return self.state() == 'running'

  def is_terminated(self):
    """Return True once the instance is past 'pending'/'running'."""
    s = self.state()
    return s != 'pending' and s != 'running'

  def state(self):
    """Refresh and return the EC2 state name (e.g. 'running')."""
    self._reload()
    s = self._resource.state['Name']
    if s == 'terminated':
      self._terminated = True
    return s

  def public_ip(self):
    """Return the instance's current public IP address."""
    self._reload()
    return self._resource.public_ip_address

  def instance_id(self):
    """Return the EC2 instance id."""
    return self._resource.instance_id

  def _reload(self):
    # A terminated instance can never change state again, so skip the
    # describe round trip.
    if not self._terminated:
      self._resource.reload()

  def __try_connect(self):
    """Ensure a live SSH client exists; raise if the host is not up."""
    if self._resource.state['Name'] != 'running':
      raise Exception("instance not running")
    if self._ssh_client is None:
      client = SSHClient(self._private_key_path)
      client.connect(self.public_ip(), self._username)
      self._ssh_client = client

  def exec_command(self, command):
    """Start *command* over SSH without waiting for completion."""
    self.__try_connect()
    return self._ssh_client.exec_command(command)

  def exec_command_blocking(self, command, retries=3):
    """Run *command* to completion, re-establishing the SSH connection
    up to *retries* times before re-raising the last error."""
    for attempt in range(retries):
      try:
        self.__try_connect()
        return self._ssh_client.exec_command_blocking(command)
      except Exception:
        if attempt == retries - 1:
          # Out of retries: propagate with the original traceback.
          raise
        # Drop the (possibly broken) connection and retry.
        try:
          if self._ssh_client is not None:
            self._ssh_client.close()
        except Exception:
          pass
        self._ssh_client = None

  def keep_alive(self):
    """Touch the keep-alive file; best-effort, errors are ignored.

    As long as this file remains less than 5 minutes old, the instance
    won't terminate (see startup_script).
    """
    try:
      self.exec_command_blocking("touch /home/ubuntu/.milano_keep_alive")
    except Exception:
      pass

  def is_driver_working(self):
    """Return True when nvidia-smi exits cleanly on the instance."""
    try:
      ec, _, _ = self.exec_command_blocking("nvidia-smi")
      return ec == 0
    except Exception:
      return False

  def datasets_present(self, datasets):
    """Return True when every expected dataset directory exists."""
    try:
      for i in range(len(datasets)):
        ec, _, _ = self.exec_command_blocking("ls /home/ubuntu/data/" + str(i))
        if ec != 0:
          return False
    except Exception:
      return False
    return True

  def terminate(self):
    """Ask EC2 to terminate the instance."""
    return self._resource.terminate()
def startup_script(datasets):
  """Build the EC2 user-data boot script for a worker instance.

  :param datasets: sequence of dataset descriptors; each must have
      ``type == 's3'`` plus ``bucket`` and optional ``prefix`` keys.
      Dataset *i* is synced to ``/home/ubuntu/data/<i>``.
  :returns: the complete ``#!/bin/bash`` script as a string.
  :raises Exception: for an unrecognized dataset source type.
  """
  # Build the sync commands with enumerate + join instead of indexed
  # string concatenation.
  mount_lines = ["\n"]
  for index, dataset in enumerate(datasets):
    if dataset['type'] == 's3':
      mount_lines.append("aws s3 sync {src} {dst}\n".format(
          src="s3://{bucket}/{prefix}".format(
              bucket=dataset['bucket'],
              prefix=dataset.get('prefix', "")),
          dst="/home/ubuntu/data/" + str(index),
      ))
    else:
      raise Exception("unrecognized dataset source type '{}'".format(
          dataset['type']))
  dataset_mounts = "".join(mount_lines)
  # TODO All of the software installation should be baked into an AMI instead,
  # this is pretty slow.
  # The keep-alive watchdog shuts the instance down once
  # ~/.milano_keep_alive is older than 5 minutes (see
  # EC2Instance.keep_alive).
  return """#!/bin/bash
touch /home/ubuntu/.milano_keep_alive
chmod 777 /home/ubuntu/.milano_keep_alive
eval "while true; do find /home/ubuntu/.milano_keep_alive -mmin +5 -exec shutdown -h now {} + && sleep 10; done" &>/dev/null &disown;
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/ubuntu16.04/amd64/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
groupadd docker
usermod -aG docker ubuntu
apt-get update
apt-get install -y awscli
""" + dataset_mounts + """
apt-get install -y docker-ce
apt-get install -y nvidia-docker2
apt-get install -y nvidia-384
modprobe nvidia
systemctl restart docker
"""
class EC2InstanceManager:
def __init__(self, count, key_name, private_key_path, region_name,
             spot_instances, datasets, iam_role, user_params):
  """Coordinator for a pool of EC2 worker instances.

  :param count: desired number of instances
  :param key_name: name of the EC2 key pair used for SSH access
  :param private_key_path: local path to the matching private key
  :param region_name: AWS region to launch into
  :param spot_instances: when True, request spot instead of on-demand
  :param datasets: dataset descriptors baked into the startup script
  :param iam_role: IAM role for the workers; created on demand if None
  :param user_params: extra kwargs merged over the create_instances call
  """
  self._desired_count = count
  self._key_name = key_name
  self._private_key_path = private_key_path
  self._region_name = region_name
  self._spot_instances = spot_instances
  self._datasets = datasets
  self._iam_role = iam_role
  self._user_params = user_params
  # instance_id -> EC2Instance, guarded by self._lock.
  self._instances = {}
  self._active_instance_ids = []
  self._thread = None
  self._lock = threading.Lock()
  self._stop_event = threading.Event()
  # The manager starts working immediately: this background thread runs
  # _management_thread_main until terminate() sets _stop_event.
  self._thread = threading.Thread(target=self._management_thread_main)
  self._thread.start()
def _ami_for_region(self):
# ubuntu 16.04 HVM SSD
ami = {
"us-east-1": "ami-5c150e23",
"us-west-1": "ami-4d6a852e",
"ap-northeast-1": "ami-e5b3ca08",
"sa-east-1": "ami-01316f8dfe32c01e2",
"ap-southeast-1": "ami-01fde464a811ead8a",
"ca-central-1": "ami-4975f82d",
"ap-south-1": "ami-0dcc9657fd6ff85bc",
"eu-central-1": "ami-9fbfb174",
"eu-west-1": "ami-0a8458313ef39d6f6",
"cn-north-1": "ami-0510c868",
"cn-northwest-1": "ami-f96c7b9b",
"us-gov-west-1": "ami-3a4dd15b",
"ap-northeast-2": "ami-09960a24a97b8087b",
"ap-southeast-2": "ami-fc26869e",
"us-west-2": "ami-529fb82a",
"us-east-2": "ami-0eb3ba416aed8a6a4",
"eu-west-2": "ami-52d12435",
"ap-northeast-3": "ami-0d5d86281edca346f",
"eu-west-3": "ami-0a06fa501d424d43f"
}
return ami.get(self._region_name, "")
def _launch(self, launch_count):
s = boto3.Session(region_name=self._region_name)
iam_client = s.client('iam')
iam = s.resource("iam")
ec2 = s.resource("ec2")
# unique role per dataset config
if self._iam_role is None:
self._iam_role, _ = get_or_create_role(
"milano-" + sha1short(json.dumps(self._datasets)),
self._datasets, iam, iam_client)
profile_name, _ = get_or_create_instance_profile(
self._iam_role + "-ip", self._iam_role, iam)
sg_id = get_or_create_ssh_security_group("milano-worker-ssh", ec2)
create_params = {
'InstanceType': "p3.2xlarge",
'ImageId': self._ami_for_region(),
'KeyName': self._key_name,
'MinCount': launch_count,
'MaxCount': launch_count,
'SecurityGroupIds': [sg_id],
'BlockDeviceMappings': [{
"DeviceName": "/dev/xvda",
"Ebs": {
"DeleteOnTermination": True,
# TODO expose this as a top level config option?
"VolumeSize": 64
}
}],
'TagSpecifications': [{
'ResourceType': 'instance',
'Tags': [{
'Key': 'Name',
'Value': 'milano-worker',
}]
}],
"IamInstanceProfile": {
"Name": profile_name,
},
# If ~/.milano_keep_alive isn't touched every 5 minutes, the instance
# will auto terminate.
'InstanceInitiatedShutdownBehavior': "terminate",
'UserData': startup_script(self._datasets),
}
if self._spot_instances:
create_params['InstanceMarketOptions'] = {
'MarketType': 'spot',
'SpotOptions': {
'SpotInstanceType': 'one-time',
'InstanceInterruptionBehavior': 'terminate'
}
}
create_params.update(self._user_params)
instance_resources = ec2.create_instances(**create_params)
with self._lock:
for instance_resource in instance_resources:
self._instances[instance_resource.instance_id] = EC2Instance(
instance_resource, "ubuntu", self._private_key_path)
def active_instance_ids(self):
with self._lock:
return self._active_instance_ids.copy()
def get_instance(self, instance_id):
with self._lock:
return self._instances[instance_id]
def terminate(self):
self._stop_event.set()
self._thread.join()
for _, instance in self._instances.items():
instance.terminate()
def _management_thread_main(self):
while not self._stop_event.is_set():
next_active_ids = []
alive_count = 0
for instance_id, instance in self._instances.items():
if not instance.is_terminated():
alive_count += 1
if instance.is_running():
instance.keep_alive()
if instance.is_driver_working() and instance.datasets_present(
self._datasets):
next_active_ids.append(instance_id)
if alive_count < self._desired_count:
needed_count = self._desired_count - alive_count
print("launching {count} EC2 instances and mounting datasets. this may take a few minutes...".
format(count=needed_count))
try:
self._launch(needed_count)
except Exception as e:
print(e)
pass
with self._lock:
self._active_instance_ids = next_active_ids
time.sleep(10)
def get_or_create_ssh_security_group(name, ec2):
    """Return the id of security group ``name``, creating it if absent.

    A newly created group allows inbound SSH (TCP port 22) from anywhere.
    """
    try:
        for existing in ec2.security_groups.filter(GroupNames=[name]):
            # Group already exists -- reuse the first match.
            return existing.group_id
    except botocore.exceptions.ClientError as err:
        code = err.response['Error']['Code']
        if code != 'InvalidGroup.NotFound':
            raise
    # Not found: create the group and open port 22 to the world.
    created = ec2.create_security_group(Description=name, GroupName=name)
    created.authorize_ingress(
        IpProtocol='tcp', CidrIp='0.0.0.0/0', FromPort=22, ToPort=22)
    return created.group_id
def get_or_create_role(name, datasets, iam, client):
    """Return ``(role_name, role_id)`` for ``name``, creating the role if needed.

    A freshly created role gets an EC2 assume-role trust policy plus one
    inline read-only S3 policy per dataset (ListBucket on the bucket,
    Get* on the dataset prefix).

    :param name: IAM role name.
    :param datasets: list of dicts with a 'bucket' key and optional 'prefix'.
    :param iam: boto3 IAM service resource.
    :param client: boto3 IAM client (used for put_role_policy).
    """
    try:
        role = iam.Role(name)
        # Attribute access triggers the lazy load; it only succeeds when the
        # role already exists.
        return role.role_name, role.role_id
    except Exception:
        # Role missing (or lookup failed) -- fall through and create it.
        pass
    role = iam.create_role(RoleName=name, AssumeRolePolicyDocument=json.dumps({
        "Statement": [{
            "Effect": "Allow",
            "Principal": {
                "Service": ["ec2.amazonaws.com"]
            },
            "Action": ["sts:AssumeRole"]
        }]
    }))
    for i, dataset in enumerate(datasets):
        # Was: ``bucket = bucket=datasets[i]['bucket']`` -- a redundant
        # chained assignment; cleaned up along with the index loop.
        bucket = dataset['bucket']
        prefix = dataset.get('prefix', "")  # default: whole bucket
        client.put_role_policy(
            RoleName=name,
            PolicyName=name + "-policy-" + str(i),
            PolicyDocument=json.dumps({
                "Statement": [
                    {
                        "Action": ["s3:ListBucket"],
                        "Effect": "Allow",
                        "Resource": ["arn:aws:s3:::{}".format(bucket)],
                        "Condition": {"StringLike": {"s3:prefix": ["{}/*".format(prefix)]}}
                    },
                    {
                        "Effect": "Allow",
                        "Action": ["s3:Get*"],
                        "Resource": ["arn:aws:s3:::{}/{}*".format(bucket, prefix)]
                    }
                ]
            })
        )
    return role.role_name, role.role_id
def get_or_create_instance_profile(name, role, iam):
    """Return ``(name, instance_profile_id)``, creating the profile if absent.

    When creating, attaches ``role`` and then waits briefly so IAM can
    propagate the new profile before it is used by create_instances.
    """
    try:
        existing = iam.InstanceProfile(name)
        return name, existing.instance_profile_id
    except Exception:
        pass  # profile missing -- create it below
    profile = iam.create_instance_profile(InstanceProfileName=name)
    profile.add_role(RoleName=role)
    # create_instances will fail if we try to use this instance profile too
    # soon, so give IAM time to propagate.
    time.sleep(10)
    return name, profile.instance_profile_id
def sha1short(text):
    """Return the first 6 hex chars of the SHA-1 digest of ``text``.

    Used to derive short, stable identifiers (e.g. IAM role names) from
    configuration strings.  The parameter was renamed from ``str`` to stop
    shadowing the builtin; all in-file callers pass it positionally.
    """
    return hashlib.sha1(text.encode()).hexdigest()[:6]
opentherm.py | import re
from threading import Lock, Thread
import logging
logging.basicConfig(filename='py_otgw_mqtt.log',level=logging.DEBUG)
# Comment out the line above to disable logging to a file
log = logging.getLogger(__name__)
# Default namespace for the topics. Will be overwritten with the value in
# config
topic_namespace="otgw/value"
def hex_int(hex_str):
    """Parse a hexadecimal string and return its integer value.

    The parameter was renamed from ``hex`` to stop shadowing the builtin;
    all in-file callers pass it positionally.
    """
    return int(hex_str, 16)
# Pre-compile a regex to parse valid OTGW-messages, e.g. R80000200.
# Groups: one source letter (B/A/R/T), a type nibble, a reserved nibble,
# a 2-hex-digit data id and a 4-hex-digit data value.
line_parser = re.compile(
    r'^(?P<source>[BART])(?P<type>[0-9A-F])(?P<res>[0-9A-F])'
    r'(?P<id>[0-9A-F]{2})(?P<data>[0-9A-F]{4})$'
)
def flags_msg_generator(ot_id, val):
    r"""
    Generate the pub-messages for a flags (bitfield) value.

    Always emits the raw value under its own topic; for ``boiler_status``
    it additionally emits one boolean message per status bit:
    fault (0), ch_active (1), dhw_active (2), flame_status (3),
    ch_enabled (8) and dhw_enabled (9).
    Returns a generator for the messages.
    """
    yield ("{}/{}".format(topic_namespace, ot_id), val, )
    if ot_id == "boiler_status":
        status_bits = (
            ("fault", 0),
            ("ch_active", 1),
            ("dhw_active", 2),
            ("flame_status", 3),
            ("ch_enabled", 8),
            ("dhw_enabled", 9),
        )
        for flag_name, bit in status_bits:
            yield ("{}/{}".format(topic_namespace, flag_name),
                   val & (1 << bit) > 0, )
def float_msg_generator(ot_id, val):
    r"""
    Generate the pub-message for a fixed-point value.

    The raw 16-bit value is divided by 256 and rounded to two decimals.
    Returns a generator for the messages.
    """
    scaled = round(val / float(256), 2)
    yield ("{}/{}".format(topic_namespace, ot_id), scaled, )
def int_msg_generator(ot_id, val):
    r"""
    Generate the pub-message for an integer value, passed through unchanged.

    Returns a generator for the messages.
    """
    topic = "{}/{}".format(topic_namespace, ot_id)
    yield (topic, val, )
def get_messages(message):
    r"""
    Generate the pub-messages for the supplied OT-message.

    Returns a generator (possibly empty) for the messages.  Message types
    1, 4 and 5 are accepted so that control_setpoint and
    max_relative_modulation_level from the boiler are captured even in a
    system with a basic contact-closure thermostat.
    """
    info = line_parser.match(message)
    log.info("Message is: '{}'".format(message))
    if info is None:
        if message:
            log.debug("Did not understand message: '{}'".format(message))
        return iter([])
    raw_source, raw_type, raw_res, raw_id, raw_data = info.groups()
    source = str(raw_source)
    ttype = hex_int(raw_type) & 7  # low 3 bits carry the message type
    res = hex_int(raw_res)
    did = hex_int(raw_id)
    data = hex_int(raw_data)
    # Only boiler/thermostat/answer frames of the accepted types, for ids
    # we know how to decode.
    if source not in ('B', 'T', 'A') \
            or ttype not in (1, 4, 5) \
            or did not in opentherm_ids:
        return iter([])
    id_name, parser = opentherm_ids[did]
    return parser(id_name, data)
# Map the opentherm ids (named group 'id' in the line parser regex) to
# descriptive names and message creators. I put this here because the
# referenced generators have to be assigned first.
opentherm_ids = {
    # id 0 is a bitfield; the rest are fixed-point floats or plain counters.
    0: ("boiler_status",flags_msg_generator,),
    1: ("control_setpoint",float_msg_generator,),
    9: ("remote_override_setpoint",float_msg_generator,),
    14: ("max_relative_modulation_level",float_msg_generator,),
    16: ("room_setpoint",float_msg_generator,),
    17: ("relative_modulation_level",float_msg_generator,),
    18: ("ch_water_pressure",float_msg_generator,),
    24: ("room_temperature",float_msg_generator,),
    25: ("boiler_water_temperature",float_msg_generator,),
    26: ("dhw_temperature",float_msg_generator,),
    27: ("outside_temperature",float_msg_generator,),
    28: ("return_water_temperature",float_msg_generator,),
    56: ("dhw_setpoint",float_msg_generator,),
    57: ("max_ch_water_setpoint",float_msg_generator,),
    # Counters (ids 116-123) are published as raw integers.
    116: ("burner_starts",int_msg_generator,),
    117: ("ch_pump_starts",int_msg_generator,),
    118: ("dhw_pump_starts",int_msg_generator,),
    119: ("dhw_burner_starts",int_msg_generator,),
    120: ("burner_operation_hours",int_msg_generator,),
    121: ("ch_pump_operation_hours",int_msg_generator,),
    122: ("dhw_pump_valve_operation_hours",int_msg_generator,),
    123: ("dhw_burner_operation_hours",int_msg_generator,)
}
class OTGWClient(object):
    r"""
    An abstract OTGW client.

    This class can be used to create implementations of OTGW clients for
    different types of communication protocols and technologies. To create a
    full implementation, only four methods need to be implemented:
    open, close, write and read.
    """
    def __init__(self, listener, **kwargs):
        # listener: callable invoked with each (topic, value) message
        # parsed from the OTGW data stream.  Extra kwargs are accepted but
        # unused here (left for implementing subclasses).
        self._worker_running = False
        self._listener = listener
        self._worker_thread = None
    def open(self):
        r"""
        Open the connection to the OTGW
        Must be overridden in implementing classes. Called before reading of
        the data starts. Should not return until the connection is opened, so
        an immediately following call to `read` does not fail.
        """
        raise NotImplementedError("Abstract method")
    def close(self):
        r"""
        Close the connection to the OTGW
        Must be overridden in implementing classes. Called after reading of
        the data is finished. Should not return until the connection is closed.
        """
        raise NotImplementedError("Abstract method")
    def write(self, data):
        r"""
        Write data to the OTGW
        Must be overridden in implementing classes. Called when a command is
        received that should be sent to the OTGW. Should pass on the data
        as-is, not appending line feeds, carriage returns or anything.
        """
        raise NotImplementedError("Abstract method")
    def read(self, timeout):
        r"""
        Read data from the OTGW
        Must be overridden in implementing classes. Called in a loop while the
        client is running. May return any block of data read from the
        connection, be it line by line or any other block size. Must return a
        string. Line feeds and carriage returns should be passed on unchanged.
        Should adhere to the timeout passed. If only part of a data block is
        read before the timeout passes, return only the part that was read
        successfully, even if it is an empty string.
        """
        raise NotImplementedError("Abstract method")
    def join(self):
        r"""
        Block until the worker thread finishes
        """
        self._worker_thread.join()
    def start(self):
        r"""
        Connect to the OTGW and start reading data on a worker thread.

        Raises RuntimeError when the worker is already running.
        """
        if self._worker_thread:
            raise RuntimeError("Already running")
        self._worker_thread = Thread(target=self._worker)
        self._worker_thread.start()
    def stop(self):
        r"""
        Stop reading data and disconnect from the OTGW.

        Signals the worker loop to exit and waits for it to finish.
        Raises RuntimeError when the worker is not running.
        """
        if not self._worker_thread:
            raise RuntimeError("Not running")
        self._worker_running = False
        self.join()
    def _worker(self):
        # _worker_running should be True while the worker is running
        self._worker_running = True
        # Open the connection to the OTGW
        self.open()
        # Compile a regex that will only match the first part of a string, up
        # to and including the first time a line break and/or carriage return
        # occurs. Match any number of line breaks and/or carriage returns that
        # immediately follow as well (effectively discarding empty lines)
        line_splitter = re.compile(r'^.*[\r\n]+')
        # Create a buffer for read data
        data = ""
        while self._worker_running:
            # Call the read method of the implementation.
            # NOTE(review): ``read`` is declared as ``read(self, timeout)``
            # but is called here without a timeout -- implementations
            # presumably declare a default value; confirm before subclassing.
            data += self.read()
            # Find all the lines in the read data
            while True:
                m = line_splitter.match(data)
                if not m:
                    # There are no full lines yet, so we have to read some more
                    break
                # Get all the messages for the line that has been read,
                # most lines will yield no messages or just one, but
                # flags-based lines may return more than one.
                for msg in get_messages(m.group().rstrip('\r\n')):
                    try:
                        # Pass each message on to the listener
                        self._listener(msg)
                    except Exception as e:
                        # Log a warning when an exception occurs in the
                        # listener.  NOTE(review): ``log.warn`` is a
                        # deprecated alias of ``log.warning``.
                        log.warn(str(e))
                # Strip the consumed line from the buffer
                data = data[m.end():]
        # After the read loop, close the connection and clean up
        self.close()
        self._worker_thread = None
|
main.py | from login import main as login
from boardparselist import main as boardparse
from commentparselist import main as commentparse
from boarddelete import main as boarddelete
from commentdelete import main as commentdelete
from app_id import main as app_id
from time import sleep
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import threading
#gui start
class Ui_Form(object):
    """Qt UI for the account clean-up tool.

    Logs into the site, collects the user's posts and comments via the
    boardparse/commentparse helpers, and deletes them with
    boarddelete/commentdelete.  Long-running work is pushed onto worker
    threads so the GUI stays responsive.
    """
    # NOTE(review): these are class-level attributes (shared across
    # instances); harmless while only one Ui_Form exists, but the two
    # lists would be shared by any second instance.
    appid = ""          # application id fetched after login
    id = ""             # user id
    pw = ""             # user password
    cookies = ""        # session cookies returned by login()
    cmtlist = []        # collected comment entries
    pstlist = []        # collected post entries
    pstflag = False     # True once posts have been collected
    cmtflag = False     # True once comments have been collected
    loginflag = False   # True once the post-login cooldown has elapsed
    def setupUi(self, Form):
        """Build all widgets and connect their signal handlers."""
        Form.setObjectName("Form")
        Form.resize(287, 268)
        Form.setStyleSheet("font: 9pt ;")
        # User-id input field.
        self.lineEdit = QtWidgets.QLineEdit(Form)
        self.lineEdit.setGeometry(QtCore.QRect(10, 20, 171, 20))
        self.lineEdit.setStyleSheet("font: 9pt ;")
        self.lineEdit.setInputMask("")
        self.lineEdit.setText("")
        self.lineEdit.setObjectName("lineEdit")
        # Password input field (masked).
        self.lineEdit_2 = QtWidgets.QLineEdit(Form)
        self.lineEdit_2.setGeometry(QtCore.QRect(10, 50, 171, 20))
        self.lineEdit_2.setStyleSheet("font: 12pt ;")
        self.lineEdit_2.setInputMask("")
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.lineEdit_2.setEchoMode(self.lineEdit_2.Password)
        # Login button.
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(190, 20, 75, 51))
        self.pushButton.setAutoFillBackground(False)
        self.pushButton.clicked.connect(self.loginbtn)
        self.pushButton.setStyleSheet("background-color: rgb(74, 87, 168);\n"
                                      "color: rgb(224, 255, 255);\n"
                                      "font: 16pt ;\n"
                                      "")
        self.pushButton.setCheckable(False)
        self.pushButton.setObjectName("pushButton")
        # Post-count display.
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(90, 90, 56, 12))
        self.label.setStyleSheet("font: 10pt ;")
        self.label.setTextFormat(QtCore.Qt.AutoText)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        # Comment-count display.
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(90, 110, 56, 12))
        self.label_2.setStyleSheet("font: 10pt ;")
        self.label_2.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.label_2.setFrameShadow(QtWidgets.QFrame.Plain)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        # "Start deletion" button (disabled until data is collected).
        self.pushButton_2 = QtWidgets.QPushButton(Form)
        self.pushButton_2.setGeometry(QtCore.QRect(10, 210, 261, 23))
        self.pushButton_2.setStyleSheet("background-color: rgb(254,255, 255);\n"
                                        "color: rgb(160, 160, 160);\n"
                                        "font: 13pt ;\n"
                                        "")
        self.pushButton_2.clicked.connect(self.deletestart)
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_2.setEnabled(False)
        # Informational labels.
        self.label_3 = QtWidgets.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(10, 160, 261, 16))
        self.label_3.setStyleSheet("font: 9pt ;")
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(Form)
        self.label_4.setGeometry(QtCore.QRect(10, 190, 261, 16))
        self.label_4.setStyleSheet("font: 9pt ;")
        self.label_4.setObjectName("label_4")
        self.label_5 = QtWidgets.QLabel(Form)
        self.label_5.setGeometry(QtCore.QRect(10, 82, 91, 20))
        self.label_5.setStyleSheet("font: 10pt ;")
        self.label_5.setObjectName("label_5")
        self.label_6 = QtWidgets.QLabel(Form)
        self.label_6.setGeometry(QtCore.QRect(10, 100, 81, 20))
        self.label_6.setStyleSheet("font: 10pt ;")
        self.label_6.setObjectName("label_6")
        # Logged-in account name display.
        self.namelabel = QtWidgets.QLabel(Form)
        self.namelabel.setGeometry(QtCore.QRect(10, 130, 251, 16))
        self.namelabel.setStyleSheet("font: 10pt ;")
        self.namelabel.setText("")
        self.namelabel.setObjectName("namelabel")
        # Status/warning line (red text).
        self.warringlabel = QtWidgets.QLabel(Form)
        self.warringlabel.setGeometry(QtCore.QRect(10, 240, 261, 16))
        self.warringlabel.setStyleSheet("color: rgb(255, 0, 0);")
        self.warringlabel.setText("")
        self.warringlabel.setObjectName("warringlabel")
        # "Update comments" button.
        self.restcmt = QtWidgets.QPushButton(Form)
        self.restcmt.setGeometry(QtCore.QRect(160, 105, 101, 16))
        self.restcmt.setStyleSheet("background-color: rgb(254, 255, 255);\n"
                                   "color: rgb(160, 160, 160);")
        self.restcmt.setEnabled(False)
        self.restcmt.setObjectName("restcmt")
        self.restcmt.clicked.connect(self.clickcmtbtn)
        # "Update posts" button.
        self.respst = QtWidgets.QPushButton(Form)
        self.respst.setGeometry(QtCore.QRect(160, 80, 101, 16))
        self.respst.setStyleSheet("background-color: rgb(254, 255, 255);\n"
                                  "color: rgb(160, 160, 160);")
        self.respst.setObjectName("respst")
        self.respst.clicked.connect(self.clickpstbtn)
        self.respst.setEnabled(False)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set all user-visible (Korean) strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "피에로 클리너 v0.8.1"))
        self.lineEdit.setPlaceholderText(_translate("Form", "아이디"))
        self.lineEdit_2.setPlaceholderText(_translate("Form", "비밀번호"))
        self.pushButton.setText(_translate("Form", "로그인"))
        self.label.setText(_translate("Form", "0"))
        self.label_2.setText(_translate("Form", "0"))
        self.pushButton_2.setText(_translate("Form", "삭제 시작"))
        self.label_3.setText(_translate("Form", "카카오톡 플러스친구 @피에로 를 추가해주세요"))
        self.label_4.setText(_translate("Form", "친구추가시 여러가지 소식을 받을수 있어요"))
        self.label_5.setText(_translate("Form", "총 작성글 개수 "))
        self.label_6.setText(_translate("Form", "총 댓글 개수 "))
        self.restcmt.setText(_translate("Form", "업데이트"))
        self.respst.setText(_translate("Form", "업데이트"))
    def timercount(self):
        """Worker-thread task: wait 59s after login, then enable the
        update buttons and restyle them as active.

        NOTE(review): this touches Qt widgets from a non-GUI thread --
        confirm this is safe for the widgets/properties used here.
        """
        sleep(59)
        self.warringlabel.setText("")
        self.loginflag = True
        self.restcmt.setEnabled(True)
        self.respst.setEnabled(True)
        self.pushButton.setStyleSheet("background-color: rgb(255, 255, 255);\n"
                                      "color: rgb(160, 160, 160);\n"
                                      "font: 16pt ;\n"
                                      "")
        self.respst.setStyleSheet("background-color: rgb(74, 87, 168);\n"
                                  "color: rgb(224, 255, 255);\n"
                                  "font: 10pt ;\n"
                                  "")
        self.restcmt.setStyleSheet("background-color: rgb(74, 87, 168);\n"
                                   "color: rgb(224, 255, 255);\n"
                                   "font: 10pt ;\n"
                                   "")
        self.warringlabel.setText("")
    def loginbtn(self):
        """Log in with the entered credentials; on success start the
        cooldown timer thread and lock the credential fields."""
        self.id = self.lineEdit.text()
        self.pw = self.lineEdit_2.text()
        self.cookies , self.id = login(self.id,self.pw,self.namelabel,self.warringlabel)
        if self.cookies != False:  # on successful login
            self.appid = app_id()  # fetch the app_id after a successful login
            t = threading.Thread(target=self.timercount)
            t.start()
            self.lineEdit.setDisabled(True)
            self.lineEdit_2.setDisabled(True)
            self.warringlabel.setText("로그인 성공!!! 60초만 기다려주세요...")
    # -- post-update flow: button click spawns updatepst on a thread --
    def updatepst(self):
        """Worker-thread task: collect the user's posts and show the count."""
        self.pstlist = boardparse(self.id,self.cookies)
        self.label.setText(str(len(self.pstlist)))
        self.pushButton_2.setStyleSheet("background-color: rgb(74, 87, 168);\n"
                                        "color: rgb(224, 255, 255);\n"
                                        "font: 16pt ;\n"
                                        "")
        self.warringlabel.setText("")
        self.pstflag=True
    def clickpstbtn(self):  # when the post-update button is clicked
        """Disable the button, show progress, and collect posts on a thread."""
        self.respst.setEnabled(False)
        self.pushButton_2.setEnabled(True)
        self.warringlabel.setText("수집중....")
        self.respst.setEnabled(False)
        self.respst.setStyleSheet("background-color: rgb(255, 255, 255);\n"
                                  "color: rgb(160, 160, 160);\n"
                                  "font: 10pt ;\n"
                                  "")
        t = threading.Thread(target=self.updatepst)
        t.start()
    # -- end of post-update flow --
    # -- comment-update flow: button click spawns updatecmt on a thread --
    def updatecmt(self):
        """Worker-thread task: collect the user's comments and show the count."""
        self.cmtlist = commentparse(self.id,self.cookies)
        self.label_2.setText(str(len(self.cmtlist)))
        self.pushButton_2.setStyleSheet("background-color: rgb(74, 87, 168);\n"
                                        "color: rgb(224, 255, 255);\n"
                                        "font: 16pt ;\n"
                                        "")
        self.warringlabel.setText("")
        self.cmtflag=True
    def clickcmtbtn(self):  # when the comment-update button is clicked
        """Disable the button, show progress, and collect comments on a thread."""
        self.restcmt.setEnabled(False)
        self.pushButton_2.setEnabled(True)
        self.warringlabel.setText("수집중....")
        self.restcmt.setEnabled(False)
        self.restcmt.setStyleSheet("background-color: rgb(255, 255, 255);\n"
                                   "color: rgb(160, 160, 160);\n"
                                   "font: 10pt ;\n"
                                   "")
        t = threading.Thread(target=self.updatecmt)
        t.start()
    # -- end of comment-update flow --
    def deletestart(self):
        """Start deletion threads for whichever collections are ready."""
        if self.pstflag == True:  # posts have been collected
            t = threading.Thread(target=boarddelete, args=(self.id,self.pw,self.pstlist,self.warringlabel,self.appid))
            t.start()
            self.respst.setEnabled(True)
            self.respst.setStyleSheet("background-color: rgb(74, 87, 168);\n"
                                      "color: rgb(254, 255, 255);\n"
                                      "font: 10pt ;\n"
                                      "")
        if self.cmtflag == True:  # comments have been collected
            t = threading.Thread(target=commentdelete, args=(self.id,self.pw,self.cmtlist,self.warringlabel,self.appid))
            t.start()
            self.restcmt.setEnabled(True)
            self.restcmt.setStyleSheet("background-color: rgb(74, 87, 168);\n"
                                       "color: rgb(254, 255, 255);\n"
                                       "font: 10pt ;\n"
                                       "")
# gui end
if __name__ == "__main__":
    import sys
    # Build the GUI, show it, and enter the Qt event loop.
    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    # Blocks until the window is closed.
    app.exec_()
crawl_data.py | """
we use the ccxt to crawl data then save it to csv file.
you need to install ccxt by running firstly:
"""
import pandas as pd
import time
from datetime import datetime
import requests
import pytz
from howtrader.trader.database import database_manager
pd.set_option('expand_frame_repr', False) #
from howtrader.trader.object import BarData, Interval, Exchange
# Maximum number of klines returned per request by each Binance endpoint.
BINANCE_SPOT_LIMIT = 1000
BINANCE_FUTURE_LIMIT = 1500
# All timestamps are converted to this timezone before storage.
CHINA_TZ = pytz.timezone("Asia/Shanghai")
from threading import Thread
def generate_datetime(timestamp: float) -> datetime:
    """Convert a millisecond epoch timestamp to a timezone-aware datetime.

    :param timestamp: milliseconds since the epoch (Binance kline format)
    :return: datetime localized to Asia/Shanghai
    """
    naive = datetime.fromtimestamp(timestamp / 1000)
    return CHINA_TZ.localize(naive)
def get_binance_data(symbol: str, exchanges: str, start_time: str, end_time: str):
    """Crawl 1-minute klines from Binance and save them via database_manager.

    :param symbol: e.g. BTCUSDT.
    :param exchanges: 'spot', 'future' (USDT-margined) or 'coin_future'
        (coin-margined).
    :param start_time: date string, e.g. 2020-1-1 or 2020-01-01
    :param end_time: date string, e.g. 2020-1-1 or 2020-01-01
    :raises Exception: when ``exchanges`` is not a supported name.
    """
    save_symbol = symbol
    gate_way = 'BINANCES'
    if exchanges == 'spot':
        print("spot")
        limit = BINANCE_SPOT_LIMIT
        save_symbol = symbol.lower()
        gate_way = 'BINANCE'
        api_url = f'https://api.binance.com/api/v3/klines?symbol={symbol}&interval=1m&limit={limit}'
    elif exchanges == 'future':
        print('future')
        limit = BINANCE_FUTURE_LIMIT
        api_url = f'https://fapi.binance.com/fapi/v1/klines?symbol={symbol}&interval=1m&limit={limit}'
    elif exchanges == 'coin_future':
        print("coin_future")
        limit = BINANCE_FUTURE_LIMIT
        # BUG FIX: this URL f-string was previously evaluated but never
        # assigned, so coin_future requests went to an empty api_url.
        api_url = f'https://dapi.binance.com/dapi/v1/klines?symbol={symbol}&interval=1m&limit={limit}'
    else:
        raise Exception('交易所名称请输入以下其中一个:spot, future, coin_future')
    start_time = int(datetime.strptime(start_time, '%Y-%m-%d').timestamp() * 1000)
    end_time = int(datetime.strptime(end_time, '%Y-%m-%d').timestamp() * 1000)
    while True:
        try:
            print(start_time)
            url = f'{api_url}&startTime={start_time}'
            print(url)
            data = requests.get(url=url).json()
            # Each kline row is:
            # [open_time, open, high, low, close, volume, close_time,
            #  quote_volume, trade_count, taker_buy_volume,
            #  taker_buy_quote_volume, ignore]
            buf = []
            for row in data:
                bar = BarData(
                    symbol=save_symbol,
                    exchange=Exchange.BINANCE,
                    datetime=generate_datetime(row[0]),
                    interval=Interval.MINUTE,
                    volume=float(row[5]),
                    open_price=float(row[1]),
                    high_price=float(row[2]),
                    low_price=float(row[3]),
                    close_price=float(row[4]),
                    gateway_name=gate_way
                )
                buf.append(bar)
            database_manager.save_bar_data(buf)
            # Stop once we have passed end_time, or once the last bar's close
            # time reaches the most recent (still forming) minute.
            if (data[-1][0] > end_time) or data[-1][6] >= (int(time.time() * 1000) - 60 * 1000):
                break
            start_time = data[-1][0]
        except Exception as error:
            # Network/parse errors: report and retry after a pause.
            print(error)
            time.sleep(10)
def download_spot():
    """Download BTCUSDT spot klines, one worker thread per date range.

    All threads are started first, then joined, so the ranges download
    concurrently before this function returns.
    """
    date_ranges = [
        ("2018-1-1", "2019-1-1"),
        ("2019-1-1", "2020-1-1"),
        ("2020-1-1", "2020-11-16"),
    ]
    workers = [Thread(target=get_binance_data,
                      args=('BTCUSDT', 'spot', begin, end))
               for begin, end in date_ranges]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def download_future():
    """Download BTCUSDT USDT-margined futures klines on two worker threads."""
    date_ranges = [
        ("2019-9-10", "2020-3-1"),
        ("2019-3-1", "2020-11-16"),
    ]
    workers = [Thread(target=get_binance_data,
                      args=('BTCUSDT', 'future', begin, end))
               for begin, end in date_ranges]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == '__main__':
    # download_spot()  # download the spot data.
    download_future()  # download the futures data
|
Test_prog.py | from __future__ import print_function
from vicon_dssdk import ViconDataStream
import argparse
import time
import logging
import time
from threading import Thread
import cflib
from cflib.crazyflie import Crazyflie
from cflib.utils import uri_helper
import matlab.engine
import keyboard
import math
import numpy
# Radio URI of the target Crazyflie (overridable via environment variable).
uri = uri_helper.uri_from_env(default='radio://0/80/2M/E7E7E7E7E8')
# Only surface errors from the Crazyflie libraries.
logging.basicConfig(level=logging.ERROR)
class DroneCommands:
    """Connects to a Crazyflie, unlocks its motors, and exposes a method
    for sending clamped roll/pitch/yawrate/thrust setpoints."""
    def __init__(self, link_uri):
        """Open the radio link to ``link_uri`` and register callbacks."""
        self._cf = Crazyflie(rw_cache='./cache')
        self._cf.connected.add_callback(self._connected)
        self._cf.disconnected.add_callback(self._disconnected)
        self._cf.connection_failed.add_callback(self._connection_failed)
        self._cf.connection_lost.add_callback(self._connection_lost)
        self._cf.open_link(link_uri)
        print('Connecting to %s' % link_uri)
    def _connected(self, link_uri):
        """ This callback is called form the Crazyflie API when a Crazyflie
        has been connected and the TOCs have been downloaded."""
        # Start a separate thread to do the motor test.
        # Do not hijack the calling thread!
        Thread(target=self._unlock_motors).start()
    def _connection_failed(self, link_uri, msg):
        """Callback when connection initial connection fails (i.e no Crazyflie
        at the specified address)"""
        print('Connection to %s failed: %s' % (link_uri, msg))
    def _connection_lost(self, link_uri, msg):
        """Callback when disconnected after a connection has been made (i.e
        Crazyflie moves out of range)"""
        print('Connection to %s lost: %s' % (link_uri, msg))
    def _disconnected(self, link_uri):
        """Callback when the Crazyflie is disconnected (called in all cases)"""
        print('Disconnected from %s' % link_uri)
    def _unlock_motors(self):
        # Unlock startup thrust protection by sending an all-zero setpoint.
        self._cf.commander.send_setpoint(0, 0, 0, 0)
    def send_drone_command(self, _desired_roll, _desired_pitch, _desired_yawrate, _desired_thrust):
        """Clamp the setpoints to safe ranges and send them to the drone.

        Thrust is clamped to [0, 0xFFFF]; roll and pitch to +/-15.
        Yaw rate is passed through unclamped.
        """
        # Ensures maximum and minimum thrust isn't exceeded
        if (_desired_thrust < 0):
            _desired_thrust = 0
        elif (_desired_thrust > 0xFFFF):
            _desired_thrust = 0xFFFF
        max_Rot = 15  # max/min value of Roll and Pitch
        if (_desired_roll < -max_Rot):
            _desired_roll = -max_Rot
        elif (_desired_roll > max_Rot):
            _desired_roll = max_Rot
        if (_desired_pitch < -max_Rot):
            _desired_pitch = -max_Rot
        elif (_desired_pitch > max_Rot):
            _desired_pitch = max_Rot
        # Terminal outputs of values for troubleshooting
        #print("sending thrust:", _desired_thrust)
        #print("sending roll:", _desired_roll)
        #print("sending pitch:", _desired_pitch)
        #print("sending yawrate:", _desired_yawrate)
        self._cf.commander.send_setpoint(_desired_roll, _desired_pitch, _desired_yawrate, int(_desired_thrust))  # Send Set points to the drone
class PID_Parameters:
    """Gains and running state for a single PID-controlled axis."""

    def __init__(self, k_p, k_i, k_d):
        # Controller gains.
        self.kp = k_p
        self.ki = k_i
        self.kd = k_d
        # History of the last four errors; 9999 marks "not yet populated".
        self.error_list = [9999, 9999, 9999, 9999]
        # Integral accumulator.
        self.inte = 0
class ControlParameters:
    """Parameters used for the PID controller.

    Builds the reference trajectory via a Matlab engine and provides the
    integral/derivative/frame-conversion helpers used by the control loop.
    """

    def __init__(self, statevector):
        self.Hz = 10  # Frequency of the control loop
        # Per-axis PID gains:           Kp      Ki       Kd
        self.Param_x = PID_Parameters( 3.0, 1.0, 7.0)
        self.Param_y = PID_Parameters( 3.0, 1.0, 7.0)
        self.Param_z = PID_Parameters(5643.0, 500.0, 26000.0)
        self.Param_yaw = PID_Parameters( 50.0, 0.6, 6.0)
        # Difference between calculated input required to hover compared to
        # actual input required.
        self.offset_roll = 0
        self.offset_pitch = 0
        self.offset_yawrate = 0
        self.offset_thrust = 0
        self._trajectory_list = self._getTrajectory(statevector)
        # Reference positions (rows 0-8) and feed-forward inputs (rows 9-11).
        self.x_array = [self._trajectory_list[0], self._trajectory_list[1], self._trajectory_list[2]]
        self.y_array = [self._trajectory_list[3], self._trajectory_list[4], self._trajectory_list[5]]
        self.z_array = [self._trajectory_list[6], self._trajectory_list[7], self._trajectory_list[8]]
        self.inputs_array = [self._trajectory_list[9], self._trajectory_list[10], self._trajectory_list[11]]

    def _getTrajectory(self, statevector):
        """Run the Matlab trajectory generator and return its output list."""
        # start_point = statevector could seed the trajectory from the
        # measured state; currently a fixed start point is used.
        start_point = [1., 1., 0., 0.]
        via_points = ([[1., 1., 1., 2], [-1., 1., 1., 4], [-1., -1., 1., 6], [1., -1., 1., 8],
                       [1., 1., 1., 10]])
        end_point = [1., 1., 0., 15.]
        print("Starting Matlab engine")
        eng = matlab.engine.start_matlab()
        matlabpath = "Path to matlab files"  # specify your path
        eng.addpath(matlabpath, nargout=0)
        print("Starting Matlab code")
        return eng.GetTrajectoryxyz(matlab.double(start_point), matlab.double(via_points),
                                    matlab.double(end_point), matlab.double([self.Hz]))

    def integrator(self, _current_error, _inte):
        """Discrete integral: accumulate the error scaled by the sample period."""
        _inte = (1 / self.Hz) * _current_error + _inte
        return _inte

    def diff(self, _error, error_list):
        """Discrete derivative: average of two central differences over the
        last four errors.  Returns 0 until the history is fully populated."""
        error_list.append(_error)
        error_list.pop(0)
        # BUG FIX: was ``error_list[0] is not 9999`` -- identity comparison
        # with an int literal is unreliable; use value equality.
        if error_list[0] != 9999:
            # BUG FIX: previously read the global ``Control_Param.Hz``
            # instead of this instance's own sample rate.
            a = (error_list[len(error_list) - 1] - error_list[len(error_list) - 3]) / ((1 / self.Hz) * 2)
            b = (error_list[len(error_list) - 2] - error_list[len(error_list) - 4]) / ((1 / self.Hz) * 2)
        else:
            a = 0
            b = 0
        _result = (a + b) / 2
        return _result

    def convert2bodyframe(self, _roll, _pitch, _yaw):
        """Rotate world-frame roll/pitch setpoints into the body frame by yaw."""
        roll = (math.cos(_yaw) * _roll + math.sin(_yaw) * _pitch)
        pitch = (math.cos(_yaw) * _pitch - math.sin(_yaw) * _roll)
        return roll, pitch
def PID_Controller(_current_Position, _desired_Position, _PID_Param):
    """Compute one PID control output for a single axis.

    Uses the global ``Control_Param`` instance for the integral and
    derivative helpers.  Integration is skipped when the measurement equals
    the 0.0 / 4.0 tracking-loss sentinels (see Controller), preventing
    integral wind-up on bogus measurements.
    """
    error = _desired_Position - _current_Position
    # BUG FIX: the original condition (``is not 0.0 or ... is not 4.0``)
    # used identity comparison on floats joined with ``or`` and was
    # therefore always true, so the sentinel check never took effect.
    if _current_Position not in (0.0, 4.0):
        _PID_Param.inte = Control_Param.integrator(error, _PID_Param.inte)
    diff = Control_Param.diff(error, _PID_Param.error_list)
    ControlVariable = error * _PID_Param.kp + _PID_Param.inte * _PID_Param.ki + diff * _PID_Param.kd
    return ControlVariable
def Controller(_statevector, _t):
    """One control-loop step: read the Vicon pose, run the per-axis PIDs,
    and send a setpoint to the drone.

    Relies on module-level globals: client, OBJECT, Control_Param,
    interals (trajectory index) and DroneConnector.
    Returns the (possibly updated) state vector.
    """
    try:
        client.GetFrame()
        # NOTE(review): ``list`` shadows the builtin within this function.
        list,occ = client.GetSegmentGlobalTranslation(OBJECT, OBJECT )
        # Vicon reports millimetres; convert to metres.
        _statevector = [float(list[0])/1000,float(list[1])/1000,float(list[2])/1000]
        list_rpy,occ = client.GetSegmentGlobalRotationEulerXYZ(OBJECT, OBJECT )
        rotationvector = [float(list_rpy[0]),float(list_rpy[1]),float(list_rpy[2])]
    except ViconDataStream.DataStreamException as e:
        # NOTE(review): if this fires on the first call, ``rotationvector``
        # is never assigned and the lines below raise NameError -- confirm
        # whether a stale value is expected to exist here.
        print(e)
    # A z of exactly 0.0 appears to mark a lost-tracking frame; substitute
    # the 4.0 sentinel (see PID_Controller) -- TODO confirm.
    if _statevector[2] == 0.0:
        _statevector[2] = 4.0
    # Feed-forward trajectory input plus per-axis PID correction.
    SetPoint_roll = Control_Param.inputs_array[0][interals] + PID_Controller(_statevector[1], Control_Param.y_array[0][interals], Control_Param.Param_y)
    SetPoint_pitch = Control_Param.inputs_array[1][interals] - PID_Controller(_statevector[0], Control_Param.x_array[0][interals], Control_Param.Param_x)
    SetPoint_thrust = Control_Param.inputs_array[2][interals] + PID_Controller(_statevector[2], Control_Param.z_array[0][interals], Control_Param.Param_z)
    SetPoint_yawrate = -( 0 + PID_Controller(rotationvector[2], 0, Control_Param.Param_yaw))
    SetPoint_roll, SetPoint_pitch = Control_Param.convert2bodyframe(SetPoint_roll, SetPoint_pitch, rotationvector[2])
    # Hold zero setpoints for the first loop iteration.
    if _t >= 1:
        DroneConnector.send_drone_command(SetPoint_roll, SetPoint_pitch, SetPoint_yawrate, SetPoint_thrust)
    else:
        DroneConnector.send_drone_command(0,0,0,0)
    return _statevector
if __name__ == '__main__':
    # --- Vicon motion-capture setup ---
    OBJECT="gr562"
    client = ViconDataStream.Client()
    client.Connect( "192.168.1.33:801")
    client.SetBufferSize( 1 )
    client.EnableSegmentData()
    HasFrame = False
    print("Searching for frame.....")
    # Poll until a valid frame with segment data is received.
    while not HasFrame:
        try:
            client.GetFrame()
            list,occ= client.GetSegmentGlobalTranslation(OBJECT, OBJECT )
            # Convert millimetres to metres.
            statevector=[float(list[0])/1000,float(list[1])/1000,float(list[2])/1000]
            list_rpy,occ= client.GetSegmentGlobalRotationEulerXYZ(OBJECT, OBJECT )
            rotationvector=[float(list_rpy[0]),float(list_rpy[1]),float(list_rpy[2])]
            HasFrame = True
        except ViconDataStream.DataStreamException as e:
            client.GetFrame()
    client.SetAxisMapping( ViconDataStream.Client.AxisMapping.EForward, ViconDataStream.Client.AxisMapping.ELeft, ViconDataStream.Client.AxisMapping.EUp )
    # Build the trajectory / PID parameters (starts the Matlab engine).
    Control_Param = ControlParameters(statevector)
    # Initialize the low-level drivers
    cflib.crtp.init_drivers()
    DroneConnector = DroneCommands(uri)
    interals = 0  # index into the trajectory arrays
    print("Starting trajectory sequence")
    # Run one Controller step per trajectory sample, paced to Control_Param.Hz.
    for thrust in Control_Param.inputs_array[2]:
        start_time = time.time()
        statevector = Controller(statevector, interals)
        print(statevector[0], ',', statevector[1], ',', statevector[2], ',',Control_Param.x_array[0][interals],',',Control_Param.y_array[0][interals],',',Control_Param.z_array[0][interals], ',', interals)
        interals += 1
        result = time.time() - start_time
        # Sleep off the remainder of the sample period, or warn if over budget.
        if (result < 1/Control_Param.Hz):
            time.sleep((1/Control_Param.Hz) - result)
        else:
            print("code too slow")
            print("Frequence was " + str(1/result) + " Hz")
    output=client.Disconnect()
    # Final zero setpoint stops the motors.
    DroneConnector.send_drone_command(0,0,0,0)
|
test_singletonperthread.py | import uuid
from multiprocessing import Queue
from threading import Thread, currentThread
from azurelinuxagent.common.singletonperthread import SingletonPerThread
from tests.tools import AgentTestCase, clear_singleton_instances
class TestClassToTestSingletonPerThread(SingletonPerThread):  # pylint: disable=too-few-public-methods
    """
    Since these tests deal with testing in a multithreaded environment,
    we employ the use of multiprocessing.Queue() to ensure that the data is consistent.

    This test class uses a uuid to identify an object instead of directly using object reference because
    Queue.get() returns a different object reference than what is put in it even though the object is same
    (which is verified using uuid in this test class)

    Eg:
        obj1 = WireClient("obj1")
        obj1
        <__main__.WireClient object at 0x7f5e78476198>
        q = Queue()
        q.put(obj1)
        test1 = q.get()
        test1
        <__main__.WireClient object at 0x7f5e78430630>
        test1.endpoint == obj1.endpoint
        True
    """

    def __init__(self):
        # Set the name of the object to the current thread name so tests can
        # map instances back to the thread that created them.
        # Thread.name replaces the deprecated Thread.getName() accessor.
        self.name = currentThread().name
        # Unique identifier for a class object; survives Queue round-trips
        # where the raw object reference does not.
        self.uuid = str(uuid.uuid4())
class TestSingletonPerThread(AgentTestCase):
    """Tests for SingletonPerThread: one instance per thread name, shared across threads with the same name."""

    THREAD_NAME_1 = 'thread-1'
    THREAD_NAME_2 = 'thread-2'

    def setUp(self):
        super(TestSingletonPerThread, self).setUp()
        # In a multi-threaded environment, exceptions thrown in the child thread will not be propagated to the parent
        # thread. In order to achieve that, adding all exceptions to a Queue and then checking that in parent thread.
        self.errors = Queue()

        # Cleanup the singleton instance dict so each test starts fresh.
        clear_singleton_instances(TestClassToTestSingletonPerThread)

    def _setup_multithread_and_execute(self, func1, args1, func2, args2, t1_name=None, t2_name=None):  # pylint: disable=too-many-arguments
        """Run func1/func2 on two named threads, join both, and re-raise any errors the workers queued."""
        t1 = Thread(target=func1, args=args1)  # pylint: disable=invalid-name
        t2 = Thread(target=func2, args=args2)  # pylint: disable=invalid-name
        # Assigning Thread.name replaces the deprecated setName() API.
        t1.name = t1_name if t1_name else self.THREAD_NAME_1
        t2.name = t2_name if t2_name else self.THREAD_NAME_2
        t1.start()
        t2.start()
        t1.join()
        t2.join()

        # Drain the error queue and surface child-thread failures in the parent.
        errs = []
        while not self.errors.empty():
            errs.append(self.errors.get())
        if errs:
            raise Exception("Errors: %s" % ' , '.join(errs))

    @staticmethod
    def _get_test_class_instance(q, err):  # pylint: disable=invalid-name
        """Create a singleton instance and queue it; on failure queue the error message instead."""
        try:
            obj = TestClassToTestSingletonPerThread()
            q.put(obj)
        except Exception as e:  # pylint: disable=invalid-name
            err.put(str(e))

    def _parse_instances_and_return_thread_objects(self, instances, t1_name=None, t2_name=None):
        """Match the two queued instances to their creating threads by name.

        Returns (t1_object, t2_object); an entry is None when no instance carries that thread name.
        """
        obj1, obj2 = instances.get(), instances.get()

        def check_obj(name):
            if obj1.name == name:  # pylint: disable=no-else-return
                return obj1
            elif obj2.name == name:
                return obj2
            else:
                return None

        t1_object = check_obj(t1_name if t1_name else self.THREAD_NAME_1)
        t2_object = check_obj(t2_name if t2_name else self.THREAD_NAME_2)

        return t1_object, t2_object

    def test_it_should_have_only_one_instance_for_same_thread(self):
        obj1 = TestClassToTestSingletonPerThread()
        obj2 = TestClassToTestSingletonPerThread()

        self.assertEqual(obj1.uuid, obj2.uuid)

    def test_it_should_have_multiple_instances_for_multiple_threads(self):
        instances = Queue()

        self._setup_multithread_and_execute(func1=self._get_test_class_instance,
                                            args1=(instances, self.errors),
                                            func2=self._get_test_class_instance,
                                            args2=(instances, self.errors))

        self.assertEqual(2, instances.qsize())  # Assert that there are 2 objects in the queue

        obj1, obj2 = instances.get(), instances.get()
        self.assertNotEqual(obj1.uuid, obj2.uuid)

    def test_it_should_return_existing_instance_for_new_thread_with_same_name(self):
        instances = Queue()

        self._setup_multithread_and_execute(func1=self._get_test_class_instance,
                                            args1=(instances, self.errors),
                                            func2=self._get_test_class_instance,
                                            args2=(instances, self.errors))

        t1_obj, t2_obj = self._parse_instances_and_return_thread_objects(instances)

        new_instances = Queue()
        # The 2nd call is to get new objects with the same thread name to verify if the objects are same
        self._setup_multithread_and_execute(func1=self._get_test_class_instance,
                                            args1=(new_instances, self.errors),
                                            func2=self._get_test_class_instance,
                                            args2=(new_instances, self.errors))

        new_t1_obj, new_t2_obj = self._parse_instances_and_return_thread_objects(new_instances)

        self.assertEqual(t1_obj.name, new_t1_obj.name)
        self.assertEqual(t1_obj.uuid, new_t1_obj.uuid)
        self.assertEqual(t2_obj.name, new_t2_obj.name)
        self.assertEqual(t2_obj.uuid, new_t2_obj.uuid)

    def test_singleton_object_should_match_thread_name(self):
        instances = Queue()
        t1_name = str(uuid.uuid4())
        t2_name = str(uuid.uuid4())

        # Local def instead of a lambda bound to a name (PEP 8 E731);
        # builds the key used by SingletonPerThread's instance registry.
        def test_class_obj_name(t_name):
            return "%s__%s" % (TestClassToTestSingletonPerThread.__name__, t_name)

        self._setup_multithread_and_execute(func1=self._get_test_class_instance,
                                            args1=(instances, self.errors),
                                            func2=self._get_test_class_instance,
                                            args2=(instances, self.errors),
                                            t1_name=t1_name,
                                            t2_name=t2_name)

        singleton_instances = TestClassToTestSingletonPerThread._instances  # pylint: disable=protected-access,no-member

        # Assert instance names are consistent with the thread names
        self.assertIn(test_class_obj_name(t1_name), singleton_instances)
        self.assertIn(test_class_obj_name(t2_name), singleton_instances)

        # Assert that the objects match their respective threads
        # This function matches objects with their thread names and returns the respective object or None if not found
        t1_obj, t2_obj = self._parse_instances_and_return_thread_objects(instances, t1_name, t2_name)

        # Ensure that objects for both the threads were found
        self.assertIsNotNone(t1_obj)
        self.assertIsNotNone(t2_obj)

        # Ensure that the objects match with their respective thread objects
        self.assertEqual(singleton_instances[test_class_obj_name(t1_name)].uuid, t1_obj.uuid)
        self.assertEqual(singleton_instances[test_class_obj_name(t2_name)].uuid, t2_obj.uuid)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.